hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
12ee73c126b23814ff3bb5978454bbd526ed326b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <AdePT/BlockData.h>
struct MyTrack {
int index{0};
int pdg{0};
double energy{0};
double pos[3]{0};
double dir[3]{0};
bool flag1;
bool flag2;
};
struct Scoring {
adept::Atomic_t<int> secondaries;
adept::Atomic_t<float> totalEnergyLoss;
VECCORE_ATT_HOST_DEVICE
Scoring() {}
VECCORE_ATT_HOST_DEVICE
static Scoring *MakeInstanceAt(void *addr)
{
Scoring *obj = new (addr) Scoring();
return obj;
}
};
// kernel function that does energy loss or pair production
__global__ void process(adept::BlockData<MyTrack> *block, Scoring *scor, hiprandState_t *states)
{
int particle_index = blockIdx.x * blockDim.x + threadIdx.x;
// check if you are not outside the used block
if (particle_index > block->GetNused() + block->GetNholes()) return;
// check if the particle is still alive (E>0)
if ((*block)[particle_index].energy == 0) return;
// generate random number
float r = hiprand_uniform(states);
// call the 'process'
if (r < 0.5f) {
// energy loss
float eloss = 0.2f * (*block)[particle_index].energy;
scor->totalEnergyLoss.fetch_add(eloss < 0.001f ? (*block)[particle_index].energy : eloss);
(*block)[particle_index].energy = (eloss < 0.001f ? 0.0f : ((*block)[particle_index].energy - eloss));
// if particle dies (E=0) release the slot
if ((*block)[particle_index].energy < 0.001f) block->ReleaseElement(particle_index);
} else {
// pair production
float eloss = 0.5f * (*block)[particle_index].energy;
(*block)[particle_index].energy -= eloss;
// here I need to create a new particle
auto secondary_track = block->NextElement();
assert(secondary_track != nullptr && "No slot available for secondary track");
secondary_track->energy = eloss;
// increase the counter of secondaries
scor->secondaries.fetch_add(1);
}
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(hiprandState_t *states)
{
/* we have to initialize the state */
hiprand_init(0, 0, 0, states);
}
//
int main()
{
hiprandState_t *state;
hipMalloc((void **)&state, sizeof(hiprandState_t));
hipLaunchKernelGGL(( init), dim3(1), dim3(1), 0, 0, state);
hipDeviceSynchronize();
// Track capacity of the block
constexpr int capacity = 1 << 20;
// Allocate the content of Scoring in a buffer
char *buffer1 = nullptr;
hipMallocManaged(&buffer1, sizeof(Scoring));
Scoring *scor = Scoring::MakeInstanceAt(buffer1);
// Initialize scoring
scor->secondaries = 0;
scor->totalEnergyLoss = 0;
// Allocate a block of tracks with capacity larger than the total number of spawned threads
// Note that if we want to allocate several consecutive block in a buffer, we have to use
// Block_t::SizeOfAlignAware rather than SizeOfInstance to get the space needed per block
using Block_t = adept::BlockData<MyTrack>;
size_t blocksize = Block_t::SizeOfInstance(capacity);
char *buffer2 = nullptr;
hipMallocManaged(&buffer2, blocksize);
auto block = Block_t::MakeInstanceAt(capacity, buffer2);
// initializing one track in the block
auto track = block->NextElement();
track->energy = 100.0f;
// initializing second track in the block
auto track2 = block->NextElement();
track2->energy = 30.0f;
hipDeviceSynchronize();
//
constexpr dim3 nthreads(32);
dim3 numBlocks;
while (block->GetNused()) {
numBlocks.x = (block->GetNused() + block->GetNholes() + nthreads.x - 1) / nthreads.x;
// call the kernels
hipLaunchKernelGGL(( process), dim3(numBlocks), dim3(nthreads), 0, 0, block, scor, state);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
std::cout << "Total energy loss " << scor->totalEnergyLoss.load() << " number of secondaries "
<< scor->secondaries.load() << " blocks used " << block->GetNused() << std::endl;
}
}
| 12ee73c126b23814ff3bb5978454bbd526ed326b.cu | // SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <AdePT/BlockData.h>
struct MyTrack {
int index{0};
int pdg{0};
double energy{0};
double pos[3]{0};
double dir[3]{0};
bool flag1;
bool flag2;
};
struct Scoring {
adept::Atomic_t<int> secondaries;
adept::Atomic_t<float> totalEnergyLoss;
VECCORE_ATT_HOST_DEVICE
Scoring() {}
VECCORE_ATT_HOST_DEVICE
static Scoring *MakeInstanceAt(void *addr)
{
Scoring *obj = new (addr) Scoring();
return obj;
}
};
// kernel function that does energy loss or pair production
__global__ void process(adept::BlockData<MyTrack> *block, Scoring *scor, curandState_t *states)
{
int particle_index = blockIdx.x * blockDim.x + threadIdx.x;
// check if you are not outside the used block
if (particle_index > block->GetNused() + block->GetNholes()) return;
// check if the particle is still alive (E>0)
if ((*block)[particle_index].energy == 0) return;
// generate random number
float r = curand_uniform(states);
// call the 'process'
if (r < 0.5f) {
// energy loss
float eloss = 0.2f * (*block)[particle_index].energy;
scor->totalEnergyLoss.fetch_add(eloss < 0.001f ? (*block)[particle_index].energy : eloss);
(*block)[particle_index].energy = (eloss < 0.001f ? 0.0f : ((*block)[particle_index].energy - eloss));
// if particle dies (E=0) release the slot
if ((*block)[particle_index].energy < 0.001f) block->ReleaseElement(particle_index);
} else {
// pair production
float eloss = 0.5f * (*block)[particle_index].energy;
(*block)[particle_index].energy -= eloss;
// here I need to create a new particle
auto secondary_track = block->NextElement();
assert(secondary_track != nullptr && "No slot available for secondary track");
secondary_track->energy = eloss;
// increase the counter of secondaries
scor->secondaries.fetch_add(1);
}
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(curandState_t *states)
{
/* we have to initialize the state */
curand_init(0, 0, 0, states);
}
//
int main()
{
curandState_t *state;
cudaMalloc((void **)&state, sizeof(curandState_t));
init<<<1, 1>>>(state);
cudaDeviceSynchronize();
// Track capacity of the block
constexpr int capacity = 1 << 20;
// Allocate the content of Scoring in a buffer
char *buffer1 = nullptr;
cudaMallocManaged(&buffer1, sizeof(Scoring));
Scoring *scor = Scoring::MakeInstanceAt(buffer1);
// Initialize scoring
scor->secondaries = 0;
scor->totalEnergyLoss = 0;
// Allocate a block of tracks with capacity larger than the total number of spawned threads
// Note that if we want to allocate several consecutive block in a buffer, we have to use
// Block_t::SizeOfAlignAware rather than SizeOfInstance to get the space needed per block
using Block_t = adept::BlockData<MyTrack>;
size_t blocksize = Block_t::SizeOfInstance(capacity);
char *buffer2 = nullptr;
cudaMallocManaged(&buffer2, blocksize);
auto block = Block_t::MakeInstanceAt(capacity, buffer2);
// initializing one track in the block
auto track = block->NextElement();
track->energy = 100.0f;
// initializing second track in the block
auto track2 = block->NextElement();
track2->energy = 30.0f;
cudaDeviceSynchronize();
//
constexpr dim3 nthreads(32);
dim3 numBlocks;
while (block->GetNused()) {
numBlocks.x = (block->GetNused() + block->GetNholes() + nthreads.x - 1) / nthreads.x;
// call the kernels
process<<<numBlocks, nthreads>>>(block, scor, state);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
std::cout << "Total energy loss " << scor->totalEnergyLoss.load() << " number of secondaries "
<< scor->secondaries.load() << " blocks used " << block->GetNused() << std::endl;
}
}
|
e89308f942f227812606854b607558daa611c65f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void test(float *A, const int N){
int i = threadIdx.x;
if (i < N){
A[i] = A[i+1];
}
} | e89308f942f227812606854b607558daa611c65f.cu | __global__ void test(float *A, const int N){
int i = threadIdx.x;
if (i < N){
A[i] = A[i+1];
}
} |
fb657ac32df421fec4cb7830be2b1b73a6e4aae8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <index_helper.cuh>
#include <generics/ldg.h>
namespace quda {
#ifdef GPU_GAUGE_FORCE
template <typename Mom, typename Gauge>
struct GaugeForceArg {
Mom mom;
const Gauge u;
int threads;
int X[4]; // the regular volume parameters
int E[4]; // the extended volume parameters
int border[4]; // radius of border
int num_paths;
int path_max_length;
double coeff;
const int *input_path_d[4];
const int *length_d;
const double *path_coeff_d;
int count; // equal to sum of all path lengths. Used a convenience for computing perf
GaugeForceArg(Mom &mom, const Gauge &u, int num_paths, int path_max_length, double coeff,
int **input_path_d, const int *length_d, const double* path_coeff_d, int count,
const GaugeField &meta_mom, const GaugeField &meta_u)
: mom(mom), u(u), threads(meta_mom.VolumeCB()), num_paths(num_paths),
path_max_length(path_max_length), coeff(coeff),
input_path_d{ input_path_d[0], input_path_d[1], input_path_d[2], input_path_d[3] },
length_d(length_d), path_coeff_d(path_coeff_d), count(count)
{
for(int i=0; i<4; i++) {
X[i] = meta_mom.X()[i];
E[i] = meta_u.X()[i];
border[i] = (E[i] - X[i])/2;
}
}
virtual ~GaugeForceArg() { }
};
__device__ __host__ inline static int flipDir(int dir) { return (7-dir); }
__device__ __host__ inline static bool isForwards(int dir) { return (dir <= 3); }
// this ensures that array elements are held in cache
template <typename T>
__device__ __host__ inline static T cache(const T *ptr, int idx) {
#ifdef __CUDA_ARCH__
return __ldg(ptr+idx);
#else
return ptr[idx];
#endif
}
template<typename Float, typename Arg, int dir>
__device__ __host__ inline void GaugeForceKernel(Arg &arg, int idx, int parity)
{
typedef Matrix<complex<Float>,3> Link;
int x[4] = {0, 0, 0, 0};
getCoords(x, idx, arg.X, parity);
for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates
//linkA: current matrix
//linkB: the loaded matrix in this round
Link linkA, linkB, staple;
#ifdef __CUDA_ARCH__
extern __shared__ int s[];
int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x;
s[tid] = 0;
signed char *dx = (signed char*)&s[tid];
#else
int dx[4] = {0, 0, 0, 0};
#endif
for (int i=0; i<arg.num_paths; i++) {
Float coeff = cache(arg.path_coeff_d,i);
if (coeff == 0) continue;
const int* path = arg.input_path_d[dir] + i*arg.path_max_length;
// start from end of link in direction dir
int nbr_oddbit = (parity^1);
dx[dir]++;
int path0 = cache(path,0);
int lnkdir = isForwards(path0) ? path0 : flipDir(path0);
if (isForwards(path0)) {
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = linkB;
dx[lnkdir]++; // now have to update location
nbr_oddbit = nbr_oddbit^1;
} else {
dx[lnkdir]--; // if we are going backwards the link is on the adjacent site
nbr_oddbit = nbr_oddbit^1;
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = conj(linkB);
}
for (int j=1; j<cache(arg.length_d,i); j++) {
int pathj = cache(path,j);
int lnkdir = isForwards(pathj) ? pathj : flipDir(pathj);
if (isForwards(pathj)) {
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = linkA * linkB;
dx[lnkdir]++; // now have to update to new location
nbr_oddbit = nbr_oddbit^1;
} else {
dx[lnkdir]--; // if we are going backwards the link is on the adjacent site
nbr_oddbit = nbr_oddbit^1;
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = linkA * conj(linkB);
}
} //j
staple = staple + coeff*linkA;
} //i
// multiply by U(x)
linkA = arg.u(dir, linkIndex(x,arg.E), parity);
linkA = linkA * staple;
// update mom(x)
Link mom = arg.mom(dir, idx, parity);
mom = mom - arg.coeff * linkA;
makeAntiHerm(mom);
arg.mom(dir, idx, parity) = mom;
return;
}
template <typename Float, typename Arg>
void GaugeForceCPU(Arg &arg) {
for (int dir=0; dir<4; dir++) {
for (int parity=0; parity<2; parity++) {
for (int idx=0; idx<arg.threads; idx++) {
switch(dir) {
case 0:
GaugeForceKernel<Float,Arg,0>(arg, idx, parity);
break;
case 1:
GaugeForceKernel<Float,Arg,1>(arg, idx, parity);
break;
case 2:
GaugeForceKernel<Float,Arg,2>(arg, idx, parity);
break;
case 3:
GaugeForceKernel<Float,Arg,3>(arg, idx, parity);
break;
}
}
}
}
return;
}
template <typename Float, typename Arg>
__global__ void GaugeForceGPU(Arg arg) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= arg.threads) return;
int parity = blockIdx.y * blockDim.y + threadIdx.y;
int dir = blockIdx.z * blockDim.z + threadIdx.z;
switch(dir) {
case 0:
GaugeForceKernel<Float,Arg,0>(arg, idx, parity);
break;
case 1:
GaugeForceKernel<Float,Arg,1>(arg, idx, parity);
break;
case 2:
GaugeForceKernel<Float,Arg,2>(arg, idx, parity);
break;
case 3:
GaugeForceKernel<Float,Arg,3>(arg, idx, parity);
break;
}
return;
}
template <typename Float, typename Arg>
class GaugeForce : public TunableVectorY {
private:
Arg &arg;
QudaFieldLocation location;
const char *vol_str;
unsigned int sharedBytesPerThread() const { return 4; } // for dynamic indexing array
unsigned int minThreads() const { return arg.threads; }
bool tuneGridDim() const { return false; } // don't tune the grid dimension
public:
GaugeForce(Arg &arg, const GaugeField &meta_mom, const GaugeField &meta_u)
: TunableVectorY(2), arg(arg), location(meta_mom.Location()), vol_str(meta_mom.VolString()) { }
virtual ~GaugeForce() { }
void apply(const hipStream_t &stream) {
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( GaugeForceGPU<Float,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
} else {
GaugeForceCPU<Float,Arg>(arg);
}
}
void preTune() { arg.mom.save(); }
void postTune() { arg.mom.load(); }
long long flops() const { return (arg.count - arg.num_paths + 1) * 198ll * 2 * arg.mom.volumeCB * 4; }
long long bytes() const { return ((arg.count + 1ll) * arg.u.Bytes() + 2ll*arg.mom.Bytes()) * 2 * arg.mom.volumeCB * 4; }
TuneKey tuneKey() const {
std::stringstream aux;
char comm[5];
comm[0] = (commDimPartitioned(0) ? '1' : '0');
comm[1] = (commDimPartitioned(1) ? '1' : '0');
comm[2] = (commDimPartitioned(2) ? '1' : '0');
comm[3] = (commDimPartitioned(3) ? '1' : '0');
comm[4] = '\0';
aux << "comm=" << comm << ",threads=" << arg.threads << ",num_paths=" << arg.num_paths;
return TuneKey(vol_str, typeid(*this).name(), aux.str().c_str());
}
bool advanceBlockDim(TuneParam ¶m) const {
dim3 block = param.block;
dim3 grid = param.grid;
bool rtn = TunableVectorY::advanceBlockDim(param);
param.block.z = block.z;
param.grid.z = grid.z;
if (!rtn) {
if (param.block.z < 4) {
param.block.z *= 2;
param.grid.z = 4 / param.block.z;
rtn = true;
} else {
param.block.z = 1;
param.grid.z = 4;
rtn = false;
}
}
return rtn;
}
void initTuneParam(TuneParam ¶m) const {
TunableVectorY::initTuneParam(param);
param.block.z = 1;
param.grid.z = 4;
}
void defaultTuneParam(TuneParam ¶m) const {
TunableVectorY::defaultTuneParam(param);
param.block.z = 1;
param.grid.z = 4;
}
};
template <typename Float, typename Mom, typename Gauge>
void gaugeForce(Mom mom, const Gauge &u, GaugeField& meta_mom, const GaugeField& meta_u, const double coeff,
int ***input_path, const int* length_h, const double* path_coeff_h, const int num_paths, const int path_max_length)
{
size_t bytes = num_paths*path_max_length*sizeof(int);
int *input_path_d[4];
int count = 0;
for (int dir=0; dir<4; dir++) {
input_path_d[dir] = (int*)pool_device_malloc(bytes);
hipMemset(input_path_d[dir], 0, bytes);
int* input_path_h = (int*)safe_malloc(bytes);
memset(input_path_h, 0, bytes);
// flatten the input_path array for copying to the device
for (int i=0; i < num_paths; i++) {
for (int j=0; j < length_h[i]; j++) {
input_path_h[i*path_max_length + j] = input_path[dir][i][j];
if (dir==0) count++;
}
}
qudaMemcpy(input_path_d[dir], input_path_h, bytes, hipMemcpyHostToDevice);
host_free(input_path_h);
}
//length
int* length_d = (int*)pool_device_malloc(num_paths*sizeof(int));
qudaMemcpy(length_d, length_h, num_paths*sizeof(int), hipMemcpyHostToDevice);
//path_coeff
double* path_coeff_d = (double*)pool_device_malloc(num_paths*sizeof(double));
qudaMemcpy(path_coeff_d, path_coeff_h, num_paths*sizeof(double), hipMemcpyHostToDevice);
GaugeForceArg<Mom,Gauge> arg(mom, u, num_paths, path_max_length, coeff, input_path_d,
length_d, path_coeff_d, count, meta_mom, meta_u);
GaugeForce<Float,GaugeForceArg<Mom,Gauge> > gauge_force(arg, meta_mom, meta_u);
gauge_force.apply(0);
checkCudaError();
pool_device_free(length_d);
pool_device_free(path_coeff_d);
for (int dir=0; dir<4; dir++) pool_device_free(input_path_d[dir]);
qudaDeviceSynchronize();
}
template <typename Float>
void gaugeForce(GaugeField& mom, const GaugeField& u, const double coeff, int ***input_path,
const int* length, const double* path_coeff, const int num_paths, const int max_length)
{
if (mom.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Reconstruction type %d not supported", mom.Reconstruct());
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
typedef typename gauge::FloatNOrder<Float,18,2,11> M;
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
} else {
errorQuda("Reconstruction type %d not supported", u.Reconstruct());
}
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
}
#endif // GPU_GAUGE_FORCE
void gaugeForce(GaugeField& mom, const GaugeField& u, double coeff, int ***input_path,
int *length, double *path_coeff, int num_paths, int max_length)
{
#ifdef GPU_GAUGE_FORCE
if (mom.Precision() != u.Precision()) errorQuda("Mixed precision not supported");
if (mom.Location() != u.Location()) errorQuda("Mixed field locations not supported");
switch(mom.Precision()) {
case QUDA_DOUBLE_PRECISION:
gaugeForce<double>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
break;
case QUDA_SINGLE_PRECISION:
gaugeForce<float>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
break;
default:
errorQuda("Unsupported precision %d", mom.Precision());
}
#else
errorQuda("Gauge force has not been built");
#endif // GPU_GAUGE_FORCE
}
} // namespace quda
| fb657ac32df421fec4cb7830be2b1b73a6e4aae8.cu | #include <gauge_field_order.h>
#include <quda_matrix.h>
#include <index_helper.cuh>
#include <generics/ldg.h>
namespace quda {
#ifdef GPU_GAUGE_FORCE
template <typename Mom, typename Gauge>
struct GaugeForceArg {
Mom mom;
const Gauge u;
int threads;
int X[4]; // the regular volume parameters
int E[4]; // the extended volume parameters
int border[4]; // radius of border
int num_paths;
int path_max_length;
double coeff;
const int *input_path_d[4];
const int *length_d;
const double *path_coeff_d;
int count; // equal to sum of all path lengths. Used a convenience for computing perf
GaugeForceArg(Mom &mom, const Gauge &u, int num_paths, int path_max_length, double coeff,
int **input_path_d, const int *length_d, const double* path_coeff_d, int count,
const GaugeField &meta_mom, const GaugeField &meta_u)
: mom(mom), u(u), threads(meta_mom.VolumeCB()), num_paths(num_paths),
path_max_length(path_max_length), coeff(coeff),
input_path_d{ input_path_d[0], input_path_d[1], input_path_d[2], input_path_d[3] },
length_d(length_d), path_coeff_d(path_coeff_d), count(count)
{
for(int i=0; i<4; i++) {
X[i] = meta_mom.X()[i];
E[i] = meta_u.X()[i];
border[i] = (E[i] - X[i])/2;
}
}
virtual ~GaugeForceArg() { }
};
__device__ __host__ inline static int flipDir(int dir) { return (7-dir); }
__device__ __host__ inline static bool isForwards(int dir) { return (dir <= 3); }
// this ensures that array elements are held in cache
template <typename T>
__device__ __host__ inline static T cache(const T *ptr, int idx) {
#ifdef __CUDA_ARCH__
return __ldg(ptr+idx);
#else
return ptr[idx];
#endif
}
template<typename Float, typename Arg, int dir>
__device__ __host__ inline void GaugeForceKernel(Arg &arg, int idx, int parity)
{
typedef Matrix<complex<Float>,3> Link;
int x[4] = {0, 0, 0, 0};
getCoords(x, idx, arg.X, parity);
for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates
//linkA: current matrix
//linkB: the loaded matrix in this round
Link linkA, linkB, staple;
#ifdef __CUDA_ARCH__
extern __shared__ int s[];
int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x;
s[tid] = 0;
signed char *dx = (signed char*)&s[tid];
#else
int dx[4] = {0, 0, 0, 0};
#endif
for (int i=0; i<arg.num_paths; i++) {
Float coeff = cache(arg.path_coeff_d,i);
if (coeff == 0) continue;
const int* path = arg.input_path_d[dir] + i*arg.path_max_length;
// start from end of link in direction dir
int nbr_oddbit = (parity^1);
dx[dir]++;
int path0 = cache(path,0);
int lnkdir = isForwards(path0) ? path0 : flipDir(path0);
if (isForwards(path0)) {
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = linkB;
dx[lnkdir]++; // now have to update location
nbr_oddbit = nbr_oddbit^1;
} else {
dx[lnkdir]--; // if we are going backwards the link is on the adjacent site
nbr_oddbit = nbr_oddbit^1;
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = conj(linkB);
}
for (int j=1; j<cache(arg.length_d,i); j++) {
int pathj = cache(path,j);
int lnkdir = isForwards(pathj) ? pathj : flipDir(pathj);
if (isForwards(pathj)) {
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = linkA * linkB;
dx[lnkdir]++; // now have to update to new location
nbr_oddbit = nbr_oddbit^1;
} else {
dx[lnkdir]--; // if we are going backwards the link is on the adjacent site
nbr_oddbit = nbr_oddbit^1;
linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit);
linkA = linkA * conj(linkB);
}
} //j
staple = staple + coeff*linkA;
} //i
// multiply by U(x)
linkA = arg.u(dir, linkIndex(x,arg.E), parity);
linkA = linkA * staple;
// update mom(x)
Link mom = arg.mom(dir, idx, parity);
mom = mom - arg.coeff * linkA;
makeAntiHerm(mom);
arg.mom(dir, idx, parity) = mom;
return;
}
template <typename Float, typename Arg>
void GaugeForceCPU(Arg &arg) {
for (int dir=0; dir<4; dir++) {
for (int parity=0; parity<2; parity++) {
for (int idx=0; idx<arg.threads; idx++) {
switch(dir) {
case 0:
GaugeForceKernel<Float,Arg,0>(arg, idx, parity);
break;
case 1:
GaugeForceKernel<Float,Arg,1>(arg, idx, parity);
break;
case 2:
GaugeForceKernel<Float,Arg,2>(arg, idx, parity);
break;
case 3:
GaugeForceKernel<Float,Arg,3>(arg, idx, parity);
break;
}
}
}
}
return;
}
template <typename Float, typename Arg>
__global__ void GaugeForceGPU(Arg arg) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= arg.threads) return;
int parity = blockIdx.y * blockDim.y + threadIdx.y;
int dir = blockIdx.z * blockDim.z + threadIdx.z;
switch(dir) {
case 0:
GaugeForceKernel<Float,Arg,0>(arg, idx, parity);
break;
case 1:
GaugeForceKernel<Float,Arg,1>(arg, idx, parity);
break;
case 2:
GaugeForceKernel<Float,Arg,2>(arg, idx, parity);
break;
case 3:
GaugeForceKernel<Float,Arg,3>(arg, idx, parity);
break;
}
return;
}
template <typename Float, typename Arg>
class GaugeForce : public TunableVectorY {
private:
Arg &arg;
QudaFieldLocation location;
const char *vol_str;
unsigned int sharedBytesPerThread() const { return 4; } // for dynamic indexing array
unsigned int minThreads() const { return arg.threads; }
bool tuneGridDim() const { return false; } // don't tune the grid dimension
public:
GaugeForce(Arg &arg, const GaugeField &meta_mom, const GaugeField &meta_u)
: TunableVectorY(2), arg(arg), location(meta_mom.Location()), vol_str(meta_mom.VolString()) { }
virtual ~GaugeForce() { }
void apply(const cudaStream_t &stream) {
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
GaugeForceGPU<Float,Arg><<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
} else {
GaugeForceCPU<Float,Arg>(arg);
}
}
void preTune() { arg.mom.save(); }
void postTune() { arg.mom.load(); }
long long flops() const { return (arg.count - arg.num_paths + 1) * 198ll * 2 * arg.mom.volumeCB * 4; }
long long bytes() const { return ((arg.count + 1ll) * arg.u.Bytes() + 2ll*arg.mom.Bytes()) * 2 * arg.mom.volumeCB * 4; }
TuneKey tuneKey() const {
std::stringstream aux;
char comm[5];
comm[0] = (commDimPartitioned(0) ? '1' : '0');
comm[1] = (commDimPartitioned(1) ? '1' : '0');
comm[2] = (commDimPartitioned(2) ? '1' : '0');
comm[3] = (commDimPartitioned(3) ? '1' : '0');
comm[4] = '\0';
aux << "comm=" << comm << ",threads=" << arg.threads << ",num_paths=" << arg.num_paths;
return TuneKey(vol_str, typeid(*this).name(), aux.str().c_str());
}
bool advanceBlockDim(TuneParam ¶m) const {
dim3 block = param.block;
dim3 grid = param.grid;
bool rtn = TunableVectorY::advanceBlockDim(param);
param.block.z = block.z;
param.grid.z = grid.z;
if (!rtn) {
if (param.block.z < 4) {
param.block.z *= 2;
param.grid.z = 4 / param.block.z;
rtn = true;
} else {
param.block.z = 1;
param.grid.z = 4;
rtn = false;
}
}
return rtn;
}
void initTuneParam(TuneParam ¶m) const {
TunableVectorY::initTuneParam(param);
param.block.z = 1;
param.grid.z = 4;
}
void defaultTuneParam(TuneParam ¶m) const {
TunableVectorY::defaultTuneParam(param);
param.block.z = 1;
param.grid.z = 4;
}
};
template <typename Float, typename Mom, typename Gauge>
void gaugeForce(Mom mom, const Gauge &u, GaugeField& meta_mom, const GaugeField& meta_u, const double coeff,
int ***input_path, const int* length_h, const double* path_coeff_h, const int num_paths, const int path_max_length)
{
size_t bytes = num_paths*path_max_length*sizeof(int);
int *input_path_d[4];
int count = 0;
for (int dir=0; dir<4; dir++) {
input_path_d[dir] = (int*)pool_device_malloc(bytes);
cudaMemset(input_path_d[dir], 0, bytes);
int* input_path_h = (int*)safe_malloc(bytes);
memset(input_path_h, 0, bytes);
// flatten the input_path array for copying to the device
for (int i=0; i < num_paths; i++) {
for (int j=0; j < length_h[i]; j++) {
input_path_h[i*path_max_length + j] = input_path[dir][i][j];
if (dir==0) count++;
}
}
qudaMemcpy(input_path_d[dir], input_path_h, bytes, cudaMemcpyHostToDevice);
host_free(input_path_h);
}
//length
int* length_d = (int*)pool_device_malloc(num_paths*sizeof(int));
qudaMemcpy(length_d, length_h, num_paths*sizeof(int), cudaMemcpyHostToDevice);
//path_coeff
double* path_coeff_d = (double*)pool_device_malloc(num_paths*sizeof(double));
qudaMemcpy(path_coeff_d, path_coeff_h, num_paths*sizeof(double), cudaMemcpyHostToDevice);
GaugeForceArg<Mom,Gauge> arg(mom, u, num_paths, path_max_length, coeff, input_path_d,
length_d, path_coeff_d, count, meta_mom, meta_u);
GaugeForce<Float,GaugeForceArg<Mom,Gauge> > gauge_force(arg, meta_mom, meta_u);
gauge_force.apply(0);
checkCudaError();
pool_device_free(length_d);
pool_device_free(path_coeff_d);
for (int dir=0; dir<4; dir++) pool_device_free(input_path_d[dir]);
qudaDeviceSynchronize();
}
template <typename Float>
void gaugeForce(GaugeField& mom, const GaugeField& u, const double coeff, int ***input_path,
const int* length, const double* path_coeff, const int num_paths, const int max_length)
{
if (mom.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Reconstruction type %d not supported", mom.Reconstruct());
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
typedef typename gauge::FloatNOrder<Float,18,2,11> M;
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
} else {
errorQuda("Reconstruction type %d not supported", u.Reconstruct());
}
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
}
#endif // GPU_GAUGE_FORCE
void gaugeForce(GaugeField& mom, const GaugeField& u, double coeff, int ***input_path,
int *length, double *path_coeff, int num_paths, int max_length)
{
#ifdef GPU_GAUGE_FORCE
if (mom.Precision() != u.Precision()) errorQuda("Mixed precision not supported");
if (mom.Location() != u.Location()) errorQuda("Mixed field locations not supported");
switch(mom.Precision()) {
case QUDA_DOUBLE_PRECISION:
gaugeForce<double>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
break;
case QUDA_SINGLE_PRECISION:
gaugeForce<float>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length);
break;
default:
errorQuda("Unsupported precision %d", mom.Precision());
}
#else
errorQuda("Gauge force has not been built");
#endif // GPU_GAUGE_FORCE
}
} // namespace quda
|
2afbdb811ae2d24c6fe76a2e281ab3092e867347.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 128
#define N 96
#define H 14
#define W 14
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one forward convolution on device buffer `input` using the descriptors
// and workspace built in initialize(); returns the device output pointer.
float * ConvGemm::forward(float *input) {
    // Zero the destination, then launch with the IMPLICIT_GEMM algorithm.
    const auto outBytes = 1 * N * H * W * sizeof(float);
    hipMemset(output, 0, outBytes);
    checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha,
                                       convInputDescriptor, input,
                                       convKernelDescriptor, kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                       d_workspace, workspace_bytes,
                                       &beta, convOutputDescriptor, output));
    return output;
}
// Runs a cuDNN 2D convolution with the WINOGRAD_NONFUSED forward algorithm.
// Usage: call initialize() once (allocates device buffers, builds descriptors,
// uploads an all-ones filter), then forward(d_input) per invocation; forward
// returns a device pointer to the output buffer owned by this object.
class ConvWinogradeNon{
public:
// Host-side staging buffer for filter weights; freed after upload in initialize().
float *cpuKernel;
// Blend factors for cudnnConvolutionForward: out = alpha*conv(...) + beta*out.
float alpha = 1.0f;
float beta = 0.0f;
// cuDNN library handle created in initialize().
cudnnHandle_t convCudnn;
// Device workspace for the convolution algorithm and its size in bytes.
void* d_workspace{nullptr};
size_t workspace_bytes{0};
// Tensor/filter/convolution descriptors built in initialize().
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// Device buffers: convolution result and filter weights.
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one forward convolution on device buffer `input` using the descriptors
// and workspace built in initialize(); returns the device output pointer.
float * ConvWinogradeNon::forward(float *input) {
    // Zero the destination, then launch with the WINOGRAD_NONFUSED algorithm.
    const auto outBytes = 1 * N * H * W * sizeof(float);
    hipMemset(output, 0, outBytes);
    checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha,
                                       convInputDescriptor, input,
                                       convKernelDescriptor, kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
                                       d_workspace, workspace_bytes,
                                       &beta, convOutputDescriptor, output));
    return output;
}
// Runs a cuDNN 2D convolution with the FFT forward algorithm.
// Usage: call initialize() once (allocates device buffers, builds descriptors,
// uploads an all-ones filter), then forward(d_input) per invocation; forward
// returns a device pointer to the output buffer owned by this object.
class ConvFFT{
public:
// Host-side staging buffer for filter weights; freed after upload in initialize().
float *cpuKernel;
// Blend factors for cudnnConvolutionForward: out = alpha*conv(...) + beta*out.
float alpha = 1.0f;
float beta = 0.0f;
// cuDNN library handle created in initialize().
cudnnHandle_t convCudnn;
// Device workspace for the convolution algorithm and its size in bytes.
void* d_workspace{nullptr};
size_t workspace_bytes{0};
// Tensor/filter/convolution descriptors built in initialize().
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// Device buffers: convolution result and filter weights.
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one forward convolution on device buffer `input` using the descriptors
// and workspace built in initialize(); returns the device output pointer.
float * ConvFFT::forward(float *input) {
    // Zero the destination, then launch with the FFT algorithm.
    const auto outBytes = 1 * N * H * W * sizeof(float);
    hipMemset(output, 0, outBytes);
    checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha,
                                       convInputDescriptor, input,
                                       convKernelDescriptor, kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_FFT,
                                       d_workspace, workspace_bytes,
                                       &beta, convOutputDescriptor, output));
    return output;
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[32];
__shared__ float kernel_shared[128];
float pad_temp_shared_local[8];
float kernel_shared_local[4];
compute_local[(0)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 4))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 15))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 1))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 16))] = kernel[(((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)))];
kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 9))];
kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 18))];
kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 27))];
kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 36))];
kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 45))];
kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 54))];
kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 63))];
kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1152))];
kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1161))];
kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1170))];
kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1179))];
kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1188))];
kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1197))];
kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1206))];
kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1215))];
__syncthreads();
pad_temp_shared_local[(0)] = pad_temp_shared[(0)];
pad_temp_shared_local[(1)] = pad_temp_shared[(1)];
pad_temp_shared_local[(2)] = pad_temp_shared[(2)];
pad_temp_shared_local[(3)] = pad_temp_shared[(3)];
pad_temp_shared_local[(4)] = pad_temp_shared[(4)];
pad_temp_shared_local[(5)] = pad_temp_shared[(5)];
pad_temp_shared_local[(6)] = pad_temp_shared[(6)];
pad_temp_shared_local[(7)] = pad_temp_shared[(7)];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 65))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(8)];
pad_temp_shared_local[(1)] = pad_temp_shared[(9)];
pad_temp_shared_local[(2)] = pad_temp_shared[(10)];
pad_temp_shared_local[(3)] = pad_temp_shared[(11)];
pad_temp_shared_local[(4)] = pad_temp_shared[(12)];
pad_temp_shared_local[(5)] = pad_temp_shared[(13)];
pad_temp_shared_local[(6)] = pad_temp_shared[(14)];
pad_temp_shared_local[(7)] = pad_temp_shared[(15)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(16)];
pad_temp_shared_local[(1)] = pad_temp_shared[(17)];
pad_temp_shared_local[(2)] = pad_temp_shared[(18)];
pad_temp_shared_local[(3)] = pad_temp_shared[(19)];
pad_temp_shared_local[(4)] = pad_temp_shared[(20)];
pad_temp_shared_local[(5)] = pad_temp_shared[(21)];
pad_temp_shared_local[(6)] = pad_temp_shared[(22)];
pad_temp_shared_local[(7)] = pad_temp_shared[(23)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(24)];
pad_temp_shared_local[(1)] = pad_temp_shared[(25)];
pad_temp_shared_local[(2)] = pad_temp_shared[(26)];
pad_temp_shared_local[(3)] = pad_temp_shared[(27)];
pad_temp_shared_local[(4)] = pad_temp_shared[(28)];
pad_temp_shared_local[(5)] = pad_temp_shared[(29)];
pad_temp_shared_local[(6)] = pad_temp_shared[(30)];
pad_temp_shared_local[(7)] = pad_temp_shared[(31)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 10))];
kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 19))];
kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 28))];
kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 37))];
kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 46))];
kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 55))];
kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 64))];
kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1153))];
kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1162))];
kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1171))];
kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1180))];
kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1189))];
kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1198))];
kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1207))];
kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1216))];
__syncthreads();
pad_temp_shared_local[(0)] = pad_temp_shared[(0)];
pad_temp_shared_local[(1)] = pad_temp_shared[(1)];
pad_temp_shared_local[(2)] = pad_temp_shared[(2)];
pad_temp_shared_local[(3)] = pad_temp_shared[(3)];
pad_temp_shared_local[(4)] = pad_temp_shared[(4)];
pad_temp_shared_local[(5)] = pad_temp_shared[(5)];
pad_temp_shared_local[(6)] = pad_temp_shared[(6)];
pad_temp_shared_local[(7)] = pad_temp_shared[(7)];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 65))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(8)];
pad_temp_shared_local[(1)] = pad_temp_shared[(9)];
pad_temp_shared_local[(2)] = pad_temp_shared[(10)];
pad_temp_shared_local[(3)] = pad_temp_shared[(11)];
pad_temp_shared_local[(4)] = pad_temp_shared[(12)];
pad_temp_shared_local[(5)] = pad_temp_shared[(13)];
pad_temp_shared_local[(6)] = pad_temp_shared[(14)];
pad_temp_shared_local[(7)] = pad_temp_shared[(15)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(16)];
pad_temp_shared_local[(1)] = pad_temp_shared[(17)];
pad_temp_shared_local[(2)] = pad_temp_shared[(18)];
pad_temp_shared_local[(3)] = pad_temp_shared[(19)];
pad_temp_shared_local[(4)] = pad_temp_shared[(20)];
pad_temp_shared_local[(5)] = pad_temp_shared[(21)];
pad_temp_shared_local[(6)] = pad_temp_shared[(22)];
pad_temp_shared_local[(7)] = pad_temp_shared[(23)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(24)];
pad_temp_shared_local[(1)] = pad_temp_shared[(25)];
pad_temp_shared_local[(2)] = pad_temp_shared[(26)];
pad_temp_shared_local[(3)] = pad_temp_shared[(27)];
pad_temp_shared_local[(4)] = pad_temp_shared[(28)];
pad_temp_shared_local[(5)] = pad_temp_shared[(29)];
pad_temp_shared_local[(6)] = pad_temp_shared[(30)];
pad_temp_shared_local[(7)] = pad_temp_shared[(31)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (((int)blockIdx.x) < 6)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 12))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (((int)blockIdx.x) < 6)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 2))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 11))];
kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 20))];
kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 29))];
kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 38))];
kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 47))];
kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 56))];
kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 65))];
kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1154))];
kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1163))];
kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1172))];
kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1181))];
kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1190))];
kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1199))];
kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1208))];
kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1217))];
__syncthreads();
pad_temp_shared_local[(0)] = pad_temp_shared[(0)];
pad_temp_shared_local[(1)] = pad_temp_shared[(1)];
pad_temp_shared_local[(2)] = pad_temp_shared[(2)];
pad_temp_shared_local[(3)] = pad_temp_shared[(3)];
pad_temp_shared_local[(4)] = pad_temp_shared[(4)];
pad_temp_shared_local[(5)] = pad_temp_shared[(5)];
pad_temp_shared_local[(6)] = pad_temp_shared[(6)];
pad_temp_shared_local[(7)] = pad_temp_shared[(7)];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 65))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(8)];
pad_temp_shared_local[(1)] = pad_temp_shared[(9)];
pad_temp_shared_local[(2)] = pad_temp_shared[(10)];
pad_temp_shared_local[(3)] = pad_temp_shared[(11)];
pad_temp_shared_local[(4)] = pad_temp_shared[(12)];
pad_temp_shared_local[(5)] = pad_temp_shared[(13)];
pad_temp_shared_local[(6)] = pad_temp_shared[(14)];
pad_temp_shared_local[(7)] = pad_temp_shared[(15)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(16)];
pad_temp_shared_local[(1)] = pad_temp_shared[(17)];
pad_temp_shared_local[(2)] = pad_temp_shared[(18)];
pad_temp_shared_local[(3)] = pad_temp_shared[(19)];
pad_temp_shared_local[(4)] = pad_temp_shared[(20)];
pad_temp_shared_local[(5)] = pad_temp_shared[(21)];
pad_temp_shared_local[(6)] = pad_temp_shared[(22)];
pad_temp_shared_local[(7)] = pad_temp_shared[(23)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(24)];
pad_temp_shared_local[(1)] = pad_temp_shared[(25)];
pad_temp_shared_local[(2)] = pad_temp_shared[(26)];
pad_temp_shared_local[(3)] = pad_temp_shared[(27)];
pad_temp_shared_local[(4)] = pad_temp_shared[(28)];
pad_temp_shared_local[(5)] = pad_temp_shared[(29)];
pad_temp_shared_local[(6)] = pad_temp_shared[(30)];
pad_temp_shared_local[(7)] = pad_temp_shared[(31)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
}
}
compute[(((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1568))] = compute_local[(4)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1569))] = compute_local[(5)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 14))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1582))] = compute_local[(6)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 15))] = compute_local[(3)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1583))] = compute_local[(7)];
}
// Sum of absolute element-wise differences between two host float
// buffers; used to validate the custom kernel against the cuDNN output.
//   x, y : host buffers, each holding `size` floats
//   size : element count (0 yields 0.0f)
// Returns the accumulated L1 difference.
float check_diff(float *x, float *y, unsigned int size){
    float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
    for(unsigned int i=0;i<size;++i){
        // fabsf avoids accidentally binding to the C `abs(int)` overload,
        // which would truncate fractional differences to zero.
        diff += fabsf(x[i] - y[i]);
    }
    return diff;
}
// Copy a C x H x W tensor into a zero-initialized buffer that carries a
// one-pixel halo around every H x W channel plane.
//   x : source, C*H*W floats
//   y : destination, C*(H+2)*(W+2) floats (fully overwritten)
void pad_input(float * x, float *y){
    const unsigned int padded_w = W + 2;
    const unsigned int plane = (H + 2) * padded_w;
    // Clear the whole destination so the one-pixel border stays zero.
#pragma omp parallel for
    for(unsigned int i = 0; i < plane * C; ++i)
        y[i] = 0.0f;
    // Drop each source plane into the interior of its padded plane,
    // shifted by (1,1) to leave the halo untouched.
#pragma omp parallel for
    for(unsigned int c = 0; c < C; ++c)
        for(unsigned int h = 0; h < H; ++h)
            for(unsigned int w = 0; w < W; ++w)
                y[c * plane + (h + 1) * padded_w + (w + 1)] = x[(c * H + h) * W + w];
}
// Benchmark driver: runs the same 3x3 convolution through three cuDNN
// algorithms (GEMM, Winograd non-fused, FFT) and through the generated
// custom kernel, validates the custom result against cuDNN, and prints
// one CSV row of timings and speedup ratios.
int main(void){
// Random host input tensor (C x H x W), small integer values.
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
// Zero-padded copy of the input (one-pixel halo per plane).
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
// Upload the unpadded input for the cuDNN reference paths.
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
// All-ones filter bank (N x C x 3 x 3) for the custom kernel; the cuDNN
// wrappers upload an identical all-ones filter in their initialize().
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
// Reference output from the GEMM path, copied back for validation.
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
// Warm-up calls for the other two algorithms (results unused).
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
// Time each cuDNN algorithm: events bracket a single forward() call.
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
// Launch configuration for the generated kernel: 7x7x6 blocks of 8
// threads (only threadIdx.z varies), matching the kernel's indexing.
dim3 grid(7,7,6);
dim3 block(1,1,8);
float * paddedInputDevice;
chkerr(hipMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(hipMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), hipMemcpyHostToDevice));
// NOTE(review): the kernel is launched with the UNPADDED device_input;
// paddedInputDevice is uploaded but never used here — presumably the
// kernel performs its own boundary checks; confirm this is intentional.
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
// Sum of absolute differences vs. the cuDNN GEMM reference.
// NOTE(review): `difference` is computed but never printed — confirm
// whether it should be part of the CSV output below.
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
// CSV row: N,C,H,W, FFT/Winograd/GEMM times, custom time, speedups.
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
// NOTE(review): host/device allocations and the two events are never
// released — acceptable for a one-shot benchmark, but worth freeing.
return 0;
}
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <cmath>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 128
#define N 96
#define H 14
#define W 14
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Abort the process with a readable message when a CUDA runtime call
// fails; a cudaSuccess code is a no-op.
inline void chkerr(cudaError_t code)
{
    if (code == cudaSuccess)
        return;
    std::cerr << "ERROR!!!:" << cudaGetErrorString(code) << std::endl;
    exit(-1);
}
// Wraps a cuDNN 3x3 convolution (pad 1, stride 1) executed with the
// IMPLICIT_GEMM algorithm. initialize() builds the descriptors and
// uploads an all-ones filter bank; forward() reuses the same device
// output buffer on every call.
class ConvGemm{
public:
float *cpuKernel; // host staging buffer for the weights (freed after upload)
float alpha = 1.0f; // blend factors for cudnnConvolutionForward: y = alpha*conv(x) + beta*y
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr}; // scratch space sized for the chosen algorithm
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output; // device output buffer (1 x N x H x W), owned by this object
float *kernel; // device filter bank (N x C x R x S)
void initialize();
float *forward(float *input);
};
// One-time setup: allocates device weight/output buffers, builds the
// cuDNN tensor/filter/convolution descriptors for a 3x3, pad-1, stride-1
// convolution, sizes the IMPLICIT_GEMM workspace, and uploads an
// all-ones filter. Must be called before forward().
// NOTE(review): cuDNN/CUDA return codes are not checked in this method
// even though checkCUDNN/chkerr exist — consider wrapping these calls.
void ConvGemm::initialize(){
// Device buffers: filter bank (N x C x 3 x 3) and output (1 x N x H x W).
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
// Input descriptor: one NCHW float image, C channels, H x W.
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
// Filter descriptor: N output channels, C input channels, R x S taps.
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
// Cross-correlation with padding 1, stride 1, no dilation, so the
// output spatial size matches the input (H x W).
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Let cuDNN report the output shape, then describe the output tensor.
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
// Size and allocate the scratch workspace for IMPLICIT_GEMM.
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter bank, then release the host staging buffer.
// NOTE(review): this leaves the cpuKernel member dangling; it is not
// referenced again in the code visible here.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one convolution on `input` (device pointer, 1 x C x H x W float)
// using the IMPLICIT_GEMM algorithm. The device output buffer is zeroed
// and reused on every call; the returned pointer is owned by this
// object and must not be freed by the caller.
float * ConvGemm::forward(float *input) {
// beta == 0 makes the memset redundant for cuDNN itself, but it keeps
// the buffer deterministic if the forward call fails partway.
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Same cuDNN 3x3 convolution wrapper as ConvGemm, but using the
// WINOGRAD_NONFUSED algorithm. initialize() must be called before
// forward(); the device output buffer is reused on every call.
class ConvWinogradeNon{
public:
float *cpuKernel; // host staging buffer for the weights (freed after upload)
float alpha = 1.0f; // blend factors for cudnnConvolutionForward: y = alpha*conv(x) + beta*y
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr}; // scratch space sized for the chosen algorithm
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output; // device output buffer (1 x N x H x W), owned by this object
float *kernel; // device filter bank (N x C x R x S)
void initialize();
float *forward(float *input);
};
// One-time setup mirroring ConvGemm::initialize(), except the workspace
// is sized for the WINOGRAD_NONFUSED algorithm. Must be called before
// forward().
// NOTE(review): cuDNN/CUDA return codes are unchecked here as well.
void ConvWinogradeNon::initialize(){
// Device buffers: filter bank (N x C x 3 x 3) and output (1 x N x H x W).
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
// Input descriptor: one NCHW float image, C channels, H x W.
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
// Filter descriptor: N output channels, C input channels, R x S taps.
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
// Cross-correlation, padding 1, stride 1, no dilation.
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Query the output shape, then describe the output tensor.
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
// Size and allocate the scratch workspace for WINOGRAD_NONFUSED.
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter bank, then release the host staging buffer
// (NOTE(review): leaves the cpuKernel member dangling).
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one convolution on `input` (device pointer, 1 x C x H x W float)
// with the WINOGRAD_NONFUSED algorithm. The device output buffer is
// zeroed and reused on every call; the returned pointer is owned by
// this object.
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Same cuDNN 3x3 convolution wrapper as ConvGemm, but using the FFT
// algorithm. initialize() must be called before forward(); the device
// output buffer is reused on every call.
class ConvFFT{
public:
float *cpuKernel; // host staging buffer for the weights (freed after upload)
float alpha = 1.0f; // blend factors for cudnnConvolutionForward: y = alpha*conv(x) + beta*y
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr}; // scratch space sized for the chosen algorithm
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output; // device output buffer (1 x N x H x W), owned by this object
float *kernel; // device filter bank (N x C x R x S)
void initialize();
float *forward(float *input);
};
// One-time setup mirroring ConvGemm::initialize(), except the workspace
// is sized for the FFT algorithm. Must be called before forward().
// NOTE(review): cuDNN/CUDA return codes are unchecked here as well.
void ConvFFT::initialize(){
// Device buffers: filter bank (N x C x 3 x 3) and output (1 x N x H x W).
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
// Input descriptor: one NCHW float image, C channels, H x W.
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
// Filter descriptor: N output channels, C input channels, R x S taps.
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
// Cross-correlation, padding 1, stride 1, no dilation.
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Query the output shape, then describe the output tensor.
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
// Size and allocate the scratch workspace for the FFT algorithm.
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter bank, then release the host staging buffer
// (NOTE(review): leaves the cpuKernel member dangling).
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one convolution on `input` (device pointer, 1 x C x H x W float)
// with the FFT algorithm. The device output buffer is zeroed and reused
// on every call; the returned pointer is owned by this object.
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[32];
__shared__ float kernel_shared[128];
float pad_temp_shared_local[8];
float kernel_shared_local[4];
compute_local[(0)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 4))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 15))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 1))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 16))] = kernel[(((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)))];
kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 9))];
kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 18))];
kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 27))];
kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 36))];
kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 45))];
kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 54))];
kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 63))];
kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1152))];
kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1161))];
kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1170))];
kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1179))];
kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1188))];
kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1197))];
kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1206))];
kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1215))];
__syncthreads();
pad_temp_shared_local[(0)] = pad_temp_shared[(0)];
pad_temp_shared_local[(1)] = pad_temp_shared[(1)];
pad_temp_shared_local[(2)] = pad_temp_shared[(2)];
pad_temp_shared_local[(3)] = pad_temp_shared[(3)];
pad_temp_shared_local[(4)] = pad_temp_shared[(4)];
pad_temp_shared_local[(5)] = pad_temp_shared[(5)];
pad_temp_shared_local[(6)] = pad_temp_shared[(6)];
pad_temp_shared_local[(7)] = pad_temp_shared[(7)];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 65))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(8)];
pad_temp_shared_local[(1)] = pad_temp_shared[(9)];
pad_temp_shared_local[(2)] = pad_temp_shared[(10)];
pad_temp_shared_local[(3)] = pad_temp_shared[(11)];
pad_temp_shared_local[(4)] = pad_temp_shared[(12)];
pad_temp_shared_local[(5)] = pad_temp_shared[(13)];
pad_temp_shared_local[(6)] = pad_temp_shared[(14)];
pad_temp_shared_local[(7)] = pad_temp_shared[(15)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(16)];
pad_temp_shared_local[(1)] = pad_temp_shared[(17)];
pad_temp_shared_local[(2)] = pad_temp_shared[(18)];
pad_temp_shared_local[(3)] = pad_temp_shared[(19)];
pad_temp_shared_local[(4)] = pad_temp_shared[(20)];
pad_temp_shared_local[(5)] = pad_temp_shared[(21)];
pad_temp_shared_local[(6)] = pad_temp_shared[(22)];
pad_temp_shared_local[(7)] = pad_temp_shared[(23)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(24)];
pad_temp_shared_local[(1)] = pad_temp_shared[(25)];
pad_temp_shared_local[(2)] = pad_temp_shared[(26)];
pad_temp_shared_local[(3)] = pad_temp_shared[(27)];
pad_temp_shared_local[(4)] = pad_temp_shared[(28)];
pad_temp_shared_local[(5)] = pad_temp_shared[(29)];
pad_temp_shared_local[(6)] = pad_temp_shared[(30)];
pad_temp_shared_local[(7)] = pad_temp_shared[(31)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 10))];
kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 19))];
kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 28))];
kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 37))];
kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 46))];
kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 55))];
kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 64))];
kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1153))];
kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1162))];
kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1171))];
kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1180))];
kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1189))];
kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1198))];
kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1207))];
kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1216))];
__syncthreads();
pad_temp_shared_local[(0)] = pad_temp_shared[(0)];
pad_temp_shared_local[(1)] = pad_temp_shared[(1)];
pad_temp_shared_local[(2)] = pad_temp_shared[(2)];
pad_temp_shared_local[(3)] = pad_temp_shared[(3)];
pad_temp_shared_local[(4)] = pad_temp_shared[(4)];
pad_temp_shared_local[(5)] = pad_temp_shared[(5)];
pad_temp_shared_local[(6)] = pad_temp_shared[(6)];
pad_temp_shared_local[(7)] = pad_temp_shared[(7)];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 65))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(8)];
pad_temp_shared_local[(1)] = pad_temp_shared[(9)];
pad_temp_shared_local[(2)] = pad_temp_shared[(10)];
pad_temp_shared_local[(3)] = pad_temp_shared[(11)];
pad_temp_shared_local[(4)] = pad_temp_shared[(12)];
pad_temp_shared_local[(5)] = pad_temp_shared[(13)];
pad_temp_shared_local[(6)] = pad_temp_shared[(14)];
pad_temp_shared_local[(7)] = pad_temp_shared[(15)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(16)];
pad_temp_shared_local[(1)] = pad_temp_shared[(17)];
pad_temp_shared_local[(2)] = pad_temp_shared[(18)];
pad_temp_shared_local[(3)] = pad_temp_shared[(19)];
pad_temp_shared_local[(4)] = pad_temp_shared[(20)];
pad_temp_shared_local[(5)] = pad_temp_shared[(21)];
pad_temp_shared_local[(6)] = pad_temp_shared[(22)];
pad_temp_shared_local[(7)] = pad_temp_shared[(23)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(24)];
pad_temp_shared_local[(1)] = pad_temp_shared[(25)];
pad_temp_shared_local[(2)] = pad_temp_shared[(26)];
pad_temp_shared_local[(3)] = pad_temp_shared[(27)];
pad_temp_shared_local[(4)] = pad_temp_shared[(28)];
pad_temp_shared_local[(5)] = pad_temp_shared[(29)];
pad_temp_shared_local[(6)] = pad_temp_shared[(30)];
pad_temp_shared_local[(7)] = pad_temp_shared[(31)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (((int)blockIdx.x) < 6)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 12))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (((int)blockIdx.x) < 6)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 2))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 11))];
kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 20))];
kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 29))];
kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 38))];
kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 47))];
kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 56))];
kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 65))];
kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1154))];
kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1163))];
kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1172))];
kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1181))];
kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1190))];
kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1199))];
kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1208))];
kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1217))];
__syncthreads();
pad_temp_shared_local[(0)] = pad_temp_shared[(0)];
pad_temp_shared_local[(1)] = pad_temp_shared[(1)];
pad_temp_shared_local[(2)] = pad_temp_shared[(2)];
pad_temp_shared_local[(3)] = pad_temp_shared[(3)];
pad_temp_shared_local[(4)] = pad_temp_shared[(4)];
pad_temp_shared_local[(5)] = pad_temp_shared[(5)];
pad_temp_shared_local[(6)] = pad_temp_shared[(6)];
pad_temp_shared_local[(7)] = pad_temp_shared[(7)];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 65))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(8)];
pad_temp_shared_local[(1)] = pad_temp_shared[(9)];
pad_temp_shared_local[(2)] = pad_temp_shared[(10)];
pad_temp_shared_local[(3)] = pad_temp_shared[(11)];
pad_temp_shared_local[(4)] = pad_temp_shared[(12)];
pad_temp_shared_local[(5)] = pad_temp_shared[(13)];
pad_temp_shared_local[(6)] = pad_temp_shared[(14)];
pad_temp_shared_local[(7)] = pad_temp_shared[(15)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(16)];
pad_temp_shared_local[(1)] = pad_temp_shared[(17)];
pad_temp_shared_local[(2)] = pad_temp_shared[(18)];
pad_temp_shared_local[(3)] = pad_temp_shared[(19)];
pad_temp_shared_local[(4)] = pad_temp_shared[(20)];
pad_temp_shared_local[(5)] = pad_temp_shared[(21)];
pad_temp_shared_local[(6)] = pad_temp_shared[(22)];
pad_temp_shared_local[(7)] = pad_temp_shared[(23)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(24)];
pad_temp_shared_local[(1)] = pad_temp_shared[(25)];
pad_temp_shared_local[(2)] = pad_temp_shared[(26)];
pad_temp_shared_local[(3)] = pad_temp_shared[(27)];
pad_temp_shared_local[(4)] = pad_temp_shared[(28)];
pad_temp_shared_local[(5)] = pad_temp_shared[(29)];
pad_temp_shared_local[(6)] = pad_temp_shared[(30)];
pad_temp_shared_local[(7)] = pad_temp_shared[(31)];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
}
}
compute[(((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1568))] = compute_local[(4)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1569))] = compute_local[(5)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 14))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1582))] = compute_local[(6)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 15))] = compute_local[(3)];
compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1583))] = compute_local[(7)];
}
// Sum of absolute element-wise differences (L1 distance) between two
// float arrays x and y of length `size`. Used to compare the TVM kernel's
// output against the cuDNN baseline.
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
// Take the magnitude in float arithmetic. The original abs() resolves
// to the integer overload when <cmath> is not in scope, truncating the
// fractional part of every difference (|1.0-1.5| -> 0).
float d = x[i] - y[i];
diff += (d < 0.0f ? -d : d);
}
return diff;
}
// Copy the C x H x W tensor `x` into `y`, a zero-initialized
// C x (H+2) x (W+2) buffer, leaving a one-pixel zero border around each
// channel (padding for a 3x3 convolution).
void pad_input(float * x, float *y){
const unsigned int HP = H + 2, WP = W + 2;
// Clear the whole padded buffer first so the border stays zero.
#pragma omp parallel for
for(unsigned int i=0;i<HP*WP*C;++i){
y[i] = 0.0f;
}
// Copy each channel into the interior of its padded plane.
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
y[c*HP*WP + (h+1)*WP + (w+1)] = x[(c*H + h)*W + w];
}
}
}
}
// Benchmark driver: times the TVM-generated direct convolution kernel
// (default_function_kernel0) against cuDNN's GEMM, Winograd and FFT
// convolution paths on an N x C x H x W problem, then prints one CSV row
// of timings and speedups.
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
// Random channel-major input with integer values in [0, 9].
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
// All-ones 3x3 filter bank (C input channels, N output channels).
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
// cuDNN-backed reference implementations.
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
// Warm-up passes; the GEMM output also serves as the correctness baseline.
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
// Timed cuDNN GEMM pass.
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
// Timed cuDNN Winograd pass.
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
// Timed cuDNN FFT pass.
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
// Launch geometry baked in by the TVM schedule for this problem size.
dim3 grid(7,7,6);
dim3 block(1,1,8);
float * paddedInputDevice;
chkerr(cudaMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(cudaMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), cudaMemcpyHostToDevice));
// NOTE(review): the kernel is fed device_input (unpadded); paddedInputDevice
// is uploaded but never used here -- confirm which buffer the schedule expects.
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
// L1 difference between the TVM kernel's output and the cuDNN baseline.
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
// CSV: N,C,H,W,fft,winograd,gemm,tdc,fft/tdc,winograd/tdc,gemm/tdc
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
|
d6dfc063f93d52c6a8597762869733e2593c519e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point. Called with neither 4 nor 5 arguments it multiplies
// fixed-size, randomly initialized matrices; otherwise it reads a parameter
// file (argv[1]) plus matrices M (argv[2]) and N (argv[3]) from disk in
// cutil format. The GPU product is validated against a CPU reference and
// optionally written back out.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52); // fixed seed so generated matrices are reproducible
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
//M = AllocateMatrix(rand() %1024, rand() % 1024, 1);
//N = AllocateMatrix(M.width, rand() % 1024, 1);
M = AllocateMatrix(1024, 4096, 1);
N = AllocateMatrix(M.width, 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
// FIX: the address-of operator had been corrupted into a pilcrow
// character ("¶ms") by an encoding round-trip; restored "&params".
cutReadFilei(argv[1], &params, &data_read, true);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
printf("M hight-width = %d - %d \n",M.height,M.width);
printf("N hight-width = %d - %d \n",N.height,N.width);
printf("start running \n");
clock_t t1,t2;
float run_time;
// M * N on the device
t1=clock();
MatrixMulOnDevice(M, N, P);
t2=clock();
run_time=((float)t2-(float)t1);
printf("GPU computation complete, time={%f}\n",run_time/ CLOCKS_PER_SEC);
// compute the matrix multiplication on the CPU for comparison
t1=clock();
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
t2=clock();
run_time=((float)t2-(float)t1);
printf("CPU computation complete, time={%f}\n",run_time/ CLOCKS_PER_SEC);
// in this case check if the result is equivalent to the expected soluion
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Copy M and N to the device, run the shared-memory tiled multiply kernel,
// and read the product back into P (already sized M.height x N.width).
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Stage the operands on the GPU.
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate the result and zero-fill it from the host-side P.
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P);
// One 32x32 thread tile per block; ceil-divide so ragged edges still get
// a (partial) block.
const int tile = 32;
const int gridX = (Nd.width + tile - 1) / tile;
const int gridY = (Md.height + tile - 1) / tile;
dim3 dimGrid(gridX, gridY, 1);
dim3 dimBlock(tile, tile, 1);
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel_SharedMemory) , dim3(dimGrid),dim3(dimBlock), 0, 0, Md, Nd, Pd);
// Retrieve the product and release device storage.
CopyFromDeviceMatrix(P, Pd);
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
// Build a device-resident matrix with the same descriptor as M and a fresh
// GPU element buffer (element data is NOT copied here).
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
const int bytes = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, bytes);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
// Allocate a height x width matrix on the host.
// init == 0: zero-filled; init == 1: random values in [0, 3];
// init == 2: set dimensions only, elements stays NULL.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
// Multiply in float: the original "rand()*3" overflowed int (UB) on
// platforms where RAND_MAX >= 2^30 (e.g. glibc).
M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
// Upload the host matrix's dimensions and element data into Mdevice.
// Note: Mdevice is passed by value, so the dimension updates are local to
// this call; only the element copy has a lasting effect.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
const int bytes = Mhost.width * Mhost.height * sizeof(float);
hipMemcpy(Mdevice.elements, Mhost.elements, bytes, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
// Download a device matrix's elements into the host matrix Mhost.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
const int bytes = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, bytes, hipMemcpyDeviceToHost);
}
// Free a device matrix.
// Release a matrix's device element buffer and null the pointer so a later
// double-free attempt is harmless.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
// Release a matrix's host element buffer and null the pointer.
void FreeMatrix(Matrix* M)
{
// free(NULL) is a no-op, so this is safe for init==2 matrices too.
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equals M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
// Expected element count; cutReadFilef updates data_read with the number
// of floats it actually parsed from the file.
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
// 0 on a full read, 1 if the file held a different number of elements.
return (data_read != (M->height * M->width));
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
// Persist all width*height elements; the trailing 0.0001f is cutil's
// precision/epsilon parameter for cutWriteFilef -- TODO confirm semantics.
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| d6dfc063f93d52c6a8597762869733e2593c519e.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point. Called with neither 4 nor 5 arguments it multiplies
// fixed-size, randomly initialized matrices; otherwise it reads a parameter
// file (argv[1]) plus matrices M (argv[2]) and N (argv[3]) from disk in
// cutil format. The GPU product is validated against a CPU reference and
// optionally written back out.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52); // fixed seed so generated matrices are reproducible
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
//M = AllocateMatrix(rand() %1024, rand() % 1024, 1);
//N = AllocateMatrix(M.width, rand() % 1024, 1);
M = AllocateMatrix(1024, 4096, 1);
N = AllocateMatrix(M.width, 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
// FIX: the address-of operator had been corrupted into a pilcrow
// character ("¶ms") by an encoding round-trip; restored "&params".
cutReadFilei(argv[1], &params, &data_read, true);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
printf("M hight-width = %d - %d \n",M.height,M.width);
printf("N hight-width = %d - %d \n",N.height,N.width);
printf("start running \n");
clock_t t1,t2;
float run_time;
// M * N on the device
t1=clock();
MatrixMulOnDevice(M, N, P);
t2=clock();
run_time=((float)t2-(float)t1);
printf("GPU computation complete, time={%f}\n",run_time/ CLOCKS_PER_SEC);
// compute the matrix multiplication on the CPU for comparison
t1=clock();
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
t2=clock();
run_time=((float)t2-(float)t1);
printf("CPU computation complete, time={%f}\n",run_time/ CLOCKS_PER_SEC);
// in this case check if the result is equivalent to the expected soluion
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Copy M and N to the GPU, launch the shared-memory tiled multiply kernel,
// and read the product back into P (already sized M.height x N.width).
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Stage the operands on the GPU.
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate the result and zero-fill it from the host-side P.
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P);
// One 32x32 thread tile per block; ceil-divide so ragged edges still get
// a (partial) block.
const int tile = 32;
dim3 dimGrid((Nd.width + tile - 1) / tile, (Md.height + tile - 1) / tile, 1);
dim3 dimBlock(tile, tile, 1);
// Launch the device computation threads!
MatrixMulKernel_SharedMemory<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
// Retrieve the product and release device storage.
CopyFromDeviceMatrix(P, Pd);
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
// Build a device-resident matrix with the same descriptor as M and a fresh
// GPU element buffer (element data is NOT copied here).
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
const int bytes = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, bytes);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
// Allocate a height x width matrix on the host.
// init == 0: zero-filled; init == 1: random values in [0, 3];
// init == 2: set dimensions only, elements stays NULL.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
// Multiply in float: the original "rand()*3" overflowed int (UB) on
// platforms where RAND_MAX >= 2^30 (e.g. glibc).
M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
// Upload the host matrix's dimensions and element data into Mdevice.
// Note: Mdevice is passed by value, so the dimension updates are local to
// this call; only the element copy has a lasting effect.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
const int bytes = Mhost.width * Mhost.height * sizeof(float);
cudaMemcpy(Mdevice.elements, Mhost.elements, bytes, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
// Download a device matrix's elements into the host matrix Mhost.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
const int bytes = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, bytes, cudaMemcpyDeviceToHost);
}
// Free a device matrix.
// Release a matrix's device element buffer and null the pointer so a later
// double-free attempt is harmless.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
// Release a matrix's host element buffer and null the pointer.
void FreeMatrix(Matrix* M)
{
// free(NULL) is a no-op, so this is safe for init==2 matrices too.
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equals M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
// Expected element count; cutReadFilef updates data_read with the number
// of floats it actually parsed from the file.
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
// 0 on a full read, 1 if the file held a different number of elements.
return (data_read != (M->height * M->width));
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
// Persist all width*height elements; the trailing 0.0001f is cutil's
// precision/epsilon parameter for cutWriteFilef -- TODO confirm semantics.
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
588a0166a0c0cb51ec02978c8c06dda1d5a673d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<string>
#include <stdio.h>
#include "CImg.h"
using namespace cimg_library;
#include<math.h>
#include<time.h>
#include<stdlib.h>
#include <stdio.h>
#include <ctime>
#include <cmath>
#include "helper.h"
using namespace std;
#define TILE_WIDTH 8
#define MAX_MASK_WIDTH 3
__constant__ double M[MAX_MASK_WIDTH][MAX_MASK_WIDTH];
float duration_gpu, duration_cpu, duration_kernel, duration_cpumem;
// 3x3 convolution of a height x width single-channel image N into P using
// the __constant__ coefficient matrix M. One TILE_WIDTH x TILE_WIDTH (8x8)
// thread block computes one output tile; the tile's own pixels are staged
// in shared memory, while neighbours that fall outside the tile are
// re-read from global memory with clamp-to-edge handling at image borders.
// NOTE(review): Pvalue is accumulated in double and stored into unsigned
// char without clamping -- negative or >255 results wrap; confirm intended.
__global__ void ConvolutionKernel(unsigned char *P, unsigned char *N, int height, int width) {
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output pixel owned by this thread.
int row_o = blockIdx.y * TILE_WIDTH + ty;
int col_o = blockIdx.x * TILE_WIDTH + tx;
if (row_o < height && col_o < width) {
// Top-left corner of this thread's 3x3 input window.
int row_i = row_o - MAX_MASK_WIDTH / 2;
int col_i = col_o - MAX_MASK_WIDTH / 2;
// Tile without halo: border neighbours are not staged here and are
// fetched from global memory in the loop below.
__shared__ float N_ds[TILE_WIDTH][TILE_WIDTH];
if (row_o * width + col_o < height*width)
N_ds[ty][tx] = N[row_o * width + col_o];
// Wait until all tile elements are loaded
// NOTE(review): this __syncthreads() sits inside a divergent branch
// (threads with row_o/col_o out of range skip it); safe only if the
// launch geometry keeps whole blocks in range -- verify.
__syncthreads();
double Pvalue = 0.0f;
// Global-coordinate bounds of the tile held in shared memory.
int This_tile_col_start_point = blockIdx.x * blockDim.x;
int Next_tile_col_start_point = (blockIdx.x + 1) * blockDim.x;
int This_tile_row_start_point = blockIdx.y * blockDim.y;
int Next_tile_row_start_point = (blockIdx.y + 1) * blockDim.y;
if (ty < TILE_WIDTH && tx < TILE_WIDTH) {
// Walk the 3x3 window; use shared memory when the neighbour is in
// this tile, otherwise read from global with edge clamping.
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int row_index = row_i + i; int col_index = col_i + j;
if ((row_index >= This_tile_row_start_point) && (row_index < Next_tile_row_start_point)&&row_index<height) {
if ((col_index >= This_tile_col_start_point) && (col_index < Next_tile_col_start_point)&&col_index<width)
Pvalue += N_ds[threadIdx.y + i - (MAX_MASK_WIDTH / 2)][threadIdx.x + j - (MAX_MASK_WIDTH / 2)] * M[i][j];
// NOTE(review): the "row_index >= height" cases below (1-3) are
// dead code -- the enclosing if already guarantees
// row_index < height. Case 1 also tests col_i instead of
// col_index, which would be a bug if the branch were reachable.
else if (row_index >= height && col_i >= width) // 1
Pvalue +=
N[(height - 1) * width + (width - 1)] * M[i][j];
else if (row_index >= height && col_index < width &&
col_index >= 0) // 2
Pvalue +=
N[(height - 1) * width + col_index] * M[i][j];
else if (row_index >= height && col_index < 0) // 3
Pvalue += N[(height - 1) * width + 0] * M[i][j];
else if (row_index < height && row_index >= 0 &&
col_index < 0) // 4
Pvalue += N[row_index * width + 0] * M[i][j];
else if (row_index < 0 && col_index < 0) // 5
Pvalue += N[0 * width + 0] * M[i][j];
else if (row_index < 0 && col_index < width &&
col_index >= 0) // 6
Pvalue += N[0 * width + col_index] * M[i][j];
else if (row_index < 0 && col_index >= width) // 7
Pvalue += N[0 * width + (width - 1)] * M[i][j];
else if (row_index >= 0 && row_index < height &&
col_index >= width) // 8
Pvalue += N[row_index * width + (width - 1)] * M[i][j];
else
Pvalue += N[row_index * width + col_index] * M[i][j];
}
// Neighbour row lies outside this block's tile: read from global
// memory, clamping out-of-image coordinates to the nearest edge
// pixel (cases 1-8 = the four corners and four edges).
else if (row_index >= height && col_index >= width) // 1
Pvalue += N[(height - 1) * width + (width - 1)] * M[i][j];
else if (row_index >= height && col_index < width &&
col_index >= 0) // 2
Pvalue +=
N[(height - 1) * width + col_index] * M[i][j];
else if (row_index >= height && col_index < 0) // 3
Pvalue +=
N[(height - 1) * width + 0] * M[i][j];
else if (row_index < height && row_index >= 0 && col_index < 0) // 4
Pvalue +=
N[row_index* width + 0] * M[i][j];
else if (row_index < 0 && col_index < 0) // 5
Pvalue +=
N[0 * width + 0] * M[i][j];
else if (row_index < 0 && col_index < width && col_index >= 0) // 6
Pvalue +=
N[0 * width + col_index] * M[i][j];
else if (row_index < 0 && col_index >= width) // 7
Pvalue +=
N[0 * width + (width - 1)] * M[i][j];
else if (row_index >= 0 && row_index < height &&
col_index >= width) // 8
Pvalue +=
N[row_index * width + (width - 1)] * M[i][j];
else
Pvalue += N[row_index * width + col_index] * M[i][j];
}
}
// Write the finished output pixel (implicit double -> uchar truncation).
if (row_o < height && col_o < width) {
P[row_o * width + col_o] = Pvalue;
}
}
}
}
void gpu_kernel(unsigned char* h_in, int height, int width, double filter[][3])
{
	// Runs the 3x3 convolution `filter` over the single-channel image h_in
	// (height x width) on the GPU, stores the kernel time (seconds) in the
	// global duration_kernel, prints how many pixels changed, and saves the
	// filtered image as "out.jpg". h_in itself is not modified.
	hipMemcpyToSymbol(M, filter, 3 * 3 * sizeof(double)); // mask -> constant memory
	unsigned char* d_in, *d_out;
	int n = height * width;
	checkCudaErrors(hipMalloc(&d_in, sizeof(unsigned char) * n));
	checkCudaErrors(hipMalloc(&d_out, sizeof(unsigned char) * n));
	checkCudaErrors(hipMemcpy(d_in, h_in, sizeof(unsigned char) * n, hipMemcpyHostToDevice));
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	// One thread per pixel; 8x8 blocks must match TILE_WIDTH in the kernel.
	dim3 grids(ceil(width / 8.0), ceil(height / 8.0));
	dim3 threads(8, 8);
	ConvolutionKernel << <grids, threads >> > (d_out, d_in, height, width);
	checkCudaErrors(hipGetLastError()); // fix: surface launch-configuration errors
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&duration_kernel, start, stop); // milliseconds
	printf("Elapsed time by the Kernel: %f s\n", duration_kernel / 1000);
	duration_kernel /= 1000; // publish seconds via the global
	hipEventDestroy(start); // fix: events were never destroyed
	hipEventDestroy(stop);
	unsigned char *h_img;
	h_img = (unsigned char *)malloc(height * width * sizeof(unsigned char));
	checkCudaErrors(hipMemcpy(h_img, d_out, sizeof(unsigned char) * n, hipMemcpyDeviceToHost));
	// Diagnostic: count how many pixels the filter changed.
	unsigned long long count = 0;
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			if (h_img[i * width + j] != h_in[i * width + j])
				count++;
		}
	}
	cout << count << " " << n << endl;
	CImg<unsigned char> image_out(width, height, 1, 1, 0);
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			image_out(j, i, 0, 0) = h_img[i * width + j];
		}
	}
	image_out.save("out.jpg");
	// Fix: the original leaked h_img, d_in and d_out on every call.
	free(h_img);
	checkCudaErrors(hipFree(d_in));
	checkCudaErrors(hipFree(d_out));
}
double cpu_reduce(double *h_in, int h_in_len) {
	// Sequential left-to-right sum of h_in[0 .. h_in_len-1].
	double total = 0;
	const double *end = h_in + h_in_len;
	for (const double *p = h_in; p != end; ++p)
		total += *p;
	return total;
}
int main()
{
	// Interactive driver: prompts for an image path and a filter id, loads the
	// image's first (grey) channel into a flat host buffer and hands it to the
	// GPU convolution (which writes its result to out.jpg).
	// 3x3 convolution masks, one per supported operation.
	double blur[3][3] = {
		{0.0625,.125,0.0625},
		{0.125,0.25,0.125},
		{0.0625,0.125,.0625}
	};
	double emboss[3][3] = {
		{-2,-1,0},
		{-1,1,1},
		{0,1,2}
	};
	double outline[3][3] = {
		{-1,-1,-1},
		{-1,8,-1},
		{-1,-1,-1}
	};
	double sharpen[3][3] = {
		{0,-1,0},
		{-1,5,-1},
		{0,-1,0}
	};
	double left[3][3] = {
		{1,0,-1},
		{2,0,-2},
		{1,0,-1}
	};
	double right[3][3] = {
		{-1,0,1},
		{-2,0,2},
		{-1,0,1}
	};
	double top[3][3] = {
		{1,2,1},
		{0,0,0},
		{-1,-2,-1}
	};
	double bottom[3][3] = {
		{-1,-2,-1},
		{0,0,0},
		{1,2,1}
	};
	string path;
	cout <<"Enter the image path: ";
	cin >> path;
	cout << "Enter the number of the operation: \n";
	printf("1 for blur. 2 for emboss. 3 for outline. 4 for sharpen\n 5 for left sobel. 6 for right sobel. 7 for top sobel. 8 for bottom sobel\n");
	int type;
	cin >> type;
	CImg<unsigned char> image(path.c_str());
	image.channel(0); // keep only the first channel (greyscale processing)
	unsigned char *h_img;
	h_img = (unsigned char *)malloc(image.height() *image.width() * sizeof(unsigned char));
	int height = image.height();
	int width = image.width();
	//int height = 10;
	//int width = 10;
	// Copy the image into a flat row-major buffer (CImg is addressed (x, y)).
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			h_img[i * width+ j ] = (unsigned char)image(j, i, 0,0 );
			//h_img[i * width + j] = 10;
		}
	}
	int arraySize; // NOTE(review): unused
	clock_t start; // NOTE(review): unused
	// Dispatch to the GPU convolution with the selected mask.
	switch (type)
	{
	case 1:gpu_kernel(h_img, height, width, blur);
		break;
	case 2:gpu_kernel(h_img, height, width, emboss);
		break;
	case 3:gpu_kernel(h_img, height, width,outline );
		break;
	case 4:gpu_kernel(h_img, height, width, sharpen);
		break;
	case 5:gpu_kernel(h_img, height, width, left);
		break;
	case 6:gpu_kernel(h_img, height, width, right);
		break;
	case 7:gpu_kernel(h_img, height, width, top);
		break;
	case 8:gpu_kernel(h_img, height, width, bottom);
		break;
	default:
		printf("Invalid operation");
		break;
	}
	free(h_img);
	return 0;
}
| 588a0166a0c0cb51ec02978c8c06dda1d5a673d0.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<string>
#include <stdio.h>
#include "CImg.h"
using namespace cimg_library;
#include<math.h>
#include<time.h>
#include<stdlib.h>
#include <stdio.h>
#include <ctime>
#include <cmath>
#include "helper.h"
using namespace std;
#define TILE_WIDTH 8
#define MAX_MASK_WIDTH 3
__constant__ double M[MAX_MASK_WIDTH][MAX_MASK_WIDTH];
float duration_gpu, duration_cpu, duration_kernel, duration_cpumem;
// Applies the 3x3 mask in constant memory M to input image N, writing the
// result to P. One thread per output pixel; each block stages its own
// TILE_WIDTH x TILE_WIDTH pixels in shared memory (no halo) and falls back to
// global memory, with clamp-to-edge, for neighbours outside the tile/image.
// Launch with blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void ConvolutionKernel(unsigned char *P, unsigned char *N, int height, int width) {
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int row_o = blockIdx.y * TILE_WIDTH + ty;
	int col_o = blockIdx.x * TILE_WIDTH + tx;
	// Top-left input coordinate of this thread's 3x3 neighbourhood.
	int row_i = row_o - MAX_MASK_WIDTH / 2;
	int col_i = col_o - MAX_MASK_WIDTH / 2;
	__shared__ float N_ds[TILE_WIDTH][TILE_WIDTH];
	if (row_o < height && col_o < width)
		N_ds[ty][tx] = N[row_o * width + col_o];
	// Fix: the barrier must be reached by every thread of the block. The
	// original placed it inside the boundary check, so blocks straddling the
	// image edge executed a divergent __syncthreads() (undefined behavior).
	__syncthreads();
	if (row_o < height && col_o < width) {
		double Pvalue = 0.0f;
		int This_tile_col_start_point = blockIdx.x * blockDim.x;
		int Next_tile_col_start_point = (blockIdx.x + 1) * blockDim.x;
		int This_tile_row_start_point = blockIdx.y * blockDim.y;
		int Next_tile_row_start_point = (blockIdx.y + 1) * blockDim.y;
		if (ty < TILE_WIDTH && tx < TILE_WIDTH) {
			for (int i = 0; i < 3; i++) {
				for (int j = 0; j < 3; j++) {
					int row_index = row_i + i; int col_index = col_i + j;
					if ((row_index >= This_tile_row_start_point) && (row_index < Next_tile_row_start_point) && row_index < height) {
						// Row lies inside this tile and inside the image.
						if ((col_index >= This_tile_col_start_point) && (col_index < Next_tile_col_start_point) && col_index < width)
							// Neighbour is cached in shared memory.
							Pvalue += N_ds[threadIdx.y + i - (MAX_MASK_WIDTH / 2)][threadIdx.x + j - (MAX_MASK_WIDTH / 2)] * M[i][j];
						// Cases 1-8: clamp out-of-image neighbours to the nearest
						// border pixel. (Fix: case 1 compared col_i instead of
						// col_index; unreachable here anyway since row_index <
						// height is already established.)
						else if (row_index >= height && col_index >= width) // 1
							Pvalue += N[(height - 1) * width + (width - 1)] * M[i][j];
						else if (row_index >= height && col_index < width && col_index >= 0) // 2
							Pvalue += N[(height - 1) * width + col_index] * M[i][j];
						else if (row_index >= height && col_index < 0) // 3
							Pvalue += N[(height - 1) * width + 0] * M[i][j];
						else if (row_index < height && row_index >= 0 && col_index < 0) // 4
							Pvalue += N[row_index * width + 0] * M[i][j];
						else if (row_index < 0 && col_index < 0) // 5
							Pvalue += N[0 * width + 0] * M[i][j];
						else if (row_index < 0 && col_index < width && col_index >= 0) // 6
							Pvalue += N[0 * width + col_index] * M[i][j];
						else if (row_index < 0 && col_index >= width) // 7
							Pvalue += N[0 * width + (width - 1)] * M[i][j];
						else if (row_index >= 0 && row_index < height && col_index >= width) // 8
							Pvalue += N[row_index * width + (width - 1)] * M[i][j];
						else
							Pvalue += N[row_index * width + col_index] * M[i][j];
					}
					// Row falls outside this tile (or below the image): same
					// clamp-to-edge cases, all served from global memory.
					else if (row_index >= height && col_index >= width) // 1
						Pvalue += N[(height - 1) * width + (width - 1)] * M[i][j];
					else if (row_index >= height && col_index < width && col_index >= 0) // 2
						Pvalue += N[(height - 1) * width + col_index] * M[i][j];
					else if (row_index >= height && col_index < 0) // 3
						Pvalue += N[(height - 1) * width + 0] * M[i][j];
					else if (row_index < height && row_index >= 0 && col_index < 0) // 4
						Pvalue += N[row_index * width + 0] * M[i][j];
					else if (row_index < 0 && col_index < 0) // 5
						Pvalue += N[0 * width + 0] * M[i][j];
					else if (row_index < 0 && col_index < width && col_index >= 0) // 6
						Pvalue += N[0 * width + col_index] * M[i][j];
					else if (row_index < 0 && col_index >= width) // 7
						Pvalue += N[0 * width + (width - 1)] * M[i][j];
					else if (row_index >= 0 && row_index < height && col_index >= width) // 8
						Pvalue += N[row_index * width + (width - 1)] * M[i][j];
					else
						Pvalue += N[row_index * width + col_index] * M[i][j];
				}
			}
		}
		P[row_o * width + col_o] = Pvalue;
	}
}
void gpu_kernel(unsigned char* h_in, int height, int width, double filter[][3])
{
	// Runs the 3x3 convolution `filter` over the single-channel image h_in
	// (height x width) on the GPU, stores the kernel time (seconds) in the
	// global duration_kernel, prints how many pixels changed, and saves the
	// filtered image as "out.jpg". h_in itself is not modified.
	cudaMemcpyToSymbol(M, filter, 3 * 3 * sizeof(double)); // mask -> constant memory
	unsigned char* d_in, *d_out;
	int n = height * width;
	checkCudaErrors(cudaMalloc(&d_in, sizeof(unsigned char) * n));
	checkCudaErrors(cudaMalloc(&d_out, sizeof(unsigned char) * n));
	checkCudaErrors(cudaMemcpy(d_in, h_in, sizeof(unsigned char) * n, cudaMemcpyHostToDevice));
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// One thread per pixel; 8x8 blocks must match TILE_WIDTH in the kernel.
	dim3 grids(ceil(width / 8.0), ceil(height / 8.0));
	dim3 threads(8, 8);
	ConvolutionKernel << <grids, threads >> > (d_out, d_in, height, width);
	checkCudaErrors(cudaGetLastError()); // fix: surface launch-configuration errors
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&duration_kernel, start, stop); // milliseconds
	printf("Elapsed time by the Kernel: %f s\n", duration_kernel / 1000);
	duration_kernel /= 1000; // publish seconds via the global
	cudaEventDestroy(start); // fix: events were never destroyed
	cudaEventDestroy(stop);
	unsigned char *h_img;
	h_img = (unsigned char *)malloc(height * width * sizeof(unsigned char));
	checkCudaErrors(cudaMemcpy(h_img, d_out, sizeof(unsigned char) * n, cudaMemcpyDeviceToHost));
	// Diagnostic: count how many pixels the filter changed.
	unsigned long long count = 0;
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			if (h_img[i * width + j] != h_in[i * width + j])
				count++;
		}
	}
	cout << count << " " << n << endl;
	CImg<unsigned char> image_out(width, height, 1, 1, 0);
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			image_out(j, i, 0, 0) = h_img[i * width + j];
		}
	}
	image_out.save("out.jpg");
	// Fix: the original leaked h_img, d_in and d_out on every call.
	free(h_img);
	checkCudaErrors(cudaFree(d_in));
	checkCudaErrors(cudaFree(d_out));
}
double cpu_reduce(double *h_in, int h_in_len) {
	// Sequential left-to-right sum of h_in[0 .. h_in_len-1].
	double total = 0;
	const double *end = h_in + h_in_len;
	for (const double *p = h_in; p != end; ++p)
		total += *p;
	return total;
}
int main()
{
	// Interactive driver: prompts for an image path and a filter id, loads the
	// image's first (grey) channel into a flat host buffer and hands it to the
	// GPU convolution (which writes its result to out.jpg).
	// 3x3 convolution masks, one per supported operation.
	double blur[3][3] = {
		{0.0625,.125,0.0625},
		{0.125,0.25,0.125},
		{0.0625,0.125,.0625}
	};
	double emboss[3][3] = {
		{-2,-1,0},
		{-1,1,1},
		{0,1,2}
	};
	double outline[3][3] = {
		{-1,-1,-1},
		{-1,8,-1},
		{-1,-1,-1}
	};
	double sharpen[3][3] = {
		{0,-1,0},
		{-1,5,-1},
		{0,-1,0}
	};
	double left[3][3] = {
		{1,0,-1},
		{2,0,-2},
		{1,0,-1}
	};
	double right[3][3] = {
		{-1,0,1},
		{-2,0,2},
		{-1,0,1}
	};
	double top[3][3] = {
		{1,2,1},
		{0,0,0},
		{-1,-2,-1}
	};
	double bottom[3][3] = {
		{-1,-2,-1},
		{0,0,0},
		{1,2,1}
	};
	string path;
	cout <<"Enter the image path: ";
	cin >> path;
	cout << "Enter the number of the operation: \n";
	printf("1 for blur. 2 for emboss. 3 for outline. 4 for sharpen\n 5 for left sobel. 6 for right sobel. 7 for top sobel. 8 for bottom sobel\n");
	int type;
	cin >> type;
	CImg<unsigned char> image(path.c_str());
	image.channel(0); // keep only the first channel (greyscale processing)
	unsigned char *h_img;
	h_img = (unsigned char *)malloc(image.height() *image.width() * sizeof(unsigned char));
	int height = image.height();
	int width = image.width();
	//int height = 10;
	//int width = 10;
	// Copy the image into a flat row-major buffer (CImg is addressed (x, y)).
	for (int i = 0; i < height; i++) {
		for (int j = 0; j < width; j++) {
			h_img[i * width+ j ] = (unsigned char)image(j, i, 0,0 );
			//h_img[i * width + j] = 10;
		}
	}
	int arraySize; // NOTE(review): unused
	clock_t start; // NOTE(review): unused
	// Dispatch to the GPU convolution with the selected mask.
	switch (type)
	{
	case 1:gpu_kernel(h_img, height, width, blur);
		break;
	case 2:gpu_kernel(h_img, height, width, emboss);
		break;
	case 3:gpu_kernel(h_img, height, width,outline );
		break;
	case 4:gpu_kernel(h_img, height, width, sharpen);
		break;
	case 5:gpu_kernel(h_img, height, width, left);
		break;
	case 6:gpu_kernel(h_img, height, width, right);
		break;
	case 7:gpu_kernel(h_img, height, width, top);
		break;
	case 8:gpu_kernel(h_img, height, width, bottom);
		break;
	default:
		printf("Invalid operation");
		break;
	}
	free(h_img);
	return 0;
}
|
bfd984b97260c00ee7cde36980b7d0f71ab1994c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <hip/hip_runtime.h>
#include "../include/utils.h"
#include "../include/loadSaveImage.h"
static const int filterWidth = 9;
static const float filterSigma = 2.f;
// Loads `filename` into host memory, allocates every device buffer the blur
// pipeline needs (input/output images, three raw and three blurred channel
// planes), and builds + uploads the normalised Gaussian filter.
// Results come back through the pointer/reference parameters; `rows` and
// `cols` receive the image dimensions.
void preProcess(uchar4 **h_inputImageRGBA, uchar4 **h_outputImageRGBA,
	uchar4 **d_inputImageRGBA, uchar4 **d_outputImageRGBA,
	unsigned char **d_redBlurred, unsigned char **d_red,
	unsigned char **d_greenBlurred, unsigned char **d_green,
	unsigned char **d_blueBlurred, unsigned char **d_blue,
	float **h_filter, float **d_filter,
	size_t &rows, size_t &cols,
	const std::string &filename)
{
	//make sure the context initializes ok
	checkCudaErrors(hipFree(0));
	// allocate and load input image
	loadImageRGBA(filename, h_inputImageRGBA, &rows, &cols);
	// allocate output image
	*h_outputImageRGBA = new uchar4[rows * cols];
	//allocate memory on the device for both input and output
	size_t numPixels = rows * cols;
	checkCudaErrors(hipMalloc(d_inputImageRGBA, sizeof(uchar4) * numPixels));
	checkCudaErrors(hipMalloc(d_outputImageRGBA, sizeof(uchar4) * numPixels));
	//copy input array to the GPU
	checkCudaErrors(hipMemcpy(*d_inputImageRGBA, *h_inputImageRGBA, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice));
	//set output array on GPU to all zeros
	checkCudaErrors(hipMemset(*d_outputImageRGBA, 0, numPixels * sizeof(uchar4)));
	//create and fill the filter we will convolve with
	*h_filter = new float[filterWidth * filterWidth];
	float filterSum = 0.f; //for normalization
	// Sample an unnormalised 2D Gaussian centred on the filter midpoint.
	for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r)
	{
		for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c)
		{
			float filterValue = expf(-(float)(c * c + r * r) / (2.f * filterSigma * filterSigma));
			(*h_filter)[(r + filterWidth / 2) * filterWidth + c + filterWidth / 2] = filterValue;
			filterSum += filterValue; // for normalization
		}
	}
	// normalize filter
	// Scale weights so they sum to 1 (preserves overall image brightness).
	float normalizationFactor = 1.f / filterSum;
	for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r)
		for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c)
			(*h_filter)[(r + filterWidth / 2) * filterWidth + c + filterWidth / 2] *= normalizationFactor;
	//original
	checkCudaErrors(hipMalloc(d_red, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMalloc(d_green, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMalloc(d_blue, sizeof(unsigned char) * numPixels));
	//blurred
	checkCudaErrors(hipMalloc(d_redBlurred, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMalloc(d_greenBlurred, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMalloc(d_blueBlurred, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMemset(*d_redBlurred, 0, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMemset(*d_greenBlurred, 0, sizeof(unsigned char) * numPixels));
	checkCudaErrors(hipMemset(*d_blueBlurred, 0, sizeof(unsigned char) * numPixels));
	//filter
	checkCudaErrors(hipMalloc(d_filter, sizeof(float) * filterWidth * filterWidth));
	checkCudaErrors(hipMemcpy(*d_filter, *h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void postProcess(const std::string &output_file, uchar4 *const h_outputImage, const uchar4 *const d_outputImage,
	const int rows, const int cols)
{
	// Copy the finished image off the device, then write it to disk.
	const size_t byteCount = static_cast<size_t>(rows) * cols * sizeof(uchar4);
	checkCudaErrors(hipMemcpy(h_outputImage, d_outputImage, byteCount, hipMemcpyDeviceToHost));
	saveImageRGBA(h_outputImage, rows, cols, output_file);
}
// Convolves one planar channel with the (filterWidth x filterWidth) filter.
// One thread per output pixel; out-of-image neighbours are clamped to the
// nearest edge pixel.
__global__ void gaussian_blur_kernel(const unsigned char *const inputChannel,
	unsigned char *const outputChannel,
	const int numRows, const int numCols,
	const float *const filter, const int filterWidth)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= numCols || row >= numRows)
		return;

	const int halo = filterWidth / 2;
	float acc = 0.f;
	for (int fr = -halo; fr <= halo; ++fr)
	{
		for (int fc = -halo; fc <= halo; ++fc)
		{
			// Clamp the sample position to the image boundary.
			const int r = min(max(row + fr, 0), numRows - 1);
			const int c = min(max(col + fc, 0), numCols - 1);
			acc += static_cast<float>(inputChannel[r * numCols + c]) *
				filter[(fr + halo) * filterWidth + (fc + halo)];
		}
	}
	outputChannel[row * numCols + col] = static_cast<unsigned char>(acc);
}
// Splits the interleaved RGBA image into three planar channel arrays
// (alpha is discarded). One thread per pixel.
__global__ void separateChannels_kernel(const uchar4 *const inputImageRGBA,
	const int numRows, const int numCols,
	unsigned char *const redChannel,
	unsigned char *const greenChannel,
	unsigned char *const blueChannel)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= numCols || row >= numRows)
		return;
	const int idx = row * numCols + col;
	const uchar4 pixel = inputImageRGBA[idx];
	redChannel[idx] = pixel.x;
	greenChannel[idx] = pixel.y;
	blueChannel[idx] = pixel.z;
}
// Packs three planar channels back into an interleaved RGBA image; the alpha
// channel is forced to 255 (fully opaque). One thread per pixel.
__global__ void recombineChannels_kernel(const unsigned char *const redChannel,
	const unsigned char *const greenChannel,
	const unsigned char *const blueChannel,
	uchar4 *const outputImageRGBA,
	const int numRows, const int numCols)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	// Threads mapped past the image edge do nothing.
	if (col >= numCols || row >= numRows)
		return;
	const int idx = row * numCols + col;
	outputImageRGBA[idx] = make_uchar4(redChannel[idx], greenChannel[idx], blueChannel[idx], 255);
}
// Device-side pipeline: split d_inputImageRGBA into planar R/G/B, blur each
// channel, and recombine into d_outputImageRGBA.
// NOTE(review): h_inputImageRGBA is accepted but never used here; the raw and
// blurred channel pointers are interleaved in the parameter list
// (redBlurred, red, greenBlurred, green, blueBlurred, blue), so callers must
// match this exact order.
void cuda_gaussian_blur(const uchar4 *const h_inputImageRGBA, uchar4 *const d_inputImageRGBA,
	uchar4 *const d_outputImageRGBA, const int numRows, const int numCols,
	unsigned char *d_redBlurred, unsigned char *d_red,
	unsigned char *d_greenBlurred, unsigned char *d_green,
	unsigned char *d_blueBlurred, unsigned char *d_blue,
	float *d_filter, const int filterWidth)
{
	// define the dimensions of each thread block (max = 1024 = 32*32)
	int blockW = 32;
	int blockH = 32;
	// Set reasonable block size (i.e., number of threads per block)
	dim3 blockSize(blockW, blockH);
	// Compute correct grid size (i.e., number of blocks per kernel launch)
	// from the image size and block size.
	int gridW = (numCols % blockW != 0) ? (numCols / blockW + 1) : (numCols / blockW);
	int gridH = (numRows % blockH != 0) ? (numRows / blockH + 1) : (numRows / blockH);
	dim3 gridSize(gridW, gridH);
	// Launch a kernel for separating the RGBA image into different color channels
	hipLaunchKernelGGL(( separateChannels_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
	hipDeviceSynchronize();
	checkCudaErrors(hipGetLastError());
	// Call your convolution kernel here 3 times, once for each color channel.
	// TODO: use streams for concurrency
	hipLaunchKernelGGL(( gaussian_blur_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
	hipLaunchKernelGGL(( gaussian_blur_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
	hipLaunchKernelGGL(( gaussian_blur_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
	hipDeviceSynchronize();
	checkCudaErrors(hipGetLastError());
	// Now we recombine channels results
	hipLaunchKernelGGL(( recombineChannels_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols);
	hipDeviceSynchronize();
	checkCudaErrors(hipGetLastError());
}
void gaussian_blur(const std::string &input_file, const std::string &output_file)
{
	// End-to-end driver: load input_file, Gaussian-blur it on the GPU, write
	// the result to output_file, then release every host and device buffer.
	size_t numRows, numCols;
	uchar4 *h_inputImageRGBA, *d_inputImageRGBA;
	uchar4 *h_outputImageRGBA, *d_outputImageRGBA;
	unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;
	unsigned char *d_red, *d_green, *d_blue;
	float *h_filter, *d_filter;
	preProcess(&h_inputImageRGBA, &h_outputImageRGBA, &d_inputImageRGBA, &d_outputImageRGBA,
		&d_redBlurred, &d_red, &d_greenBlurred, &d_green, &d_blueBlurred, &d_blue,
		&h_filter, &d_filter, numRows, numCols, input_file);
	// Fix: pass the channel buffers in the callee's declared order
	// (redBlurred, red, greenBlurred, green, blueBlurred, blue). The original
	// interleaved them differently, which only produced correct output because
	// all six buffers are interchangeable same-size scratch planes.
	cuda_gaussian_blur(h_inputImageRGBA, d_inputImageRGBA, d_outputImageRGBA, numRows, numCols,
		d_redBlurred, d_red, d_greenBlurred, d_green, d_blueBlurred, d_blue,
		d_filter, filterWidth);
	hipDeviceSynchronize();
	checkCudaErrors(hipGetLastError());
	postProcess(output_file, h_outputImageRGBA, d_outputImageRGBA, numRows, numCols);
	checkCudaErrors(hipFree(d_inputImageRGBA));
	checkCudaErrors(hipFree(d_outputImageRGBA));
	checkCudaErrors(hipFree(d_filter));
	checkCudaErrors(hipFree(d_red));
	checkCudaErrors(hipFree(d_green));
	checkCudaErrors(hipFree(d_blue));
	checkCudaErrors(hipFree(d_redBlurred));
	checkCudaErrors(hipFree(d_greenBlurred));
	checkCudaErrors(hipFree(d_blueBlurred));
	delete[] h_inputImageRGBA;
	delete[] h_outputImageRGBA;
	delete[] h_filter;
} | bfd984b97260c00ee7cde36980b7d0f71ab1994c.cu | #include <iostream>
#include <cmath>
#include <cuda_runtime.h>
#include "../include/utils.h"
#include "../include/loadSaveImage.h"
static const int filterWidth = 9;
static const float filterSigma = 2.f;
// Loads `filename` into host memory, allocates every device buffer the blur
// pipeline needs (input/output images, three raw and three blurred channel
// planes), and builds + uploads the normalised Gaussian filter.
// Results come back through the pointer/reference parameters; `rows` and
// `cols` receive the image dimensions.
void preProcess(uchar4 **h_inputImageRGBA, uchar4 **h_outputImageRGBA,
	uchar4 **d_inputImageRGBA, uchar4 **d_outputImageRGBA,
	unsigned char **d_redBlurred, unsigned char **d_red,
	unsigned char **d_greenBlurred, unsigned char **d_green,
	unsigned char **d_blueBlurred, unsigned char **d_blue,
	float **h_filter, float **d_filter,
	size_t &rows, size_t &cols,
	const std::string &filename)
{
	//make sure the context initializes ok
	checkCudaErrors(cudaFree(0));
	// allocate and load input image
	loadImageRGBA(filename, h_inputImageRGBA, &rows, &cols);
	// allocate output image
	*h_outputImageRGBA = new uchar4[rows * cols];
	//allocate memory on the device for both input and output
	size_t numPixels = rows * cols;
	checkCudaErrors(cudaMalloc(d_inputImageRGBA, sizeof(uchar4) * numPixels));
	checkCudaErrors(cudaMalloc(d_outputImageRGBA, sizeof(uchar4) * numPixels));
	//copy input array to the GPU
	checkCudaErrors(cudaMemcpy(*d_inputImageRGBA, *h_inputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice));
	//set output array on GPU to all zeros
	checkCudaErrors(cudaMemset(*d_outputImageRGBA, 0, numPixels * sizeof(uchar4)));
	//create and fill the filter we will convolve with
	*h_filter = new float[filterWidth * filterWidth];
	float filterSum = 0.f; //for normalization
	// Sample an unnormalised 2D Gaussian centred on the filter midpoint.
	for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r)
	{
		for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c)
		{
			float filterValue = expf(-(float)(c * c + r * r) / (2.f * filterSigma * filterSigma));
			(*h_filter)[(r + filterWidth / 2) * filterWidth + c + filterWidth / 2] = filterValue;
			filterSum += filterValue; // for normalization
		}
	}
	// normalize filter
	// Scale weights so they sum to 1 (preserves overall image brightness).
	float normalizationFactor = 1.f / filterSum;
	for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r)
		for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c)
			(*h_filter)[(r + filterWidth / 2) * filterWidth + c + filterWidth / 2] *= normalizationFactor;
	//original
	checkCudaErrors(cudaMalloc(d_red, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMalloc(d_green, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMalloc(d_blue, sizeof(unsigned char) * numPixels));
	//blurred
	checkCudaErrors(cudaMalloc(d_redBlurred, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMalloc(d_greenBlurred, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMalloc(d_blueBlurred, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMemset(*d_redBlurred, 0, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMemset(*d_greenBlurred, 0, sizeof(unsigned char) * numPixels));
	checkCudaErrors(cudaMemset(*d_blueBlurred, 0, sizeof(unsigned char) * numPixels));
	//filter
	checkCudaErrors(cudaMalloc(d_filter, sizeof(float) * filterWidth * filterWidth));
	checkCudaErrors(cudaMemcpy(*d_filter, *h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void postProcess(const std::string &output_file, uchar4 *const h_outputImage, const uchar4 *const d_outputImage,
	const int rows, const int cols)
{
	// Copy the finished image off the device, then write it to disk.
	const size_t byteCount = static_cast<size_t>(rows) * cols * sizeof(uchar4);
	checkCudaErrors(cudaMemcpy(h_outputImage, d_outputImage, byteCount, cudaMemcpyDeviceToHost));
	saveImageRGBA(h_outputImage, rows, cols, output_file);
}
// Convolves one planar channel with the (filterWidth x filterWidth) filter.
// One thread per output pixel; out-of-image neighbours are clamped to the
// nearest edge pixel.
__global__ void gaussian_blur_kernel(const unsigned char *const inputChannel,
	unsigned char *const outputChannel,
	const int numRows, const int numCols,
	const float *const filter, const int filterWidth)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= numCols || row >= numRows)
		return;

	const int halo = filterWidth / 2;
	float acc = 0.f;
	for (int fr = -halo; fr <= halo; ++fr)
	{
		for (int fc = -halo; fc <= halo; ++fc)
		{
			// Clamp the sample position to the image boundary.
			const int r = min(max(row + fr, 0), numRows - 1);
			const int c = min(max(col + fc, 0), numCols - 1);
			acc += static_cast<float>(inputChannel[r * numCols + c]) *
				filter[(fr + halo) * filterWidth + (fc + halo)];
		}
	}
	outputChannel[row * numCols + col] = static_cast<unsigned char>(acc);
}
// Splits the interleaved RGBA image into three planar channel arrays
// (alpha is discarded). One thread per pixel.
__global__ void separateChannels_kernel(const uchar4 *const inputImageRGBA,
	const int numRows, const int numCols,
	unsigned char *const redChannel,
	unsigned char *const greenChannel,
	unsigned char *const blueChannel)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= numCols || row >= numRows)
		return;
	const int idx = row * numCols + col;
	const uchar4 pixel = inputImageRGBA[idx];
	redChannel[idx] = pixel.x;
	greenChannel[idx] = pixel.y;
	blueChannel[idx] = pixel.z;
}
// Packs three planar channels back into an interleaved RGBA image; the alpha
// channel is forced to 255 (fully opaque). One thread per pixel.
__global__ void recombineChannels_kernel(const unsigned char *const redChannel,
	const unsigned char *const greenChannel,
	const unsigned char *const blueChannel,
	uchar4 *const outputImageRGBA,
	const int numRows, const int numCols)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	// Threads mapped past the image edge do nothing.
	if (col >= numCols || row >= numRows)
		return;
	const int idx = row * numCols + col;
	outputImageRGBA[idx] = make_uchar4(redChannel[idx], greenChannel[idx], blueChannel[idx], 255);
}
// Device-side pipeline: split d_inputImageRGBA into planar R/G/B, blur each
// channel, and recombine into d_outputImageRGBA.
// NOTE(review): h_inputImageRGBA is accepted but never used here; the raw and
// blurred channel pointers are interleaved in the parameter list
// (redBlurred, red, greenBlurred, green, blueBlurred, blue), so callers must
// match this exact order.
void cuda_gaussian_blur(const uchar4 *const h_inputImageRGBA, uchar4 *const d_inputImageRGBA,
	uchar4 *const d_outputImageRGBA, const int numRows, const int numCols,
	unsigned char *d_redBlurred, unsigned char *d_red,
	unsigned char *d_greenBlurred, unsigned char *d_green,
	unsigned char *d_blueBlurred, unsigned char *d_blue,
	float *d_filter, const int filterWidth)
{
	// define the dimensions of each thread block (max = 1024 = 32*32)
	int blockW = 32;
	int blockH = 32;
	// Set reasonable block size (i.e., number of threads per block)
	dim3 blockSize(blockW, blockH);
	// Compute correct grid size (i.e., number of blocks per kernel launch)
	// from the image size and block size.
	int gridW = (numCols % blockW != 0) ? (numCols / blockW + 1) : (numCols / blockW);
	int gridH = (numRows % blockH != 0) ? (numRows / blockH + 1) : (numRows / blockH);
	dim3 gridSize(gridW, gridH);
	// Launch a kernel for separating the RGBA image into different color channels
	separateChannels_kernel<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
	cudaDeviceSynchronize();
	checkCudaErrors(cudaGetLastError());
	// Call your convolution kernel here 3 times, once for each color channel.
	// TODO: use streams for concurrency
	gaussian_blur_kernel<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
	gaussian_blur_kernel<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
	gaussian_blur_kernel<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
	cudaDeviceSynchronize();
	checkCudaErrors(cudaGetLastError());
	// Now we recombine channels results
	recombineChannels_kernel<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols);
	cudaDeviceSynchronize();
	checkCudaErrors(cudaGetLastError());
}
void gaussian_blur(const std::string &input_file, const std::string &output_file)
{
	// End-to-end driver: load input_file, Gaussian-blur it on the GPU, write
	// the result to output_file, then release every host and device buffer.
	size_t numRows, numCols;
	uchar4 *h_inputImageRGBA, *d_inputImageRGBA;
	uchar4 *h_outputImageRGBA, *d_outputImageRGBA;
	unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;
	unsigned char *d_red, *d_green, *d_blue;
	float *h_filter, *d_filter;
	preProcess(&h_inputImageRGBA, &h_outputImageRGBA, &d_inputImageRGBA, &d_outputImageRGBA,
		&d_redBlurred, &d_red, &d_greenBlurred, &d_green, &d_blueBlurred, &d_blue,
		&h_filter, &d_filter, numRows, numCols, input_file);
	// Fix: pass the channel buffers in the callee's declared order
	// (redBlurred, red, greenBlurred, green, blueBlurred, blue). The original
	// interleaved them differently, which only produced correct output because
	// all six buffers are interchangeable same-size scratch planes.
	cuda_gaussian_blur(h_inputImageRGBA, d_inputImageRGBA, d_outputImageRGBA, numRows, numCols,
		d_redBlurred, d_red, d_greenBlurred, d_green, d_blueBlurred, d_blue,
		d_filter, filterWidth);
	cudaDeviceSynchronize();
	checkCudaErrors(cudaGetLastError());
	postProcess(output_file, h_outputImageRGBA, d_outputImageRGBA, numRows, numCols);
	checkCudaErrors(cudaFree(d_inputImageRGBA));
	checkCudaErrors(cudaFree(d_outputImageRGBA));
	checkCudaErrors(cudaFree(d_filter));
	checkCudaErrors(cudaFree(d_red));
	checkCudaErrors(cudaFree(d_green));
	checkCudaErrors(cudaFree(d_blue));
	checkCudaErrors(cudaFree(d_redBlurred));
	checkCudaErrors(cudaFree(d_greenBlurred));
	checkCudaErrors(cudaFree(d_blueBlurred));
	delete[] h_inputImageRGBA;
	delete[] h_outputImageRGBA;
	delete[] h_filter;
}
c8630f3b176eb3b1d871007debf6d3baa93da16f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
//#define N 8192
//default one is 2048
#define N 2048
//#define M 8192
#define M 2048
// 16384 takes too long
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 12435
#define BETA 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
	// Fill C (N x N) and A, B with deterministic values derived from the
	// element indices (PolyBench-style initialisation).
	// NOTE(review): A and B are indexed i*N + j while iterating j < M; this is
	// only safe because N == M in this configuration.
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < N; j++)
			C[i*N + j] = ((DATA_TYPE) i*j + 2) / N;

		for (int j = 0; j < M; j++)
		{
			A[i*N + j] = ((DATA_TYPE) i*j) / N;
			B[i*N + j] = ((DATA_TYPE) i*j + 1) / N;
		}
	}
}
void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
	// CPU reference SYR2K: C := BETA*C + ALPHA*(A*B^T + B*A^T).
	// Scale every element of C by BETA first.
	for (int idx = 0; idx < N * N; idx++)
		C[idx] *= BETA;

	// Rank-2k accumulation; the per-k update order matches the original, so
	// floating-point results are bit-identical.
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < N; j++)
		{
			DATA_TYPE acc = C[i*N + j];
			for (int k = 0; k < M; k++)
			{
				acc += ALPHA * A[i*M + k] * B[j*M + k];
				acc += ALPHA * B[i*M + k] * A[j*M + k];
			}
			C[i*N + j] = acc;
		}
	}
}
void compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu)
{
	// Count elements whose CPU/GPU percent difference exceeds the threshold
	// and report the total.
	int fail = 0;
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
			if (percentDiff(C[i*N + j], C_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
				fail++;
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
	// Report the chosen GPU by name and make it the active device.
	hipDeviceProp_t prop;
	hipGetDeviceProperties(&prop, GPU_DEVICE);
	printf("setting device %d with name %s\n", GPU_DEVICE, prop.name);
	hipSetDevice(GPU_DEVICE);
}
// One thread per element c[i][j]:
// c[i][j] = BETA*c[i][j] + ALPHA*sum_k(a[i][k]*b[j][k] + b[i][k]*a[j][k]).
// Accumulation order matches the CPU-reference update sequence.
__global__ void syr2k_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c)
{
	const int j = blockIdx.x * blockDim.x + threadIdx.x;
	const int i = blockIdx.y * blockDim.y + threadIdx.y;
	if (i >= N || j >= N)
		return;
	// Accumulate in a register and write back once.
	DATA_TYPE acc = c[i * N + j] * BETA;
	for (int k = 0; k < M; k++)
		acc += ALPHA * a[i * M + k] * b[j * M + k] + ALPHA * b[i * M + k] * a[j * M + k];
	c[i * N + j] = acc;
}
// Allocates device copies of A (N x M), B (N x M) and C (N x N), runs the
// SYR2K kernel over an N x N thread grid, times it with rtclock(), and copies
// the result into C_outputFromGpu. Device buffers are freed before returning.
// NOTE(review): the hip* calls here are not error-checked.
void syr2kCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
	double t_start, t_end;
	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	DATA_TYPE *C_gpu;
	hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * M);
	hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * M);
	hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * N * N);
	hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * M, hipMemcpyHostToDevice);
	hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * M, hipMemcpyHostToDevice);
	hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice);
	// One thread per C element; the grid covers the N x N output.
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((size_t)ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_X) ), (size_t)(ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_Y) )));
	t_start = rtclock();
	hipLaunchKernelGGL(( syr2k_kernel), dim3(grid),dim3(block), 0, 0, A_gpu,B_gpu,C_gpu);
	hipDeviceSynchronize(); // include kernel completion in the timing
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
	hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost);
	hipFree(A_gpu);
	hipFree(B_gpu);
	hipFree(C_gpu);
}
int main()
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_outputFromGpu;
A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C_outputFromGpu = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
init_arrays(A, B, C);
GPU_argv_init();
syr2kCuda(A, B, C, C_outputFromGpu);
t_start = rtclock();
// syr2k(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
// compareResults(C, C_outputFromGpu);
free(A);
free(B);
free(C);
free(C_outputFromGpu);
return 0;
}
| c8630f3b176eb3b1d871007debf6d3baa93da16f.cu | /**
* syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
//#define N 8192
//default one is 2048
#define N 2048
//#define M 8192
#define M 2048
// 16384 takes too long
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 12435
#define BETA 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*N + j] = ((DATA_TYPE) i*j + 2) / N;
}
for (j = 0; j < M; j++)
{
A[i*N + j] = ((DATA_TYPE) i*j) / N;
B[i*N + j] = ((DATA_TYPE) i*j + 1) / N;
}
}
}
void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j, k;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*N + j] *= BETA;
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
for (k = 0; k < M; k++)
{
C[i*N + j] += ALPHA * A[i*M + k] * B[j*M + k];
C[i*N + j] += ALPHA * B[i*M + k] * A[j*M + k];
}
}
}
}
void compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu)
{
int i,j,fail;
fail = 0;
// Compare C with D
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
if (percentDiff(C[i*N + j], C_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void syr2k_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < N) && (j < N))
{
c[i * N + j] *= BETA;
int k;
for(k = 0; k < M; k++)
{
c[i * N + j] += ALPHA * a[i * M + k] * b[j * M + k] + ALPHA * b[i * M + k] * a[j * M + k];
}
}
}
void syr2kCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * M);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * M);
cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * N * N);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * M, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * M, cudaMemcpyHostToDevice);
cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_X) ), (size_t)(ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_Y) )));
t_start = rtclock();
syr2k_kernel<<<grid,block>>>(A_gpu,B_gpu,C_gpu);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
}
int main()
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_outputFromGpu;
A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C_outputFromGpu = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
init_arrays(A, B, C);
GPU_argv_init();
syr2kCuda(A, B, C, C_outputFromGpu);
t_start = rtclock();
// syr2k(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
// compareResults(C, C_outputFromGpu);
free(A);
free(B);
free(C);
free(C_outputFromGpu);
return 0;
}
|
b2a4b742c8a43587b5a91010f194c8db84156960.hip | // !!! This is a file automatically generated by hipify!!!
#include <numeric>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "util.hpp"
__global__
void histogram(int* x, int* bins, int n) {
auto i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
const auto c = x[i];
atomicAdd(bins+c, 1);
}
}
int main(void) {
const int n = 1024;
const int c = 16;
int* x = malloc_managed<int>(n);
for (auto i=0; i<n; ++i) x[i] = rand()%c;
int* bins = malloc_managed<int>(c);
std::fill(bins, bins+c, 0);
hipLaunchKernelGGL(( histogram), dim3(1), dim3(n), 0, 0, x, bins, n);
hipDeviceSynchronize();
printf("bins: ");
for (auto i=0; i<c; ++i) printf("%d ", bins[i]); printf("\n");
auto sum = std::accumulate(bins, bins+c, 0);
printf("sum %d, expected %d\n", sum, n);
hipFree(x);
hipFree(bins);
return 0;
}
| b2a4b742c8a43587b5a91010f194c8db84156960.cu | #include <numeric>
#include <cstdio>
#include <cuda.h>
#include "util.hpp"
__global__
void histogram(int* x, int* bins, int n) {
auto i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
const auto c = x[i];
atomicAdd(bins+c, 1);
}
}
int main(void) {
const int n = 1024;
const int c = 16;
int* x = malloc_managed<int>(n);
for (auto i=0; i<n; ++i) x[i] = rand()%c;
int* bins = malloc_managed<int>(c);
std::fill(bins, bins+c, 0);
histogram<<<1, n>>>(x, bins, n);
cudaDeviceSynchronize();
printf("bins: ");
for (auto i=0; i<c; ++i) printf("%d ", bins[i]); printf("\n");
auto sum = std::accumulate(bins, bins+c, 0);
printf("sum %d, expected %d\n", sum, n);
cudaFree(x);
cudaFree(bins);
return 0;
}
|
a66db353027a95916838cb54797314e1307be763.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void helloFromGPU()
{
printf("Hello from GPU thread %d!\n", threadIdx.x);
}
int main(int argc, char **argv)
{
printf("Hello from CPU\n");
hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10), 0, 0, );
// hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| a66db353027a95916838cb54797314e1307be763.cu | #include <stdio.h>
#include <cuda_runtime.h>
__global__ void helloFromGPU()
{
printf("Hello from GPU thread %d!\n", threadIdx.x);
}
int main(int argc, char **argv)
{
printf("Hello from CPU\n");
helloFromGPU <<<1, 10>>>();
// cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
2d489066313b9b69c1a1eeefedfdc8e529dbd029.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void calculoAlgoritmoTroca(float *dev_matrizSuperior, int linhaPerm, int colunaPerm, int totalColunas, int totalLinhas)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float fatorAnulador = 0.0;
//evitar operao em endereo invalido
//se for indice da linha permissivel, desconsiderar
if (i > totalLinhas || i == linhaPerm)
return;
//computar fator anulador da respectiva linha
fatorAnulador = dev_matrizSuperior[i * totalColunas + colunaPerm] * (-1);
//calcular os valores dos elementos da linha usando o fator anulador coletado
for (int coluna = 0; coluna < totalColunas; coluna++){
if (i * totalColunas + coluna > totalLinhas * totalColunas)
return;
//o valor da coluna permissivel sera 0
if (coluna == colunaPerm)
dev_matrizSuperior[i * totalColunas + coluna] = 0;
else
//os demais valores devem respeitar a equacao
//Valor = FatorAnulador * ValorRefLinhaPerm + LinhaAtual;
dev_matrizSuperior[i * totalColunas + coluna] = fatorAnulador
* dev_matrizSuperior[linhaPerm * totalColunas + coluna]
+ dev_matrizSuperior[i * totalColunas + coluna];
}
} | 2d489066313b9b69c1a1eeefedfdc8e529dbd029.cu | #include "includes.h"
__global__ void calculoAlgoritmoTroca(float *dev_matrizSuperior, int linhaPerm, int colunaPerm, int totalColunas, int totalLinhas)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float fatorAnulador = 0.0;
//evitar operação em endereço invalido
//se for indice da linha permissivel, desconsiderar
if (i > totalLinhas || i == linhaPerm)
return;
//computar fator anulador da respectiva linha
fatorAnulador = dev_matrizSuperior[i * totalColunas + colunaPerm] * (-1);
//calcular os valores dos elementos da linha usando o fator anulador coletado
for (int coluna = 0; coluna < totalColunas; coluna++){
if (i * totalColunas + coluna > totalLinhas * totalColunas)
return;
//o valor da coluna permissivel sera 0
if (coluna == colunaPerm)
dev_matrizSuperior[i * totalColunas + coluna] = 0;
else
//os demais valores devem respeitar a equacao
//Valor = FatorAnulador * ValorRefLinhaPerm + LinhaAtual;
dev_matrizSuperior[i * totalColunas + coluna] = fatorAnulador
* dev_matrizSuperior[linhaPerm * totalColunas + coluna]
+ dev_matrizSuperior[i * totalColunas + coluna];
}
} |
3c67b22d2c6e2693c55ac5b23ea28053bf77d5ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "cupoch/visualization/shader/simple_white_shader.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
struct copy_pointcloud_normal_functor {
copy_pointcloud_normal_functor(const Eigen::Vector3f* points,
const Eigen::Vector3f* normals, float line_length)
: points_(points), normals_(normals), line_length_(line_length) {};
const Eigen::Vector3f* points_;
const Eigen::Vector3f* normals_;
const float line_length_;
__device__
Eigen::Vector3f operator() (size_t idx) {
int i = idx / 2;
int j = idx % 2;
if (j == 0) {
return points_[i];
} else {
return points_[i] + normals_[i] * line_length_;
}
}
};
struct copy_mesh_wireflame_functor {
copy_mesh_wireflame_functor(const Eigen::Vector3f* vertices, const int* triangles)
: vertices_(vertices), triangles_(triangles) {};
const Eigen::Vector3f* vertices_;
const int* triangles_;
__device__
Eigen::Vector3f operator() (size_t k) {
int vi = triangles_[k];
return vertices_[vi];
}
};
}
bool SimpleWhiteShader::Compile() {
if (CompileShaders(simple_white_vertex_shader, NULL,
simple_white_fragment_shader) == false) {
PrintShaderWarning("Compiling shaders failed.");
return false;
}
vertex_position_ = glGetAttribLocation(program_, "vertex_position");
MVP_ = glGetUniformLocation(program_, "MVP");
return true;
}
void SimpleWhiteShader::Release() {
UnbindGeometry();
ReleaseProgram();
}
bool SimpleWhiteShader::BindGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
// If there is already geometry, we first unbind it.
// We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
// rebind the geometry. Note that this approach is slow. If the geometry is
// changing per frame, consider implementing a new ShaderWrapper using
// GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object
// Streaming mechanisms.
UnbindGeometry();
// Prepare data to be passed to GPU
const size_t num_data_size = GetDataSize(geometry);
// Create buffers and bind the geometry
glGenBuffers(1, &vertex_position_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW);
cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone));
Eigen::Vector3f* raw_points_ptr;
size_t n_bytes;
cudaSafeCall(hipGraphicsMapResources(1, cuda_graphics_resources_));
cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr);
if (PrepareBinding(geometry, option, view, dev_points_ptr) == false) {
PrintShaderWarning("Binding failed when preparing data.");
return false;
}
Unmap(1);
bound_ = true;
return true;
}
bool SimpleWhiteShader::RenderGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (PrepareRendering(geometry, option, view) == false) {
PrintShaderWarning("Rendering failed during preparation.");
return false;
}
glUseProgram(program_);
glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
glEnableVertexAttribArray(vertex_position_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
glDisableVertexAttribArray(vertex_position_);
return true;
}
void SimpleWhiteShader::UnbindGeometry() {
if (bound_) {
glDeleteBuffers(1, &vertex_position_buffer_);
bound_ = false;
}
}
bool SimpleWhiteShaderForPointCloudNormal::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleWhiteShaderForPointCloudNormal::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
const geometry::PointCloud &pointcloud =
(const geometry::PointCloud &)geometry;
if (pointcloud.HasPoints() == false) {
PrintShaderWarning("Binding failed with empty pointcloud.");
return false;
}
float line_length =
option.point_size_ * 0.01 * view.GetBoundingBox().GetMaxExtent();
copy_pointcloud_normal_functor func(thrust::raw_pointer_cast(pointcloud.points_.data()),
thrust::raw_pointer_cast(pointcloud.normals_.data()), line_length);
thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(pointcloud.points_.size() * 2),
points, func);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(pointcloud.points_.size() * 2);
return true;
}
size_t SimpleWhiteShaderForPointCloudNormal::GetDataSize(const geometry::Geometry &geometry) const {
return ((const geometry::PointCloud &)geometry).points_.size() * 2;
}
bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
glLineWidth(1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_POLYGON_OFFSET_FILL);
return true;
}
bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
const geometry::TriangleMesh &mesh =
(const geometry::TriangleMesh &)geometry;
if (mesh.HasTriangles() == false) {
PrintShaderWarning("Binding failed with empty geometry::TriangleMesh.");
return false;
}
copy_mesh_wireflame_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()),
(int*)(thrust::raw_pointer_cast(mesh.triangles_.data())));
thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3),
points, func);
draw_arrays_mode_ = GL_TRIANGLES;
draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
return true;
}
size_t SimpleWhiteShaderForTriangleMeshWireFrame::GetDataSize(const geometry::Geometry &geometry) const {
return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3;
} | 3c67b22d2c6e2693c55ac5b23ea28053bf77d5ae.cu | #include "cupoch/visualization/shader/simple_white_shader.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
struct copy_pointcloud_normal_functor {
copy_pointcloud_normal_functor(const Eigen::Vector3f* points,
const Eigen::Vector3f* normals, float line_length)
: points_(points), normals_(normals), line_length_(line_length) {};
const Eigen::Vector3f* points_;
const Eigen::Vector3f* normals_;
const float line_length_;
__device__
Eigen::Vector3f operator() (size_t idx) {
int i = idx / 2;
int j = idx % 2;
if (j == 0) {
return points_[i];
} else {
return points_[i] + normals_[i] * line_length_;
}
}
};
struct copy_mesh_wireflame_functor {
copy_mesh_wireflame_functor(const Eigen::Vector3f* vertices, const int* triangles)
: vertices_(vertices), triangles_(triangles) {};
const Eigen::Vector3f* vertices_;
const int* triangles_;
__device__
Eigen::Vector3f operator() (size_t k) {
int vi = triangles_[k];
return vertices_[vi];
}
};
}
bool SimpleWhiteShader::Compile() {
if (CompileShaders(simple_white_vertex_shader, NULL,
simple_white_fragment_shader) == false) {
PrintShaderWarning("Compiling shaders failed.");
return false;
}
vertex_position_ = glGetAttribLocation(program_, "vertex_position");
MVP_ = glGetUniformLocation(program_, "MVP");
return true;
}
void SimpleWhiteShader::Release() {
UnbindGeometry();
ReleaseProgram();
}
bool SimpleWhiteShader::BindGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
// If there is already geometry, we first unbind it.
// We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
// rebind the geometry. Note that this approach is slow. If the geometry is
// changing per frame, consider implementing a new ShaderWrapper using
// GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object
// Streaming mechanisms.
UnbindGeometry();
// Prepare data to be passed to GPU
const size_t num_data_size = GetDataSize(geometry);
// Create buffers and bind the geometry
glGenBuffers(1, &vertex_position_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW);
cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone));
Eigen::Vector3f* raw_points_ptr;
size_t n_bytes;
cudaSafeCall(cudaGraphicsMapResources(1, cuda_graphics_resources_));
cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr);
if (PrepareBinding(geometry, option, view, dev_points_ptr) == false) {
PrintShaderWarning("Binding failed when preparing data.");
return false;
}
Unmap(1);
bound_ = true;
return true;
}
bool SimpleWhiteShader::RenderGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (PrepareRendering(geometry, option, view) == false) {
PrintShaderWarning("Rendering failed during preparation.");
return false;
}
glUseProgram(program_);
glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
glEnableVertexAttribArray(vertex_position_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
glDisableVertexAttribArray(vertex_position_);
return true;
}
void SimpleWhiteShader::UnbindGeometry() {
if (bound_) {
glDeleteBuffers(1, &vertex_position_buffer_);
bound_ = false;
}
}
bool SimpleWhiteShaderForPointCloudNormal::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleWhiteShaderForPointCloudNormal::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
const geometry::PointCloud &pointcloud =
(const geometry::PointCloud &)geometry;
if (pointcloud.HasPoints() == false) {
PrintShaderWarning("Binding failed with empty pointcloud.");
return false;
}
float line_length =
option.point_size_ * 0.01 * view.GetBoundingBox().GetMaxExtent();
copy_pointcloud_normal_functor func(thrust::raw_pointer_cast(pointcloud.points_.data()),
thrust::raw_pointer_cast(pointcloud.normals_.data()), line_length);
thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(pointcloud.points_.size() * 2),
points, func);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(pointcloud.points_.size() * 2);
return true;
}
size_t SimpleWhiteShaderForPointCloudNormal::GetDataSize(const geometry::Geometry &geometry) const {
return ((const geometry::PointCloud &)geometry).points_.size() * 2;
}
bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
glLineWidth(1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_POLYGON_OFFSET_FILL);
return true;
}
bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
const geometry::TriangleMesh &mesh =
(const geometry::TriangleMesh &)geometry;
if (mesh.HasTriangles() == false) {
PrintShaderWarning("Binding failed with empty geometry::TriangleMesh.");
return false;
}
copy_mesh_wireflame_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()),
(int*)(thrust::raw_pointer_cast(mesh.triangles_.data())));
thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3),
points, func);
draw_arrays_mode_ = GL_TRIANGLES;
draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
return true;
}
size_t SimpleWhiteShaderForTriangleMeshWireFrame::GetDataSize(const geometry::Geometry &geometry) const {
return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3;
} |
331dd1d4051df4e04d05ae8f2b3acce6de7a0cd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
#include <iostream>
#include <random>
#include <ctime>
#define numThreads 512
__global__ void addVectors(
int size,
float *d_a,
float *d_b,
float *d_c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= size)
{
return;
}
d_c[i] = d_a[i] + d_b[i];
}
int main(int argc, char** argv)
{
/*
Initialize and define an integer which will
store the size of the vectors that we use.
*/
int size = 1000;
/*
Check for arguments into the main function,
if they exist, the second one should denote
the size of the vector.
*/
if (argc >= 2)
{
size = int(argv[1]);
}
/*
Initialize three vectors with size 'size'
*/
std::vector<float> h_a(size), h_b(size), h_c(size);
/*
Initialize pseudo-random number generator
and uniform distribution.
*/
std::mt19937 gen(time(NULL));
std::uniform_real_distribution<float> dist(0.0, 1.0);
/*
Fill random numbers in the vectors.
*/
for (int i = 0; i < size; i++)
{
h_a[i] = dist(gen);
h_b[i] = dist(gen);
}
/*
Intialize float pointers for the device pointers.
*/
float *d_a, *d_b, *d_c;
/*
Allocate space on the GPU (device) for our arrays.
*/
hipMalloc((void**)&d_a, size*sizeof(float));
hipMalloc((void**)&d_b, size*sizeof(float));
hipMalloc((void**)&d_c, size*sizeof(float));
/*
Copy data from the host vectors using the .data() member function
of vectors to access the underlaying array.
*/
hipMemcpy(d_a, h_a.data(), size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b.data(), size*sizeof(float), hipMemcpyHostToDevice);
/*
Call kernel to handle vector addition.
*/
hipLaunchKernelGGL(( addVectors) , dim3((size + numThreads - 1) / numThreads), dim3(numThreads) , 0, 0,
size,
d_a,
d_b,
d_c);
/*
Copy data back from device to host.
*/
hipMemcpy(h_c.data(), d_c, size*sizeof(float), hipMemcpyDeviceToHost);
/*
Print out results.
*/
for (int i = 0; i < size; i++)
{
std::cout << h_c[i] << std::endl;
}
/*
Free up allocated space on the device
*/
hipFree(d_a); hipFree(d_b); hipFree(d_c);
hipDeviceReset();
return 0;
} | 331dd1d4051df4e04d05ae8f2b3acce6de7a0cd4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
#include <iostream>
#include <random>
#include <ctime>
#define numThreads 512
__global__ void addVectors(
int size,
float *d_a,
float *d_b,
float *d_c)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= size)
{
return;
}
d_c[i] = d_a[i] + d_b[i];
}
int main(int argc, char** argv)
{
/*
Initialize and define an integer which will
store the size of the vectors that we use.
*/
int size = 1000;
/*
Check for arguments into the main function,
if they exist, the second one should denote
the size of the vector.
*/
if (argc >= 2)
{
size = int(argv[1]);
}
/*
Initialize three vectors with size 'size'
*/
std::vector<float> h_a(size), h_b(size), h_c(size);
/*
Initialize pseudo-random number generator
and uniform distribution.
*/
std::mt19937 gen(time(NULL));
std::uniform_real_distribution<float> dist(0.0, 1.0);
/*
Fill random numbers in the vectors.
*/
for (int i = 0; i < size; i++)
{
h_a[i] = dist(gen);
h_b[i] = dist(gen);
}
/*
Intialize float pointers for the device pointers.
*/
float *d_a, *d_b, *d_c;
/*
Allocate space on the GPU (device) for our arrays.
*/
cudaMalloc((void**)&d_a, size*sizeof(float));
cudaMalloc((void**)&d_b, size*sizeof(float));
cudaMalloc((void**)&d_c, size*sizeof(float));
/*
Copy data from the host vectors using the .data() member function
of vectors to access the underlaying array.
*/
cudaMemcpy(d_a, h_a.data(), size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b.data(), size*sizeof(float), cudaMemcpyHostToDevice);
/*
Call kernel to handle vector addition.
*/
addVectors <<<(size + numThreads - 1) / numThreads, numThreads >>>(
size,
d_a,
d_b,
d_c);
/*
Copy data back from device to host.
*/
cudaMemcpy(h_c.data(), d_c, size*sizeof(float), cudaMemcpyDeviceToHost);
/*
Print out results.
*/
for (int i = 0; i < size; i++)
{
std::cout << h_c[i] << std::endl;
}
/*
Free up allocated space on the device
*/
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
cudaDeviceReset();
return 0;
} |
146b5ad786d70a93f0e49049c0d75bb8f14ceec1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setTensorCheckPatternKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
unsigned int ndata = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
setTensorCheckPatternKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,ndata);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
setTensorCheckPatternKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,ndata);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
setTensorCheckPatternKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,ndata);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 146b5ad786d70a93f0e49049c0d75bb8f14ceec1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setTensorCheckPatternKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
unsigned int ndata = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setTensorCheckPatternKernel<<<gridBlock,threadBlock>>>(data,ndata);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setTensorCheckPatternKernel<<<gridBlock,threadBlock>>>(data,ndata);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setTensorCheckPatternKernel<<<gridBlock,threadBlock>>>(data,ndata);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
679f6df382c0183bc65bef6fb34f88e82bf5c6ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#define BLOCK_SIZE 16
// CUDA code to add matrix. It linearizes the 2D matrix and adds them on different threads.
__global__ static void AddMatrix(float *dev_buf1, float *dev_buf2, float *dev_buf_s, size_t pitch, int row_size, int col_size)
{
const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
const int tidy = blockDim.y * blockIdx.y + threadIdx.y;
int index = pitch/sizeof(float);
if(tidx<row_size && tidy<col_size)
{
dev_buf_s[tidx * index + tidy] = dev_buf1[tidx * index + tidy] + dev_buf2[tidx * index + tidy];
}
}
//Print Matrix
void printMatrix(float *lin_matrix, int row_size, int col_size)
{
for(int idxM = 0; idxM < row_size; idxM++)
{
for(int idxN = 0; idxN < col_size; idxN++)
{
printf("%f ",lin_matrix[(idxM * col_size) + idxN]);
}
printf("\n");
}
printf("\n");
}
int main()
{
int row_size=100,col_size=100;
//Allocation of memory
float *host_mat1 = (float*)malloc(row_size * col_size * sizeof(float));
float *host_mat2 = (float*)malloc(row_size * col_size * sizeof(float));
float *host_sum = (float*)malloc(row_size * col_size * sizeof(float));
//Fill matrix with random numbers
for(int j=0;j<(row_size*col_size);j++)
{
host_mat1[j]=((float)rand()/(float)RAND_MAX)*10000;
host_mat2[j]=((float)rand()/(float)RAND_MAX)*10000;
}
//Print input matrixs
printf("==================Matrix 1===========================\n");
printMatrix(host_mat1, row_size, col_size);
printf("===========================Matrix 2==========================\n");
printMatrix(host_mat2, row_size, col_size);
//CUDA allocation on device
float *dev_mat1, *dev_mat2, *dev_mat_sum;
size_t dev_mat_p;
hipMallocPitch((void**)&dev_mat1,&dev_mat_p,col_size*sizeof(float),row_size);
hipMallocPitch((void**)&dev_mat2,&dev_mat_p,col_size*sizeof(float),row_size);
hipMallocPitch((void**)&dev_mat_sum,&dev_mat_p,col_size*sizeof(float),row_size);
//Copy data to device
hipMemcpy2D(dev_mat1,dev_mat_p,host_mat1,col_size * sizeof(float), col_size * sizeof(float), row_size, hipMemcpyHostToDevice);
hipMemcpy2D(dev_mat2,dev_mat_p,host_mat2,col_size * sizeof(float), col_size * sizeof(float), row_size, hipMemcpyHostToDevice);
//Threads and Block sizes
dim3 blocks(1,1,1);
dim3 threads_per_block(BLOCK_SIZE,BLOCK_SIZE,1);
blocks.x=((row_size/BLOCK_SIZE) + (((row_size)%BLOCK_SIZE)==0?0:1));
blocks.y=((col_size/BLOCK_SIZE) + (((col_size)%BLOCK_SIZE)==0?0:1));
//Function call to add
hipLaunchKernelGGL(( AddMatrix), dim3(blocks), dim3(threads_per_block), 0, 0, dev_mat1, dev_mat2, dev_mat_sum, dev_mat_p, row_size,col_size);
hipDeviceSynchronize();
//Copy back result matrix to host
hipMemcpy2D(host_sum, col_size * sizeof(float),dev_mat_sum, dev_mat_p, col_size * sizeof(float), row_size, hipMemcpyDeviceToHost);
//Free CUDA device memory
hipFree(dev_mat1);
hipFree(dev_mat2);
hipFree(dev_mat_sum);
printf("=================Matrix Sum=========================\n");
printMatrix(host_sum, row_size, col_size);
}
| 679f6df382c0183bc65bef6fb34f88e82bf5c6ad.cu | #include<stdio.h>
#include<cuda.h>
#define BLOCK_SIZE 16
// CUDA code to add matrix. It linearizes the 2D matrix and adds them on different threads.
__global__ static void AddMatrix(float *dev_buf1, float *dev_buf2, float *dev_buf_s, size_t pitch, int row_size, int col_size)
{
const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
const int tidy = blockDim.y * blockIdx.y + threadIdx.y;
int index = pitch/sizeof(float);
if(tidx<row_size && tidy<col_size)
{
dev_buf_s[tidx * index + tidy] = dev_buf1[tidx * index + tidy] + dev_buf2[tidx * index + tidy];
}
}
//Print Matrix
void printMatrix(float *lin_matrix, int row_size, int col_size)
{
for(int idxM = 0; idxM < row_size; idxM++)
{
for(int idxN = 0; idxN < col_size; idxN++)
{
printf("%f ",lin_matrix[(idxM * col_size) + idxN]);
}
printf("\n");
}
printf("\n");
}
int main()
{
int row_size=100,col_size=100;
//Allocation of memory
float *host_mat1 = (float*)malloc(row_size * col_size * sizeof(float));
float *host_mat2 = (float*)malloc(row_size * col_size * sizeof(float));
float *host_sum = (float*)malloc(row_size * col_size * sizeof(float));
//Fill matrix with random numbers
for(int j=0;j<(row_size*col_size);j++)
{
host_mat1[j]=((float)rand()/(float)RAND_MAX)*10000;
host_mat2[j]=((float)rand()/(float)RAND_MAX)*10000;
}
//Print input matrixs
printf("==================Matrix 1===========================\n");
printMatrix(host_mat1, row_size, col_size);
printf("===========================Matrix 2==========================\n");
printMatrix(host_mat2, row_size, col_size);
//CUDA allocation on device
float *dev_mat1, *dev_mat2, *dev_mat_sum;
size_t dev_mat_p;
cudaMallocPitch((void**)&dev_mat1,&dev_mat_p,col_size*sizeof(float),row_size);
cudaMallocPitch((void**)&dev_mat2,&dev_mat_p,col_size*sizeof(float),row_size);
cudaMallocPitch((void**)&dev_mat_sum,&dev_mat_p,col_size*sizeof(float),row_size);
//Copy data to device
cudaMemcpy2D(dev_mat1,dev_mat_p,host_mat1,col_size * sizeof(float), col_size * sizeof(float), row_size, cudaMemcpyHostToDevice);
cudaMemcpy2D(dev_mat2,dev_mat_p,host_mat2,col_size * sizeof(float), col_size * sizeof(float), row_size, cudaMemcpyHostToDevice);
//Threads and Block sizes
dim3 blocks(1,1,1);
dim3 threads_per_block(BLOCK_SIZE,BLOCK_SIZE,1);
blocks.x=((row_size/BLOCK_SIZE) + (((row_size)%BLOCK_SIZE)==0?0:1));
blocks.y=((col_size/BLOCK_SIZE) + (((col_size)%BLOCK_SIZE)==0?0:1));
//Function call to add
AddMatrix<<<blocks, threads_per_block>>>(dev_mat1, dev_mat2, dev_mat_sum, dev_mat_p, row_size,col_size);
cudaThreadSynchronize();
//Copy back result matrix to host
cudaMemcpy2D(host_sum, col_size * sizeof(float),dev_mat_sum, dev_mat_p, col_size * sizeof(float), row_size, cudaMemcpyDeviceToHost);
//Free CUDA device memory
cudaFree(dev_mat1);
cudaFree(dev_mat2);
cudaFree(dev_mat_sum);
printf("=================Matrix Sum=========================\n");
printMatrix(host_sum, row_size, col_size);
}
|
a4e8665e210a8fcbbe99edcc89eb116e0eecb1cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b, int n, const float *xyz, int m,
const float *xyz2, float *result, int *result_i) {
const int batch = 512;
__shared__ float buf[batch * 3];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int k2 = 0; k2 < m; k2 += batch) {
int end_k = min(m, k2 + batch) - k2;
for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) {
buf[j] = xyz2[(i * m + k2) * 3 + j];
}
__syncthreads();
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz[(i * n + j) * 3 + 0];
float y1 = xyz[(i * n + j) * 3 + 1];
float z1 = xyz[(i * n + j) * 3 + 2];
int best_i = 0;
float best = 0;
int end_ka = end_k - (end_k & 3);
if (end_ka == batch) {
for (int k = 0; k < batch; k += 4) {
{
float x2 = buf[k * 3 + 0] - x1;
float y2 = buf[k * 3 + 1] - y1;
float z2 = buf[k * 3 + 2] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || d < best) {
best = d;
best_i = k + k2;
}
}
{
float x2 = buf[k * 3 + 3] - x1;
float y2 = buf[k * 3 + 4] - y1;
float z2 = buf[k * 3 + 5] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 1;
}
}
{
float x2 = buf[k * 3 + 6] - x1;
float y2 = buf[k * 3 + 7] - y1;
float z2 = buf[k * 3 + 8] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 2;
}
}
{
float x2 = buf[k * 3 + 9] - x1;
float y2 = buf[k * 3 + 10] - y1;
float z2 = buf[k * 3 + 11] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 3;
}
}
}
} else {
for (int k = 0; k < end_ka; k += 4) {
{
float x2 = buf[k * 3 + 0] - x1;
float y2 = buf[k * 3 + 1] - y1;
float z2 = buf[k * 3 + 2] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || d < best) {
best = d;
best_i = k + k2;
}
}
{
float x2 = buf[k * 3 + 3] - x1;
float y2 = buf[k * 3 + 4] - y1;
float z2 = buf[k * 3 + 5] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 1;
}
}
{
float x2 = buf[k * 3 + 6] - x1;
float y2 = buf[k * 3 + 7] - y1;
float z2 = buf[k * 3 + 8] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 2;
}
}
{
float x2 = buf[k * 3 + 9] - x1;
float y2 = buf[k * 3 + 10] - y1;
float z2 = buf[k * 3 + 11] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 3;
}
}
}
}
for (int k = end_ka; k < end_k; k++) {
float x2 = buf[k * 3 + 0] - x1;
float y2 = buf[k * 3 + 1] - y1;
float z2 = buf[k * 3 + 2] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || d < best) {
best = d;
best_i = k + k2;
}
}
if (k2 == 0 || result[(i * n + j)] > best) {
result[(i * n + j)] = best;
result_i[(i * n + j)] = best_i;
}
}
__syncthreads();
}
}
}
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1,
at::Tensor idx2) {
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceKernel) , dim3(dim3(32, 16, 1)), dim3(512) , 0, 0, batch_size, n, xyz1.data<float>(), m,
xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel) , dim3(dim3(32, 16, 1)), dim3(512) , 0, 0, batch_size, m, xyz2.data<float>(), n,
xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b, int n, const float *xyz1, int m, const float *xyz2, const float *grad_dist1,
const int *idx1, float *grad_xyz1, float *grad_xyz2) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz1[(i * n + j) * 3 + 0];
float y1 = xyz1[(i * n + j) * 3 + 1];
float z1 = xyz1[(i * n + j) * 3 + 2];
int j2 = idx1[i * n + j];
float x2 = xyz2[(i * m + j2) * 3 + 0];
float y2 = xyz2[(i * m + j2) * 3 + 1];
float z2 = xyz2[(i * m + j2) * 3 + 2];
float g = grad_dist1[i * n + j] * 2;
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2)));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2)));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2)));
}
}
}
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1,
at::Tensor gradxyz2, at::Tensor graddist1,
at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) {
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); // num_points point cloud A
const auto m = xyz2.size(1); // num_points point cloud B
hipLaunchKernelGGL(( NmDistanceGradKernel) , dim3(dim3(1, 16, 1)), dim3(256) , 0, 0, batch_size, n, xyz1.data<float>(), m,
xyz2.data<float>(), graddist1.data<float>(), idx1.data<int>(),
gradxyz1.data<float>(), gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel) , dim3(dim3(1, 16, 1)), dim3(256) , 0, 0, batch_size, m, xyz2.data<float>(), n,
xyz1.data<float>(), graddist2.data<float>(), idx2.data<int>(),
gradxyz2.data<float>(), gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
} | a4e8665e210a8fcbbe99edcc89eb116e0eecb1cd.cu | #include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b, int n, const float *xyz, int m,
const float *xyz2, float *result, int *result_i) {
const int batch = 512;
__shared__ float buf[batch * 3];
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int k2 = 0; k2 < m; k2 += batch) {
int end_k = min(m, k2 + batch) - k2;
for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) {
buf[j] = xyz2[(i * m + k2) * 3 + j];
}
__syncthreads();
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz[(i * n + j) * 3 + 0];
float y1 = xyz[(i * n + j) * 3 + 1];
float z1 = xyz[(i * n + j) * 3 + 2];
int best_i = 0;
float best = 0;
int end_ka = end_k - (end_k & 3);
if (end_ka == batch) {
for (int k = 0; k < batch; k += 4) {
{
float x2 = buf[k * 3 + 0] - x1;
float y2 = buf[k * 3 + 1] - y1;
float z2 = buf[k * 3 + 2] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || d < best) {
best = d;
best_i = k + k2;
}
}
{
float x2 = buf[k * 3 + 3] - x1;
float y2 = buf[k * 3 + 4] - y1;
float z2 = buf[k * 3 + 5] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 1;
}
}
{
float x2 = buf[k * 3 + 6] - x1;
float y2 = buf[k * 3 + 7] - y1;
float z2 = buf[k * 3 + 8] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 2;
}
}
{
float x2 = buf[k * 3 + 9] - x1;
float y2 = buf[k * 3 + 10] - y1;
float z2 = buf[k * 3 + 11] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 3;
}
}
}
} else {
for (int k = 0; k < end_ka; k += 4) {
{
float x2 = buf[k * 3 + 0] - x1;
float y2 = buf[k * 3 + 1] - y1;
float z2 = buf[k * 3 + 2] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || d < best) {
best = d;
best_i = k + k2;
}
}
{
float x2 = buf[k * 3 + 3] - x1;
float y2 = buf[k * 3 + 4] - y1;
float z2 = buf[k * 3 + 5] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 1;
}
}
{
float x2 = buf[k * 3 + 6] - x1;
float y2 = buf[k * 3 + 7] - y1;
float z2 = buf[k * 3 + 8] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 2;
}
}
{
float x2 = buf[k * 3 + 9] - x1;
float y2 = buf[k * 3 + 10] - y1;
float z2 = buf[k * 3 + 11] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (d < best) {
best = d;
best_i = k + k2 + 3;
}
}
}
}
for (int k = end_ka; k < end_k; k++) {
float x2 = buf[k * 3 + 0] - x1;
float y2 = buf[k * 3 + 1] - y1;
float z2 = buf[k * 3 + 2] - z1;
float d = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || d < best) {
best = d;
best_i = k + k2;
}
}
if (k2 == 0 || result[(i * n + j)] > best) {
result[(i * n + j)] = best;
result_i[(i * n + j)] = best_i;
}
}
__syncthreads();
}
}
}
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1,
at::Tensor idx2) {
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceKernel <<< dim3(32, 16, 1), 512 >>> (batch_size, n, xyz1.data<float>(), m,
xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel <<< dim3(32, 16, 1), 512 >>> (batch_size, m, xyz2.data<float>(), n,
xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b, int n, const float *xyz1, int m, const float *xyz2, const float *grad_dist1,
const int *idx1, float *grad_xyz1, float *grad_xyz2) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) {
float x1 = xyz1[(i * n + j) * 3 + 0];
float y1 = xyz1[(i * n + j) * 3 + 1];
float z1 = xyz1[(i * n + j) * 3 + 2];
int j2 = idx1[i * n + j];
float x2 = xyz2[(i * m + j2) * 3 + 0];
float y2 = xyz2[(i * m + j2) * 3 + 1];
float z2 = xyz2[(i * m + j2) * 3 + 2];
float g = grad_dist1[i * n + j] * 2;
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2)));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2)));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2)));
}
}
}
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1,
at::Tensor gradxyz2, at::Tensor graddist1,
at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) {
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); // num_points point cloud A
const auto m = xyz2.size(1); // num_points point cloud B
NmDistanceGradKernel <<< dim3(1, 16, 1), 256 >>> (batch_size, n, xyz1.data<float>(), m,
xyz2.data<float>(), graddist1.data<float>(), idx1.data<int>(),
gradxyz1.data<float>(), gradxyz2.data<float>());
NmDistanceGradKernel <<< dim3(1, 16, 1), 256 >>> (batch_size, m, xyz2.data<float>(), n,
xyz1.data<float>(), graddist2.data<float>(), idx2.data<int>(),
gradxyz2.data<float>(), gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
} |
7940321c9ca8aff014cdb07a67998e53ff93907f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/**
* C file for parallel QR factorization program usign CUDA
* See header for more infos.
*
* 2016 Marco Tieghi - marco01.tieghi@student.unife.it
*
*/
#define THREADS_PER_BLOCK 512 //I'll use 512 threads for each block (as required in the assignment)
__global__ void scale(double *d, int m, int ld, double *s) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < m) {
d[idx*ld] = d[idx*ld] / sqrt(*s); //Applying scale
}
} | 7940321c9ca8aff014cdb07a67998e53ff93907f.cu | #include "includes.h"
/**
* C file for parallel QR factorization program usign CUDA
* See header for more infos.
*
* 2016 Marco Tieghi - marco01.tieghi@student.unife.it
*
*/
#define THREADS_PER_BLOCK 512 //I'll use 512 threads for each block (as required in the assignment)
__global__ void scale(double *d, int m, int ld, double *s) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < m) {
d[idx*ld] = d[idx*ld] / sqrt(*s); //Applying scale
}
} |
aad4275c3a25d6ef3a9bf694410dcd41fd04df41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
__global__
void find_max(const float* const d_logLuminance,
const size_t numPixels,
float *d_max_logLum
)
{
extern __shared__ float sdata[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_logLuminance[x];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if(sdata[tid]<sdata[tid+s]) {
sdata[tid] = sdata[tid + s];
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_max_logLum[blockIdx.x] = sdata[0];
}
}
__global__
void find_min(const float* const d_logLuminance,
const size_t numPixels,
float *d_min_logLum
)
{
extern __shared__ float sdata[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_logLuminance[x];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if(sdata[tid]>sdata[tid+s]) {
sdata[tid] = sdata[tid + s];
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_min_logLum[blockIdx.x] = sdata[0];
}
}
__global__
void gen_histo(const float* const d_logLuminance,
const size_t numPixels,
const size_t numBins,
unsigned int *d_histo,
const float logLumMin,
const float logLumRange
)
{
extern __shared__ int s_histo[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
for(int idx=threadIdx.x;idx<numBins;idx+=blockDim.x) {
s_histo[idx] = 0;
}
__syncthreads(); // make sure entire block is loaded!
if (x<numPixels) {
unsigned int bin = static_cast<unsigned int>((d_logLuminance[x] - logLumMin) / logLumRange * numBins);
if(bin>=numBins) {
bin=numBins-1;
}
atomicAdd(&(s_histo[bin]), 1);
}
__syncthreads(); // make sure entire block is loaded!
for(int idx=threadIdx.x;idx<numBins;idx+=blockDim.x) {
atomicAdd(&(d_histo[idx]), s_histo[idx]);
}
}
__global__
void get_cdf(const unsigned int * const d_histo,
const size_t numBins,
unsigned int * d_cdf
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x!=0) {
return;
}
d_cdf[0]=0;
for(int i=1;i<numBins;i++) {
d_cdf[i]=d_histo[i-1]+d_cdf[i-1];
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
//Step 1
//first we find the minimum and maximum across the entire image
float *d_min_logLum;
float *d_max_logLum;
size_t numPixels=numCols*numRows;
{
const dim3 blockSize(256, 1, 1);
dim3 gridSize((numPixels+blockSize.x-1)/blockSize.x , 1 , 1);
checkCudaErrors(hipMalloc(&d_min_logLum,gridSize.x*sizeof(float)));
hipLaunchKernelGGL(( find_min), dim3(gridSize), dim3(blockSize), blockSize.x*sizeof(float), 0, d_logLuminance, numPixels, d_min_logLum);
checkCudaErrors(hipMalloc(&d_max_logLum,gridSize.x*sizeof(float)));
hipLaunchKernelGGL(( find_max), dim3(gridSize), dim3(blockSize), blockSize.x*sizeof(float), 0, d_logLuminance, numPixels, d_max_logLum);
while(gridSize.x>1) {
int groupSize=gridSize.x;
gridSize.x=(groupSize+blockSize.x-1)/blockSize.x;
hipLaunchKernelGGL(( find_min), dim3(gridSize), dim3(blockSize), blockSize.x*sizeof(float), 0, d_min_logLum, groupSize, d_min_logLum);
hipLaunchKernelGGL(( find_max), dim3(gridSize), dim3(blockSize), blockSize.x*sizeof(float), 0, d_max_logLum, groupSize, d_max_logLum);
}
}
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&min_logLum, d_min_logLum, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_logLum, d_max_logLum, sizeof(float), hipMemcpyDeviceToHost));
//Step 2 && Step 3
unsigned int *d_histo;
{
const dim3 blockSize(256, 1, 1);
const dim3 gridSize((numPixels+blockSize.x-1)/blockSize.x , 1 , 1);
checkCudaErrors(hipMalloc(&d_histo,numBins*sizeof(unsigned int)));
checkCudaErrors(hipMemset(d_histo,0,numBins*sizeof(unsigned int)));
hipLaunchKernelGGL(( gen_histo), dim3(gridSize), dim3(blockSize),numBins*sizeof(unsigned int), 0, d_logLuminance, numPixels, numBins,d_histo,min_logLum,max_logLum-min_logLum);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Step 4
{
const dim3 blockSize(256, 1, 1);
hipLaunchKernelGGL(( get_cdf), dim3(1), dim3(blockSize), 0, 0, d_histo, numBins,d_cdf);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
}
| aad4275c3a25d6ef3a9bf694410dcd41fd04df41.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
// Block-wise max reduction over d_logLuminance.
// Launch: 1D grid, blockDim.x must be a power of two, dynamic shared memory
// of blockDim.x * sizeof(float). Writes one partial maximum per block to
// d_max_logLum[blockIdx.x]. Requires numPixels >= 1.
__global__
void find_max(const float* const d_logLuminance,
              const size_t numPixels,
              float *d_max_logLum
              )
{
  extern __shared__ float sdata[];
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int tid = threadIdx.x;
  // Guard the global load: the last block may run past numPixels (the
  // original read out of bounds here). Out-of-range threads load element 0,
  // a real data value, which is neutral for a max reduction.
  sdata[tid] = ((size_t)x < numPixels) ? d_logLuminance[x] : d_logLuminance[0];
  __syncthreads(); // make sure entire block is loaded!
  // Tree reduction in shared memory; halve the active range each step.
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
  {
    if (tid < s && sdata[tid] < sdata[tid + s])
    {
      sdata[tid] = sdata[tid + s];
    }
    __syncthreads(); // barrier is outside the divergent branch on purpose
  }
  // only thread 0 writes result for this block back to global mem
  if (tid == 0)
  {
    d_max_logLum[blockIdx.x] = sdata[0];
  }
}
// Block-wise min reduction over d_logLuminance.
// Launch: 1D grid, blockDim.x must be a power of two, dynamic shared memory
// of blockDim.x * sizeof(float). Writes one partial minimum per block to
// d_min_logLum[blockIdx.x]. Requires numPixels >= 1.
__global__
void find_min(const float* const d_logLuminance,
              const size_t numPixels,
              float *d_min_logLum
              )
{
  extern __shared__ float sdata[];
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int tid = threadIdx.x;
  // Guard the global load: the last block may run past numPixels (the
  // original read out of bounds here). Out-of-range threads load element 0,
  // a real data value, which is neutral for a min reduction.
  sdata[tid] = ((size_t)x < numPixels) ? d_logLuminance[x] : d_logLuminance[0];
  __syncthreads(); // make sure entire block is loaded!
  // Tree reduction in shared memory; halve the active range each step.
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
  {
    if (tid < s && sdata[tid] > sdata[tid + s])
    {
      sdata[tid] = sdata[tid + s];
    }
    __syncthreads(); // barrier is outside the divergent branch on purpose
  }
  // only thread 0 writes result for this block back to global mem
  if (tid == 0)
  {
    d_min_logLum[blockIdx.x] = sdata[0];
  }
}
// Histogram of luminance values: bin = (lum - lumMin) / lumRange * numBins.
// Launch: 1D grid covering numPixels, dynamic shared memory of
// numBins * sizeof(unsigned int). Accumulates per-block in shared memory,
// then flushes into d_histo with global atomics. Assumes logLumRange > 0.
__global__
void gen_histo(const float* const d_logLuminance,
               const size_t numPixels,
               const size_t numBins,
               unsigned int *d_histo,
               const float logLumMin,
               const float logLumRange
               )
{
  // unsigned int to match d_histo (the original declared this as int and
  // mixed signed/unsigned operands in the atomicAdd flush below).
  extern __shared__ unsigned int s_histo[];
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  // Cooperatively zero the shared histogram; numBins may exceed blockDim.x.
  for (int idx = threadIdx.x; idx < numBins; idx += blockDim.x) {
    s_histo[idx] = 0;
  }
  __syncthreads(); // histogram fully zeroed before counting
  if (x < numPixels) {
    unsigned int bin = static_cast<unsigned int>((d_logLuminance[x] - logLumMin) / logLumRange * numBins);
    // Clamp the maximum luminance (and any rounding overshoot) into the
    // last bin.
    if (bin >= numBins) {
      bin = numBins - 1;
    }
    atomicAdd(&(s_histo[bin]), 1u);
  }
  __syncthreads(); // all block-local counts committed
  // Flush block-local counts into the global histogram. Skipping empty
  // bins avoids useless global atomics without changing the result.
  for (int idx = threadIdx.x; idx < numBins; idx += blockDim.x) {
    if (s_histo[idx] != 0) {
      atomicAdd(&(d_histo[idx]), s_histo[idx]);
    }
  }
}
// Exclusive prefix sum (scan) of the histogram into d_cdf:
//   d_cdf[0] = 0, d_cdf[i] = d_cdf[i-1] + d_histo[i-1].
// Done serially by a single thread; numBins is small enough that a
// sequential pass is acceptable.
__global__
void get_cdf(const unsigned int * const d_histo,
             const size_t numBins,
             unsigned int * d_cdf
             )
{
  // All threads but the global first one bail out immediately.
  if (blockIdx.x * blockDim.x + threadIdx.x != 0) {
    return;
  }
  unsigned int running = 0;
  for (int i = 0; i < numBins; i++) {
    d_cdf[i] = running;       // exclusive: value before adding bin i
    running += d_histo[i];
  }
}
// Computes min/max luminance, a histogram of the luminance channel, and its
// exclusive-scan CDF (written to the pre-allocated d_cdf).
//   1) multi-pass block reductions for min/max -> min_logLum / max_logLum
//   2-3) histogram with bin = (lum - lumMin) / lumRange * numBins
//   4) exclusive scan of the histogram into d_cdf
void your_histogram_and_prefixsum(const float* const d_logLuminance,
                                  unsigned int* const d_cdf,
                                  float &min_logLum,
                                  float &max_logLum,
                                  const size_t numRows,
                                  const size_t numCols,
                                  const size_t numBins)
{
  // Step 1: find the minimum and maximum across the entire image.
  float *d_min_logLum;
  float *d_max_logLum;
  size_t numPixels = numCols * numRows;
  {
    const dim3 blockSize(256, 1, 1);
    dim3 gridSize((numPixels + blockSize.x - 1) / blockSize.x, 1, 1);
    checkCudaErrors(cudaMalloc(&d_min_logLum, gridSize.x * sizeof(float)));
    find_min<<<gridSize, blockSize, blockSize.x * sizeof(float)>>>(d_logLuminance, numPixels, d_min_logLum);
    checkCudaErrors(cudaMalloc(&d_max_logLum, gridSize.x * sizeof(float)));
    find_max<<<gridSize, blockSize, blockSize.x * sizeof(float)>>>(d_logLuminance, numPixels, d_max_logLum);
    // Keep reducing the per-block partials in place until a single value
    // remains in element 0 of each buffer.
    while (gridSize.x > 1) {
      int groupSize = gridSize.x;
      gridSize.x = (groupSize + blockSize.x - 1) / blockSize.x;
      find_min<<<gridSize, blockSize, blockSize.x * sizeof(float)>>>(d_min_logLum, groupSize, d_min_logLum);
      find_max<<<gridSize, blockSize, blockSize.x * sizeof(float)>>>(d_max_logLum, groupSize, d_max_logLum);
    }
  }
  // Synchronize, then surface any launch/execution error.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  checkCudaErrors(cudaMemcpy(&min_logLum, d_min_logLum, sizeof(float), cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(&max_logLum, d_max_logLum, sizeof(float), cudaMemcpyDeviceToHost));
  // Steps 2 & 3: histogram of the luminance values.
  unsigned int *d_histo;
  {
    const dim3 blockSize(256, 1, 1);
    const dim3 gridSize((numPixels + blockSize.x - 1) / blockSize.x, 1, 1);
    checkCudaErrors(cudaMalloc(&d_histo, numBins * sizeof(unsigned int)));
    checkCudaErrors(cudaMemset(d_histo, 0, numBins * sizeof(unsigned int)));
    gen_histo<<<gridSize, blockSize, numBins * sizeof(unsigned int)>>>(d_logLuminance, numPixels, numBins, d_histo, min_logLum, max_logLum - min_logLum);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  }
  // Step 4: exclusive scan of the histogram into d_cdf.
  {
    const dim3 blockSize(256, 1, 1);
    get_cdf<<<1, blockSize>>>(d_histo, numBins, d_cdf);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  }
  // Release scratch device buffers; the original code leaked all three.
  checkCudaErrors(cudaFree(d_min_logLum));
  checkCudaErrors(cudaFree(d_max_logLum));
  checkCudaErrors(cudaFree(d_histo));
}
|
6aff780de73260e946d9a26f380bd37df6754901.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "MonolingReader.h"
#include "CorpusReader.h"
namespace cytonMt
{
// Bind this reader to its input file name (or "stdin") and the vocabulary
// used to map tokens to ids.
void MonolingReader::init(const string& file_name_, Vocabulary* vocab_)
{
  vocab = vocab_;
  fileName = file_name_;
}
// Rewind to the beginning of the input by reopening the file.
// Standard input cannot be rewound, so "stdin" is left untouched.
void MonolingReader::reset()
{
  if (fileName == "stdin")
  {
    return;
  }
  file.close();
  file.open(fileName);
}
// Close the underlying input file stream.
void MonolingReader::closeFile()
{
  file.close();
}
// Read up to batchSize non-empty lines from the input (file or stdin),
// parse each into word ids via the vocabulary, append the raw lines (with
// trailing '\n') to *raw, and pack the id sequences into `matrix`.
// Returns the number of sentences actually read (0 on end of input).
int MonolingReader::read_mini_batch(int batchSize, bool ignoreUnk, HostMatInt& matrix, string* raw)
{
  vector<vector<int>> sents;
  int maxLen = 0;
  vector<int> sent;
  for (int i = 0; i < batchSize; i++)
  {
    string line;
    bool read = true;
    // std::getline returns the stream; its operator bool is explicit since
    // C++11, so convert explicitly instead of plain assignment (which is
    // ill-formed for explicit conversion operators).
    if (fileName != "stdin")
    {
      read = static_cast<bool>(std::getline(file, line, '\n'));
    }
    else
    {
      read = static_cast<bool>(std::getline(std::cin, line, '\n'));
    }
    if (read && !line.empty())
    {
      vocab->parse(line, sent, ignoreUnk);
      sents.push_back(sent);
      // std::max, consistent with the original (pre-hipify) source; the
      // unqualified ::max was a hipify artifact.
      maxLen = std::max(maxLen, (int)sent.size());
      *raw += line + "\n";
    }
    if (!read)
    {
      break; // end of input reached before the batch was full
    }
  }
  if (!sents.empty())
  {
    packMatrix(sents, maxLen, batchSize, matrix, true, vocab->sos, vocab->eos, vocab->empty);
  }
  return sents.size();
}
} /* namespace cudaRnnTrans */
| 6aff780de73260e946d9a26f380bd37df6754901.cu | /*
Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "MonolingReader.h"
#include "CorpusReader.h"
namespace cytonMt
{
// Bind this reader to its input file name (or "stdin") and the vocabulary
// used to map tokens to ids.
void MonolingReader::init(const string& file_name_, Vocabulary* vocab_)
{
  vocab = vocab_;
  fileName = file_name_;
}
// Rewind to the beginning of the input by reopening the file.
// Standard input cannot be rewound, so "stdin" is left untouched.
void MonolingReader::reset()
{
  if (fileName == "stdin")
  {
    return;
  }
  file.close();
  file.open(fileName);
}
// Close the underlying input file stream.
void MonolingReader::closeFile()
{
  file.close();
}
// Read up to batchSize non-empty lines from the input (file or stdin),
// parse each into word ids via the vocabulary, append the raw lines (with
// trailing '\n') to *raw, and pack the id sequences into `matrix`.
// Returns the number of sentences actually read (0 on end of input).
int MonolingReader::read_mini_batch(int batchSize, bool ignoreUnk, HostMatInt& matrix, string* raw)
{
  vector<vector<int>> sents;
  int maxLen = 0;
  vector<int> sent;
  for (int i = 0; i < batchSize; i++)
  {
    string line;
    bool read = true;
    // std::getline returns the stream; its operator bool is explicit since
    // C++11, so convert explicitly instead of plain assignment (which is
    // ill-formed for explicit conversion operators).
    if (fileName != "stdin")
    {
      read = static_cast<bool>(std::getline(file, line, '\n'));
    }
    else
    {
      read = static_cast<bool>(std::getline(std::cin, line, '\n'));
    }
    if (read && !line.empty())
    {
      vocab->parse(line, sent, ignoreUnk);
      sents.push_back(sent);
      maxLen = std::max(maxLen, (int)sent.size());
      *raw += line + "\n";
    }
    if (!read)
    {
      break; // end of input reached before the batch was full
    }
  }
  if (!sents.empty())
  {
    packMatrix(sents, maxLen, batchSize, matrix, true, vocab->sos, vocab->eos, vocab->empty);
  }
  return sents.size();
}
} /* namespace cudaRnnTrans */
|
1432b87cd4fcd303212e0fc627d28634d3a8fc8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <cblas.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
/************* Functions **************/
// Element-wise ADD of type Float: x9[i] = x7[i] + x8[i] for i in [0, x10).
// Grid-stride loop, so any 1D launch configuration covers all elements.
__global__ void x6(float* x7, float* x8, float* x9, int x10) {
  int stride = gridDim.x * blockDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < x10; i += stride) {
    x9[i] = x7[i] + x8[i];
  }
}
/**************** Snippet ****************/
// Copies two 6-element float arrays to the device, adds them with kernel x6,
// copies the sum back, and prints it. (x0 is unused by the generated code.)
void Snippet(int x0) {
  float x1[6] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
  float x2[6] = { 6.0, 5.0, 4.0, 3.0, 2.0, 1.0 };
  // Plain null initialization: the original generated code called malloc(0)
  // and then overwrote the pointer with hipMalloc, leaking the host block.
  float* x3 = NULL;
  CUDA_CALL(hipMalloc(&x3, (size_t)(6 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x3, x1, (size_t)(6 * sizeof(float)), hipMemcpyHostToDevice));
  float* x4 = NULL;
  CUDA_CALL(hipMalloc(&x4, (size_t)(6 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x4, x2, (size_t)(6 * sizeof(float)), hipMemcpyHostToDevice));
  float* x5 = NULL;
  CUDA_CALL(hipMalloc(&x5, (size_t)(6 * sizeof(float))));
  hipLaunchKernelGGL(( x6), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x3, x4, x5, 6);
  float* x14 = (float*)malloc(6 * sizeof(float));
  CUDA_CALL(hipMemcpy(x14, x5, (size_t)(6 * sizeof(float)), hipMemcpyDeviceToHost));
  int x15 = 0;
  while (x15 != 6) {
    printf("%f ", x14[x15]);
    x15 = x15 + 1;
  }
  // Release host and device memory (all of it was leaked before).
  free(x14);
  CUDA_CALL(hipFree(x3));
  CUDA_CALL(hipFree(x4));
  CUDA_CALL(hipFree(x5));
}
/*****************************************
End of C Generated Code
*******************************************/
// Entry point: requires exactly one command-line argument, parsed as an
// integer and forwarded to Snippet. Prints usage otherwise.
int main(int argc, char *argv[]) {
  if (argc == 2) {
    Snippet(atoi(argv[1]));
  } else {
    printf("usage: %s <arg>\n", argv[0]);
  }
  return 0;
}
| 1432b87cd4fcd303212e0fc627d28634d3a8fc8a.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <cblas.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
/************* Functions **************/
// Element-wise ADD of type Float: x9[i] = x7[i] + x8[i] for i in [0, x10).
// Grid-stride loop, so any 1D launch configuration covers all elements.
__global__ void x6(float* x7, float* x8, float* x9, int x10) {
  int stride = gridDim.x * blockDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < x10; i += stride) {
    x9[i] = x7[i] + x8[i];
  }
}
/**************** Snippet ****************/
// Copies two 6-element float arrays to the device, adds them with kernel x6,
// copies the sum back, and prints it. (x0 is unused by the generated code.)
void Snippet(int x0) {
  float x1[6] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
  float x2[6] = { 6.0, 5.0, 4.0, 3.0, 2.0, 1.0 };
  // Plain null initialization: the original generated code called malloc(0)
  // and then overwrote the pointer with cudaMalloc, leaking the host block.
  float* x3 = NULL;
  CUDA_CALL(cudaMalloc(&x3, (size_t)(6 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x3, x1, (size_t)(6 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x4 = NULL;
  CUDA_CALL(cudaMalloc(&x4, (size_t)(6 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x4, x2, (size_t)(6 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x5 = NULL;
  CUDA_CALL(cudaMalloc(&x5, (size_t)(6 * sizeof(float))));
  x6<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x3, x4, x5, 6);
  float* x14 = (float*)malloc(6 * sizeof(float));
  CUDA_CALL(cudaMemcpy(x14, x5, (size_t)(6 * sizeof(float)), cudaMemcpyDeviceToHost));
  int x15 = 0;
  while (x15 != 6) {
    printf("%f ", x14[x15]);
    x15 = x15 + 1;
  }
  // Release host and device memory (all of it was leaked before).
  free(x14);
  CUDA_CALL(cudaFree(x3));
  CUDA_CALL(cudaFree(x4));
  CUDA_CALL(cudaFree(x5));
}
/*****************************************
End of C Generated Code
*******************************************/
// Entry point: requires exactly one command-line argument, parsed as an
// integer and forwarded to Snippet. Prints usage otherwise.
int main(int argc, char *argv[]) {
  if (argc == 2) {
    Snippet(atoi(argv[1]));
  } else {
    printf("usage: %s <arg>\n", argv[0]);
  }
  return 0;
}
|
c6b628d083584acdc12717e83e87c44b4459452d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thread> // NOLINT
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/core/stream.h"
#ifdef PADDLE_WITH_CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "paddle/fluid/platform/cuda_graph_with_memory_pool.h"
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#endif
namespace paddle {
namespace memory {
// y += (x + 1)
// y[i] += x[i] + 1 for i in [0, n), using a grid-stride loop so any 1D
// launch configuration covers all n elements.
__global__ void add_kernel(int *x, int *y, int n) {
  int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    y[i] += x[i] + 1;
  }
}
// Asserts that the recorded GPU allocation size for `place` has dropped back
// to zero, i.e. every allocation made during the test was released.
void CheckMemLeak(const platform::CUDAPlace &place) {
  const uint64_t bytes_still_allocated =
      platform::RecordedGpuMallocSize(place.GetDeviceId());
  ASSERT_EQ(bytes_still_allocated, 0)
      << "Found " << bytes_still_allocated
      << " bytes memory that not released yet,"
      << " there may be a memory leak problem";
}
// Allocate via AllocShared, free it, then allocate the same size via Alloc
// on the default stream: the test expects the freed block to be reused
// (EXPECT_EQ on the raw pointer below).
TEST(StreamSafeCUDAAllocInterfaceTest, AllocInterfaceTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  size_t alloc_size = 256;
  std::shared_ptr<Allocation> allocation_implicit_stream =
      AllocShared(place, alloc_size);
  EXPECT_GE(allocation_implicit_stream->size(), alloc_size);
  void *address = allocation_implicit_stream->ptr();
  allocation_implicit_stream.reset();  // free so the block becomes reusable
  gpuStream_t default_stream =
      dynamic_cast<platform::CUDADeviceContext *>(
          paddle::platform::DeviceContextPool::Instance().Get(place))
          ->stream();
  allocation::AllocationPtr allocation_unique =
      Alloc(place, alloc_size,
            phi::Stream(reinterpret_cast<phi::StreamId>(default_stream)));
  EXPECT_GE(allocation_unique->size(), alloc_size);
  EXPECT_EQ(allocation_unique->ptr(), address);  // same block reused
  allocation_unique.reset();
  Release(place);
  CheckMemLeak(place);
}
// Allocate via the facade's Alloc, free it, then allocate through the
// allocator object returned by GetAllocator: the test expects the freed
// block to be reused (pointer equality below).
TEST(StreamSafeCUDAAllocInterfaceTest, GetAllocatorInterfaceTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  size_t alloc_size = 256;
  allocation::AllocationPtr allocation_implicit_stream =
      Alloc(place, alloc_size);
  EXPECT_GE(allocation_implicit_stream->size(), alloc_size);
  void *address = allocation_implicit_stream->ptr();
  allocation_implicit_stream.reset();  // free so the block becomes reusable
  auto &instance = allocation::AllocatorFacade::Instance();
  const std::shared_ptr<Allocator> &allocator = instance.GetAllocator(place);
  allocation::AllocationPtr allocation_from_allocator =
      allocator->Allocate(alloc_size);
  EXPECT_GE(allocation_from_allocator->size(), alloc_size);
  EXPECT_EQ(allocation_from_allocator->ptr(), address);  // same block reused
  allocation_from_allocator.reset();
  Release(place);
  CheckMemLeak(place);
}
// The allocator fetched without a stream must be the very same object as
// the allocator fetched with the place's default stream.
TEST(StreamSafeCUDAAllocInterfaceTest, GetAllocatorWithDefaultStreamTest) {
  auto &instance = allocation::AllocatorFacade::Instance();
  platform::CUDAPlace place = platform::CUDAPlace();
  const std::shared_ptr<Allocator> allocator_implicit_stream =
      instance.GetAllocator(place);
  const std::shared_ptr<Allocator> allocator_default_stream =
      instance.GetAllocator(
          place, static_cast<phi::GPUContext *>(
                     platform::DeviceContextPool::Instance().Get(place))
                     ->stream());
  EXPECT_EQ(allocator_implicit_stream.get(), allocator_default_stream.get());
}
// A zero-size allocation yields a null pointer; RecordStream must tolerate
// it without throwing.
// NOTE(review): both #ifdef branches call the HIP API here — a hipify
// artifact in this translated file; the CUDA original pairs cuda*/hip*.
TEST(StreamSafeCUDAAllocInterfaceTest, ZeroSizeRecordStreamTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  std::shared_ptr<Allocation> zero_size_allocation = AllocShared(place, 0);
  EXPECT_EQ(zero_size_allocation->ptr(), nullptr);
  gpuStream_t stream;
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
#endif
  EXPECT_NO_THROW(RecordStream(zero_size_allocation, stream));
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream));
#endif
}
// GetStream must return the default stream for implicit-stream allocations
// and the explicitly supplied stream otherwise.
TEST(StreamSafeCUDAAllocInterfaceTest, GetStreamInterfaceTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  size_t alloc_size = 256;
  gpuStream_t default_stream =
      dynamic_cast<platform::CUDADeviceContext *>(
          paddle::platform::DeviceContextPool::Instance().Get(place))
          ->stream();
  std::shared_ptr<Allocation> allocation_implicit_stream =
      AllocShared(place, alloc_size);
  EXPECT_EQ(GetStream(allocation_implicit_stream), default_stream);
  gpuStream_t new_stream;
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&new_stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&new_stream));
#endif
  std::shared_ptr<Allocation> allocation_new_stream =
      AllocShared(place, alloc_size,
                  phi::Stream(reinterpret_cast<phi::StreamId>(new_stream)));
  EXPECT_EQ(GetStream(allocation_new_stream), new_stream);
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(new_stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(new_stream));
#endif
  allocation_implicit_stream.reset();
  allocation_new_stream.reset();
  Release(place);
  CheckMemLeak(place);
}
// Sizes two allocations so the second cannot fit until the first is freed:
// the allocator must retry after the main thread frees allocation1 (freed
// but not yet released to the driver) and eventually succeed.
TEST(StreamSafeCUDAAllocRetryTest, RetryTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  gpuStream_t stream1, stream2;
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream1));
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream2));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream1));
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream2));
#endif
  size_t available_size = platform::GpuAvailableMemToAlloc();
  // alloc_size < available_size < 2 * alloc_size,
  // so the second alloc will fail and retry
  size_t alloc_size = available_size / 4 * 3;
  allocation::AllocationPtr allocation1 = Alloc(
      place, alloc_size, phi::Stream(reinterpret_cast<phi::StreamId>(stream1)));
  allocation::AllocationPtr allocation2;
  std::thread th([&allocation2, &place, &stream2, alloc_size]() {
    std::this_thread::sleep_for(std::chrono::seconds(1));
    allocation2 = Alloc(place, alloc_size,
                        phi::Stream(reinterpret_cast<phi::StreamId>(stream2)));
  });
  allocation1.reset();  // free but not release
  th.join();
  EXPECT_GE(allocation2->size(), alloc_size);
  allocation2.reset();
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize());
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize());
#endif
  Release(place, stream1);
  Release(place, stream2);
  CheckMemLeak(place);
}
// Fixture exercising stream-safe allocation across many streams. Each of
// the stream_num_ streams gets a workspace and a result buffer; kernels on
// one stream also read a neighbouring stream's workspace, so RecordStream
// and launch/free ordering are load-bearing throughout.
class StreamSafeCUDAAllocTest : public ::testing::Test {
 protected:
  // Creates stream_num_ streams, and per stream: a zeroed device workspace,
  // a zeroed device result buffer, and a host result buffer.
  void SetUp() override {
    place_ = platform::CUDAPlace();
    stream_num_ = 64;
    grid_num_ = 1;
    block_num_ = 32;
    data_num_ = 131072;
    workspace_size_ = data_num_ * sizeof(int);
    for (size_t i = 0; i < stream_num_; ++i) {
      gpuStream_t stream;
#ifdef PADDLE_WITH_CUDA
      PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
#else
      PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
#endif
      std::shared_ptr<phi::Allocation> workspace_allocation =
          AllocShared(place_, workspace_size_,
                      phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
      std::shared_ptr<phi::Allocation> result_allocation =
          AllocShared(place_, workspace_size_,
                      phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
      std::shared_ptr<phi::Allocation> host_result_allocation =
          AllocShared(platform::CPUPlace(), workspace_size_);
#ifdef PADDLE_WITH_CUDA
      PADDLE_ENFORCE_GPU_SUCCESS(hipMemset(workspace_allocation->ptr(), 0,
                                           workspace_allocation->size()));
      PADDLE_ENFORCE_GPU_SUCCESS(
          hipMemset(result_allocation->ptr(), 0, result_allocation->size()));
#else
      PADDLE_ENFORCE_GPU_SUCCESS(hipMemset(workspace_allocation->ptr(), 0,
                                           workspace_allocation->size()));
      PADDLE_ENFORCE_GPU_SUCCESS(
          hipMemset(result_allocation->ptr(), 0, result_allocation->size()));
#endif
      streams_.emplace_back(stream);
      workspaces_.emplace_back(workspace_allocation);
      results_.emplace_back(result_allocation);
      host_results_.emplace_back(host_result_allocation);
    }
  }
  // Launches two add_kernel calls on streams_[idx]: one reading its own
  // workspace, one reading the neighbour's. The cross-stream use is then
  // declared via RecordStream so the neighbour's buffer is not freed early.
  void SingleStreamRun(size_t idx) {
    int *y = reinterpret_cast<int *>(results_[idx]->ptr());
    int neighbouring_idx = idx > 0 ? idx - 1 : idx;
    hipLaunchKernelGGL(( add_kernel), dim3(grid_num_), dim3(block_num_), 0, streams_[idx],
        reinterpret_cast<int *>(workspaces_[idx]->ptr()), y, data_num_);
    hipLaunchKernelGGL(( add_kernel), dim3(grid_num_), dim3(block_num_), 0, streams_[idx],
        reinterpret_cast<int *>(workspaces_[neighbouring_idx]->ptr()), y,
        data_num_);
    RecordStream(workspaces_[neighbouring_idx], streams_[idx]);
  }
  void MultiStreamRun() {
    // Must run in reverse order, or the workspace_[i - 1] will be released
    // before streams_[i]'s kernel launch
    for (int i = stream_num_ - 1; i >= 0; --i) {
      SingleStreamRun(i);
      workspaces_[i].reset();  // fast GC
    }
  }
  // Same workload but with one host thread per stream, all concurrent.
  void MultiThreadMultiStreamRun() {
    std::vector<std::thread> threads;
    for (size_t i = 0; i < stream_num_; ++i) {
      threads.push_back(
          std::thread(&StreamSafeCUDAAllocTest::SingleStreamRun, this, i));
    }
    for (size_t i = 0; i < stream_num_; ++i) {
      threads[i].join();
    }
    workspaces_.clear();
  }
  // Captures an add_kernel launch (plus a RecordStream) into a CUDA graph,
  // replays it 10 times, and checks each element accumulated once per replay.
  void CUDAGraphRun() {
    testing_cuda_graph_ = true;
    platform::BeginCUDAGraphCapture(platform::CUDAPlace(),
                                    hipStreamCaptureModeGlobal);
    std::shared_ptr<Allocation> data_allocation =
        AllocShared(platform::CUDAPlace(), workspace_size_);
    std::shared_ptr<Allocation> result_allocation =
        AllocShared(platform::CUDAPlace(), workspace_size_);
    int *data = static_cast<int *>(data_allocation->ptr());
    int *result = static_cast<int *>(result_allocation->ptr());
    gpuStream_t main_stream = GetStream(data_allocation);
    gpuStream_t other_stream;
    PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&other_stream));
    hipLaunchKernelGGL(( add_kernel), dim3(grid_num_), dim3(block_num_), 0, main_stream, data, result,
        data_num_);
    RecordStream(data_allocation, other_stream);
    std::unique_ptr<platform::CUDAGraph> cuda_graph =
        platform::EndCUDAGraphCapture();
    int replay_times = 10;
    for (int i = 0; i < replay_times; ++i) {
      cuda_graph->Replay();
    }
    std::shared_ptr<Allocation> host_result_allocation =
        AllocShared(platform::CPUPlace(), workspace_size_);
    Copy(host_result_allocation->place(), host_result_allocation->ptr(),
         result_allocation->place(), result_allocation->ptr(), workspace_size_,
         main_stream);
    hipStreamSynchronize(main_stream);
    int *host_result = static_cast<int *>(host_result_allocation->ptr());
    for (int i = 0; i < data_num_; ++i) {
      EXPECT_EQ(host_result[i], replay_times);
    }
    data_allocation.reset();
    result_allocation.reset();
    cuda_graph.release();
    PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(other_stream));
  }
  // Copies every result buffer to the host and checks each element is 2
  // (one increment from each of the two add_kernel launches per stream).
  void CheckResult() {
    for (size_t i = 0; i < stream_num_; ++i) {
      Copy(host_results_[i]->place(), host_results_[i]->ptr(),
           results_[i]->place(), results_[i]->ptr(), workspace_size_,
           streams_[i]);
    }
    hipDeviceSynchronize();
    size_t thread_num = grid_num_ * block_num_;  // NOTE(review): unused
    for (size_t i = 0; i < stream_num_; ++i) {
      int *result = static_cast<int *>(host_results_[i]->ptr());
      for (size_t j = 0; j < data_num_; ++j) {
        EXPECT_EQ(result[j], 2);
      }
    }
  }
  // Frees every buffer, releases per-stream pools, destroys streams, and
  // (outside CUDA-graph tests) verifies nothing leaked.
  void TearDown() override {
    workspaces_.clear();
    results_.clear();
    host_results_.clear();
    for (gpuStream_t stream : streams_) {
      Release(place_, stream);
    }
    for (size_t i = 0; i < stream_num_; ++i) {
#ifdef PADDLE_WITH_CUDA
      PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(streams_[i]));
#else
      PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(streams_[i]));
#endif
    }
    // Memory release for CUDA Graph memory pool is forbidden
    if (!testing_cuda_graph_) {
      CheckMemLeak(place_);
    }
  }
  bool testing_cuda_graph_{0};  // set by CUDAGraphRun; skips the leak check
  size_t stream_num_;
  size_t grid_num_;
  size_t block_num_;
  size_t data_num_;
  size_t workspace_size_;   // data_num_ * sizeof(int), bytes per buffer
  platform::CUDAPlace place_;
  std::vector<gpuStream_t> streams_;
  std::vector<std::shared_ptr<phi::Allocation>> workspaces_;
  std::vector<std::shared_ptr<phi::Allocation>> results_;
  std::vector<std::shared_ptr<phi::Allocation>> host_results_;
};
// Single host thread driving all streams sequentially (reverse order).
TEST_F(StreamSafeCUDAAllocTest, CUDAMutilStreamTest) {
  MultiStreamRun();
  CheckResult();
}
// One host thread per stream, all running concurrently.
TEST_F(StreamSafeCUDAAllocTest, CUDAMutilThreadMutilStreamTest) {
  MultiThreadMultiStreamRun();
  CheckResult();
}
#ifdef PADDLE_WITH_CUDA
// Multi-stream workload followed by CUDA-graph capture/replay.
TEST_F(StreamSafeCUDAAllocTest, CUDAGraphTest) {
  MultiStreamRun();
  CUDAGraphRun();
  CheckResult();
}
#endif
} // namespace memory
} // namespace paddle
| c6b628d083584acdc12717e83e87c44b4459452d.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thread> // NOLINT
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/core/stream.h"
#ifdef PADDLE_WITH_CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#include "paddle/fluid/platform/cuda_graph_with_memory_pool.h"
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#endif
namespace paddle {
namespace memory {
// y += (x + 1)
// y[i] += x[i] + 1 for i in [0, n), using a grid-stride loop so any 1D
// launch configuration covers all n elements.
__global__ void add_kernel(int *x, int *y, int n) {
  int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    y[i] += x[i] + 1;
  }
}
// Asserts that the recorded GPU allocation size for `place` has dropped back
// to zero, i.e. every allocation made during the test was released.
void CheckMemLeak(const platform::CUDAPlace &place) {
  const uint64_t bytes_still_allocated =
      platform::RecordedGpuMallocSize(place.GetDeviceId());
  ASSERT_EQ(bytes_still_allocated, 0)
      << "Found " << bytes_still_allocated
      << " bytes memory that not released yet,"
      << " there may be a memory leak problem";
}
// Allocate via AllocShared, free it, then allocate the same size via Alloc
// on the default stream: the test expects the freed block to be reused
// (EXPECT_EQ on the raw pointer below).
TEST(StreamSafeCUDAAllocInterfaceTest, AllocInterfaceTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  size_t alloc_size = 256;
  std::shared_ptr<Allocation> allocation_implicit_stream =
      AllocShared(place, alloc_size);
  EXPECT_GE(allocation_implicit_stream->size(), alloc_size);
  void *address = allocation_implicit_stream->ptr();
  allocation_implicit_stream.reset();  // free so the block becomes reusable
  gpuStream_t default_stream =
      dynamic_cast<platform::CUDADeviceContext *>(
          paddle::platform::DeviceContextPool::Instance().Get(place))
          ->stream();
  allocation::AllocationPtr allocation_unique =
      Alloc(place, alloc_size,
            phi::Stream(reinterpret_cast<phi::StreamId>(default_stream)));
  EXPECT_GE(allocation_unique->size(), alloc_size);
  EXPECT_EQ(allocation_unique->ptr(), address);  // same block reused
  allocation_unique.reset();
  Release(place);
  CheckMemLeak(place);
}
// Allocate via the facade's Alloc, free it, then allocate through the
// allocator object returned by GetAllocator: the test expects the freed
// block to be reused (pointer equality below).
TEST(StreamSafeCUDAAllocInterfaceTest, GetAllocatorInterfaceTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  size_t alloc_size = 256;
  allocation::AllocationPtr allocation_implicit_stream =
      Alloc(place, alloc_size);
  EXPECT_GE(allocation_implicit_stream->size(), alloc_size);
  void *address = allocation_implicit_stream->ptr();
  allocation_implicit_stream.reset();  // free so the block becomes reusable
  auto &instance = allocation::AllocatorFacade::Instance();
  const std::shared_ptr<Allocator> &allocator = instance.GetAllocator(place);
  allocation::AllocationPtr allocation_from_allocator =
      allocator->Allocate(alloc_size);
  EXPECT_GE(allocation_from_allocator->size(), alloc_size);
  EXPECT_EQ(allocation_from_allocator->ptr(), address);  // same block reused
  allocation_from_allocator.reset();
  Release(place);
  CheckMemLeak(place);
}
// The allocator fetched without a stream must be the very same object as
// the allocator fetched with the place's default stream.
TEST(StreamSafeCUDAAllocInterfaceTest, GetAllocatorWithDefaultStreamTest) {
  auto &instance = allocation::AllocatorFacade::Instance();
  platform::CUDAPlace place = platform::CUDAPlace();
  const std::shared_ptr<Allocator> allocator_implicit_stream =
      instance.GetAllocator(place);
  const std::shared_ptr<Allocator> allocator_default_stream =
      instance.GetAllocator(
          place, static_cast<phi::GPUContext *>(
                     platform::DeviceContextPool::Instance().Get(place))
                     ->stream());
  EXPECT_EQ(allocator_implicit_stream.get(), allocator_default_stream.get());
}
// A zero-size allocation yields a null pointer; RecordStream must tolerate
// it without throwing.
TEST(StreamSafeCUDAAllocInterfaceTest, ZeroSizeRecordStreamTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  std::shared_ptr<Allocation> zero_size_allocation = AllocShared(place, 0);
  EXPECT_EQ(zero_size_allocation->ptr(), nullptr);
  gpuStream_t stream;
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
#endif
  EXPECT_NO_THROW(RecordStream(zero_size_allocation, stream));
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream));
#endif
}
// GetStream must return the default stream for implicit-stream allocations
// and the explicitly supplied stream otherwise.
TEST(StreamSafeCUDAAllocInterfaceTest, GetStreamInterfaceTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  size_t alloc_size = 256;
  gpuStream_t default_stream =
      dynamic_cast<platform::CUDADeviceContext *>(
          paddle::platform::DeviceContextPool::Instance().Get(place))
          ->stream();
  std::shared_ptr<Allocation> allocation_implicit_stream =
      AllocShared(place, alloc_size);
  EXPECT_EQ(GetStream(allocation_implicit_stream), default_stream);
  gpuStream_t new_stream;
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&new_stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&new_stream));
#endif
  std::shared_ptr<Allocation> allocation_new_stream =
      AllocShared(place, alloc_size,
                  phi::Stream(reinterpret_cast<phi::StreamId>(new_stream)));
  EXPECT_EQ(GetStream(allocation_new_stream), new_stream);
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(new_stream));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(new_stream));
#endif
  allocation_implicit_stream.reset();
  allocation_new_stream.reset();
  Release(place);
  CheckMemLeak(place);
}
// Sizes two allocations so the second cannot fit until the first is freed:
// the allocator must retry after the main thread frees allocation1 (freed
// but not yet released to the driver) and eventually succeed.
TEST(StreamSafeCUDAAllocRetryTest, RetryTest) {
  platform::CUDAPlace place = platform::CUDAPlace();
  gpuStream_t stream1, stream2;
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream1));
  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream2));
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream1));
  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream2));
#endif
  size_t available_size = platform::GpuAvailableMemToAlloc();
  // alloc_size < available_size < 2 * alloc_size,
  // so the second alloc will fail and retry
  size_t alloc_size = available_size / 4 * 3;
  allocation::AllocationPtr allocation1 = Alloc(
      place, alloc_size, phi::Stream(reinterpret_cast<phi::StreamId>(stream1)));
  allocation::AllocationPtr allocation2;
  std::thread th([&allocation2, &place, &stream2, alloc_size]() {
    std::this_thread::sleep_for(std::chrono::seconds(1));
    allocation2 = Alloc(place, alloc_size,
                        phi::Stream(reinterpret_cast<phi::StreamId>(stream2)));
  });
  allocation1.reset();  // free but not release
  th.join();
  EXPECT_GE(allocation2->size(), alloc_size);
  allocation2.reset();
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize());
#else
  PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize());
#endif
  Release(place, stream1);
  Release(place, stream2);
  CheckMemLeak(place);
}
// Fixture for Paddle's stream-safe GPU allocator tests. SetUp creates
// stream_num_ streams, each owning a stream-bound workspace buffer and a
// result buffer (both zero-filled on device). Each stream's run launches
// add_kernel twice into its result buffer, so CheckResult expects every
// result element to equal 2.
class StreamSafeCUDAAllocTest : public ::testing::Test {
protected:
// Allocates per-stream device buffers via the stream-aware AllocShared
// overload and zero-initializes them with cudaMemset/hipMemset.
void SetUp() override {
place_ = platform::CUDAPlace();
stream_num_ = 64;
grid_num_ = 1;
block_num_ = 32;
data_num_ = 131072;
workspace_size_ = data_num_ * sizeof(int);
for (size_t i = 0; i < stream_num_; ++i) {
gpuStream_t stream;
#ifdef PADDLE_WITH_CUDA
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
#endif
// workspace and result are bound to `stream` through phi::Stream so the
// allocator tracks which stream may still be using them
std::shared_ptr<phi::Allocation> workspace_allocation =
AllocShared(place_, workspace_size_,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
std::shared_ptr<phi::Allocation> result_allocation =
AllocShared(place_, workspace_size_,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
std::shared_ptr<phi::Allocation> host_result_allocation =
AllocShared(platform::CPUPlace(), workspace_size_);
#ifdef PADDLE_WITH_CUDA
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemset(workspace_allocation->ptr(), 0,
workspace_allocation->size()));
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemset(result_allocation->ptr(), 0, result_allocation->size()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemset(workspace_allocation->ptr(), 0,
workspace_allocation->size()));
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemset(result_allocation->ptr(), 0, result_allocation->size()));
#endif
streams_.emplace_back(stream);
workspaces_.emplace_back(workspace_allocation);
results_.emplace_back(result_allocation);
host_results_.emplace_back(host_result_allocation);
}
}
// Launches two adds into results_[idx] on streams_[idx]: one from its own
// workspace and one from the previous stream's workspace, creating a
// cross-stream use. RecordStream marks the neighbouring workspace as in use
// by streams_[idx] so the allocator won't hand it out before the kernel
// finishes (see the ordering comment in MultiStreamRun).
void SingleStreamRun(size_t idx) {
int *y = reinterpret_cast<int *>(results_[idx]->ptr());
int neighbouring_idx = idx > 0 ? idx - 1 : idx;
add_kernel<<<grid_num_, block_num_, 0, streams_[idx]>>>(
reinterpret_cast<int *>(workspaces_[idx]->ptr()), y, data_num_);
add_kernel<<<grid_num_, block_num_, 0, streams_[idx]>>>(
reinterpret_cast<int *>(workspaces_[neighbouring_idx]->ptr()), y,
data_num_);
RecordStream(workspaces_[neighbouring_idx], streams_[idx]);
}
// Sequentially runs every stream, dropping each workspace right after its
// kernels are queued to exercise fast garbage collection.
void MultiStreamRun() {
// Must run in reverse order, or the workspace_[i - 1] will be released
// before streams_[i]'s kernel launch
for (int i = stream_num_ - 1; i >= 0; --i) {
SingleStreamRun(i);
workspaces_[i].reset(); // fast GC
}
}
// Same as MultiStreamRun but with one host thread per stream; workspaces
// are only released after all threads joined.
void MultiThreadMultiStreamRun() {
std::vector<std::thread> threads;
for (size_t i = 0; i < stream_num_; ++i) {
threads.push_back(
std::thread(&StreamSafeCUDAAllocTest::SingleStreamRun, this, i));
}
for (size_t i = 0; i < stream_num_; ++i) {
threads[i].join();
}
workspaces_.clear();
}
// Captures one add_kernel launch (plus a cross-stream RecordStream) into a
// CUDA graph, replays it 10 times, and checks every element accumulated to
// replay_times. CUDA-only: it calls cuda* APIs unconditionally, and the
// TEST_F that invokes it is guarded by PADDLE_WITH_CUDA.
void CUDAGraphRun() {
testing_cuda_graph_ = true;
platform::BeginCUDAGraphCapture(platform::CUDAPlace(),
cudaStreamCaptureModeGlobal);
std::shared_ptr<Allocation> data_allocation =
AllocShared(platform::CUDAPlace(), workspace_size_);
std::shared_ptr<Allocation> result_allocation =
AllocShared(platform::CUDAPlace(), workspace_size_);
int *data = static_cast<int *>(data_allocation->ptr());
int *result = static_cast<int *>(result_allocation->ptr());
gpuStream_t main_stream = GetStream(data_allocation);
gpuStream_t other_stream;
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&other_stream));
add_kernel<<<grid_num_, block_num_, 0, main_stream>>>(data, result,
data_num_);
RecordStream(data_allocation, other_stream);
std::unique_ptr<platform::CUDAGraph> cuda_graph =
platform::EndCUDAGraphCapture();
int replay_times = 10;
for (int i = 0; i < replay_times; ++i) {
cuda_graph->Replay();
}
std::shared_ptr<Allocation> host_result_allocation =
AllocShared(platform::CPUPlace(), workspace_size_);
Copy(host_result_allocation->place(), host_result_allocation->ptr(),
result_allocation->place(), result_allocation->ptr(), workspace_size_,
main_stream);
// NOTE(review): return value unchecked — consider PADDLE_ENFORCE_GPU_SUCCESS
cudaStreamSynchronize(main_stream);
int *host_result = static_cast<int *>(host_result_allocation->ptr());
for (int i = 0; i < data_num_; ++i) {
EXPECT_EQ(host_result[i], replay_times);
}
data_allocation.reset();
result_allocation.reset();
cuda_graph.release();
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(other_stream));
}
// Copies every result buffer back to the host and verifies each element is
// 2 (one per add_kernel launch in SingleStreamRun).
void CheckResult() {
for (size_t i = 0; i < stream_num_; ++i) {
Copy(host_results_[i]->place(), host_results_[i]->ptr(),
results_[i]->place(), results_[i]->ptr(), workspace_size_,
streams_[i]);
}
// NOTE(review): return value unchecked; also uses cudaDeviceSynchronize
// without the PADDLE_WITH_CUDA/#else hipDeviceSynchronize split used above
cudaDeviceSynchronize();
// NOTE(review): thread_num is computed but never used — candidate for removal
size_t thread_num = grid_num_ * block_num_;
for (size_t i = 0; i < stream_num_; ++i) {
int *result = static_cast<int *>(host_results_[i]->ptr());
for (size_t j = 0; j < data_num_; ++j) {
EXPECT_EQ(result[j], 2);
}
}
}
// Releases allocations, returns cached memory per stream, destroys streams,
// and (outside CUDA Graph tests) asserts the allocator leaked nothing.
void TearDown() override {
workspaces_.clear();
results_.clear();
host_results_.clear();
for (gpuStream_t stream : streams_) {
Release(place_, stream);
}
for (size_t i = 0; i < stream_num_; ++i) {
#ifdef PADDLE_WITH_CUDA
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(streams_[i]));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(streams_[i]));
#endif
}
// Memory release for CUDA Graph memory pool is forbidden
if (!testing_cuda_graph_) {
CheckMemLeak(place_);
}
}
bool testing_cuda_graph_{0};  // set by CUDAGraphRun; skips the leak check
size_t stream_num_;
size_t grid_num_;
size_t block_num_;
size_t data_num_;       // elements per buffer
size_t workspace_size_; // bytes per buffer (data_num_ * sizeof(int))
platform::CUDAPlace place_;
std::vector<gpuStream_t> streams_;
std::vector<std::shared_ptr<phi::Allocation>> workspaces_;
std::vector<std::shared_ptr<phi::Allocation>> results_;
std::vector<std::shared_ptr<phi::Allocation>> host_results_;
};
// GTest entry points for the fixture above.
// NOTE(review): "Mutil" (sic) is kept — renaming would change the registered
// test names that CI filters may reference.
TEST_F(StreamSafeCUDAAllocTest, CUDAMutilStreamTest) {
MultiStreamRun();
CheckResult();
}
TEST_F(StreamSafeCUDAAllocTest, CUDAMutilThreadMutilStreamTest) {
MultiThreadMultiStreamRun();
CheckResult();
}
// CUDA Graph capture is only available on the CUDA backend.
#ifdef PADDLE_WITH_CUDA
TEST_F(StreamSafeCUDAAllocTest, CUDAGraphTest) {
MultiStreamRun();
CUDAGraphRun();
CheckResult();
}
#endif
} // namespace memory
} // namespace paddle
|
e36ed9eeef3d993fee2f2c143371ee05954cdd02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
// Strided element-wise ceiling (HIP port): result[i] = ceil(dy[i]) for every
// index i >= idx with i % incy == 0. Grid-stride loop, so any launch
// configuration covers all n elements.
// NOTE(review): the stride test is on the absolute index i, not (i - idx) —
// presumably intentional for this JCuda-style API; confirm against callers.
__global__ void ceil_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = ceil(dy[i]);
}
} | e36ed9eeef3d993fee2f2c143371ee05954cdd02.cu | #include "includes.h"
extern "C"
// Strided element-wise ceiling: result[i] = ceil(dy[i]) for every index
// i >= idx with i % incy == 0. Grid-stride loop, so any launch configuration
// covers all n elements.
// NOTE(review): the stride test is on the absolute index i, not (i - idx) —
// presumably intentional for this JCuda-style API; confirm against callers.
__global__ void ceil_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = ceil(dy[i]);
}
} | 
5899436f9071c89b5b6eafb22ac141046b8c3202.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ptychofft.cuh"
#include "kernels_ptycho.cu"
// Per-GPU setup for the ptychography FFT operators (HIP port): on each of
// `ngpus` devices, allocates the per-scan shift-factor buffers and creates
// one batched 2D ndet x ndet C2C FFT plan with batch size ptheta * nscan.
// NOTE(review): hipMalloc/hipfftPlanMany return codes are not checked.
ptychofft::ptychofft(size_t ptheta, size_t nz, size_t n,
size_t nscan, size_t ndet, size_t nprb, size_t ngpus)
: ptheta(ptheta), nz(nz), n(n), nscan(nscan), ndet(ndet), nprb(nprb), ngpus(ngpus) {
int ffts[2];
int idist;int odist;
int inembed[2];int onembed[2];
// 2D transform of size ndet x ndet; batches are contiguous (dist = ndet*ndet)
ffts[0] = ndet; ffts[1] = ndet;
idist = ndet*ndet; odist = ndet*ndet;
inembed[0] = ndet; inembed[1] = ndet;
onembed[0] = ndet; onembed[1] = ndet;
plan2d = new hipfftHandle[ngpus];
shiftx = new float2*[ngpus];
shifty = new float2*[ngpus];
for (int igpu=0;igpu<ngpus;igpu++)
{
hipSetDevice(igpu);
// one complex shift factor per scan position per angle
hipMalloc((void**)&shiftx[igpu],ptheta*nscan*sizeof(float2));
hipMalloc((void**)&shifty[igpu],ptheta*nscan*sizeof(float2));
hipfftPlanMany(&plan2d[igpu], 2, ffts, inembed, 1, idist, onembed, 1, odist, HIPFFT_C2C, ptheta*nscan);
}
hipSetDevice(0); // leave device 0 current
}
// Destructor: releases all per-GPU resources via free() (idempotent).
ptychofft::~ptychofft(){free();}
// Releases all per-GPU resources acquired in the constructor: device shift
// buffers, FFT plans, and the host-side per-GPU bookkeeping arrays.
// Idempotent via the is_free flag, so the destructor may call it after an
// explicit free(). Leaves device 0 current on exit.
void ptychofft::free()
{
if (!is_free)
{
for (int igpu=0;igpu<ngpus;igpu++)
{
hipSetDevice(igpu);
hipFree(shiftx[igpu]);
hipFree(shifty[igpu]);
hipfftDestroy(plan2d[igpu]);
}
// Fix: the constructor allocates these arrays with new[]; they were
// previously leaked. Null them so a stray use after free() faults loudly.
delete[] shiftx; shiftx = nullptr;
delete[] shifty; shifty = nullptr;
delete[] plan2d; plan2d = nullptr;
is_free = true;
hipSetDevice(0);
}
}
// Forward ptychography operator on GPU `igpu` (HIP port): multiply probe
// into object patches at each scan position (`mul`), run the batched forward
// 2D FFT over the ndet x ndet frames, then compute and apply per-scan phase
// shifts from the scan coordinates (`takeshifts` + `shifts`). All pointer
// arguments arrive as size_t handles to raw device memory; kernels live in
// kernels_ptycho.cu.
void ptychofft::fwd(size_t g_, size_t f_, size_t prb_, size_t scan_, size_t igpu)
{
hipSetDevice(igpu);
dim3 BS3d(32,32,1);
// grids cover: probe pixels x scans x angles, detector pixels x scans x
// angles, and scans x angles, respectively
dim3 GS3d0(ceil(nprb*nprb/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS3d1(ceil(ndet*ndet/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS2d0(ceil(nscan/(float)BS3d.x),ceil(ptheta/(float)BS3d.y));
float2* f = (float2*)f_;
float2* g = (float2*)g_;
float2* prb = (float2*)prb_;
// scan buffer layout: all y coordinates first, then all x coordinates
float* scany = (float*)&((float*)scan_)[0];
float* scanx = (float*)&((float*)scan_)[ptheta*nscan];
hipLaunchKernelGGL(( mul), dim3(GS3d0),dim3(BS3d), 0, 0, g,f,prb,scanx,scany,ptheta,nz,n,nscan,nprb,ndet,ndet);
hipfftExecC2C(plan2d[igpu], (hipfftComplex*)g,(hipfftComplex*)g,HIPFFT_FORWARD);
hipLaunchKernelGGL(( takeshifts), dim3(GS2d0),dim3(BS3d), 0, 0, shiftx[igpu],shifty[igpu],scanx,scany,ptheta,nscan);
hipLaunchKernelGGL(( shifts), dim3(GS3d1),dim3(BS3d), 0, 0, g, shiftx[igpu], shifty[igpu], ptheta, nscan, ndet*ndet);
}
// Adjoint of fwd() with respect to the object, on GPU `igpu` (HIP port):
// apply the adjoint phase shifts (`takeshifts` + `shiftsa`), run the batched
// inverse 2D FFT (HIPFFT_BACKWARD), then accumulate probe-weighted patches
// back into the object (`mula`). Mirrors fwd()'s steps in reverse order.
void ptychofft::adj(size_t f_, size_t g_, size_t prb_, size_t scan_, size_t igpu)
{
hipSetDevice(igpu);
dim3 BS3d(32,32,1);
// grids cover: probe pixels x scans x angles, detector pixels x scans x
// angles, and scans x angles, respectively
dim3 GS3d0(ceil(nprb*nprb/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS3d1(ceil(ndet*ndet/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS2d0(ceil(nscan/(float)BS3d.x),ceil(ptheta/(float)BS3d.y));
float2* f = (float2*)f_;
float2* g = (float2*)g_;
float2* prb = (float2*)prb_;
// scan buffer layout: all y coordinates first, then all x coordinates
float* scany = (float*)&((float*)scan_)[0];
float* scanx = (float*)&((float*)scan_)[ptheta*nscan];
hipLaunchKernelGGL(( takeshifts), dim3(GS2d0),dim3(BS3d), 0, 0, shiftx[igpu],shifty[igpu],scanx,scany,ptheta,nscan);
hipLaunchKernelGGL(( shiftsa), dim3(GS3d1),dim3(BS3d), 0, 0, g, shiftx[igpu], shifty[igpu], ptheta, nscan, ndet*ndet);
hipfftExecC2C(plan2d[igpu], (hipfftComplex*)g,(hipfftComplex*)g,HIPFFT_BACKWARD);
hipLaunchKernelGGL(( mula), dim3(GS3d0),dim3(BS3d), 0, 0, f,g,prb,scanx,scany,ptheta,nz,n,nscan,nprb,ndet,ndet);
}
// Adjoint of fwd() with respect to the probe, on GPU `igpu` (HIP port):
// identical shift/inverse-FFT steps as adj(), but the final kernel is
// `mulaprb`, which accumulates object-weighted patches into the probe.
// NOTE(review): mulaprb receives (f, g, prb, ...) in the same order as mula
// even though prb_ is this method's first parameter — presumably the kernel
// writes prb; confirm in kernels_ptycho.cu.
void ptychofft::adjprb(size_t prb_, size_t g_, size_t f_, size_t scan_, size_t igpu)
{
hipSetDevice(igpu);
dim3 BS3d(32,32,1);
dim3 GS3d0(ceil(nprb*nprb/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS3d1(ceil(ndet*ndet/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS2d0(ceil(nscan/(float)BS3d.x),ceil(ptheta/(float)BS3d.y));
float2* f = (float2*)f_;
float2* g = (float2*)g_;
float2* prb = (float2*)prb_;
// scan buffer layout: all y coordinates first, then all x coordinates
float* scany = (float*)&((float*)scan_)[0];
float* scanx = (float*)&((float*)scan_)[ptheta*nscan];
hipLaunchKernelGGL(( takeshifts), dim3(GS2d0),dim3(BS3d), 0, 0, shiftx[igpu],shifty[igpu],scanx,scany,ptheta,nscan);
hipLaunchKernelGGL(( shiftsa), dim3(GS3d1),dim3(BS3d), 0, 0, g, shiftx[igpu], shifty[igpu], ptheta, nscan, ndet*ndet);
hipfftExecC2C(plan2d[igpu], (hipfftComplex*)g,(hipfftComplex*)g,HIPFFT_BACKWARD);
hipLaunchKernelGGL(( mulaprb), dim3(GS3d0),dim3(BS3d), 0, 0, f,g,prb,scanx,scany,ptheta,nz,n,nscan,nprb,ndet,ndet);
}
| 5899436f9071c89b5b6eafb22ac141046b8c3202.cu | #include "ptychofft.cuh"
#include "kernels_ptycho.cu"
// Per-GPU setup for the ptychography FFT operators: on each of `ngpus`
// devices, allocates the per-scan shift-factor buffers and creates one
// batched 2D ndet x ndet C2C FFT plan with batch size ptheta * nscan.
// NOTE(review): cudaMalloc/cufftPlanMany return codes are not checked.
ptychofft::ptychofft(size_t ptheta, size_t nz, size_t n,
size_t nscan, size_t ndet, size_t nprb, size_t ngpus)
: ptheta(ptheta), nz(nz), n(n), nscan(nscan), ndet(ndet), nprb(nprb), ngpus(ngpus) {
int ffts[2];
int idist;int odist;
int inembed[2];int onembed[2];
// 2D transform of size ndet x ndet; batches are contiguous (dist = ndet*ndet)
ffts[0] = ndet; ffts[1] = ndet;
idist = ndet*ndet; odist = ndet*ndet;
inembed[0] = ndet; inembed[1] = ndet;
onembed[0] = ndet; onembed[1] = ndet;
plan2d = new cufftHandle[ngpus];
shiftx = new float2*[ngpus];
shifty = new float2*[ngpus];
for (int igpu=0;igpu<ngpus;igpu++)
{
cudaSetDevice(igpu);
// one complex shift factor per scan position per angle
cudaMalloc((void**)&shiftx[igpu],ptheta*nscan*sizeof(float2));
cudaMalloc((void**)&shifty[igpu],ptheta*nscan*sizeof(float2));
cufftPlanMany(&plan2d[igpu], 2, ffts, inembed, 1, idist, onembed, 1, odist, CUFFT_C2C, ptheta*nscan);
}
cudaSetDevice(0); // leave device 0 current
}
// Destructor: releases all per-GPU resources via free() (idempotent).
ptychofft::~ptychofft(){free();}
// Releases all per-GPU resources acquired in the constructor: device shift
// buffers, FFT plans, and the host-side per-GPU bookkeeping arrays.
// Idempotent via the is_free flag, so the destructor may call it after an
// explicit free(). Leaves device 0 current on exit.
void ptychofft::free()
{
if (!is_free)
{
for (int igpu=0;igpu<ngpus;igpu++)
{
cudaSetDevice(igpu);
cudaFree(shiftx[igpu]);
cudaFree(shifty[igpu]);
cufftDestroy(plan2d[igpu]);
}
// Fix: the constructor allocates these arrays with new[]; they were
// previously leaked. Null them so a stray use after free() faults loudly.
delete[] shiftx; shiftx = nullptr;
delete[] shifty; shifty = nullptr;
delete[] plan2d; plan2d = nullptr;
is_free = true;
cudaSetDevice(0);
}
}
// Forward ptychography operator on GPU `igpu`: multiply probe into object
// patches at each scan position (`mul`), run the batched forward 2D FFT over
// the ndet x ndet frames, then compute and apply per-scan phase shifts from
// the scan coordinates (`takeshifts` + `shifts`). All pointer arguments
// arrive as size_t handles to raw device memory; kernels live in
// kernels_ptycho.cu.
void ptychofft::fwd(size_t g_, size_t f_, size_t prb_, size_t scan_, size_t igpu)
{
cudaSetDevice(igpu);
dim3 BS3d(32,32,1);
// grids cover: probe pixels x scans x angles, detector pixels x scans x
// angles, and scans x angles, respectively
dim3 GS3d0(ceil(nprb*nprb/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS3d1(ceil(ndet*ndet/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS2d0(ceil(nscan/(float)BS3d.x),ceil(ptheta/(float)BS3d.y));
float2* f = (float2*)f_;
float2* g = (float2*)g_;
float2* prb = (float2*)prb_;
// scan buffer layout: all y coordinates first, then all x coordinates
float* scany = (float*)&((float*)scan_)[0];
float* scanx = (float*)&((float*)scan_)[ptheta*nscan];
mul<<<GS3d0,BS3d>>>(g,f,prb,scanx,scany,ptheta,nz,n,nscan,nprb,ndet,ndet);
cufftExecC2C(plan2d[igpu], (cufftComplex*)g,(cufftComplex*)g,CUFFT_FORWARD);
takeshifts<<<GS2d0,BS3d>>>(shiftx[igpu],shifty[igpu],scanx,scany,ptheta,nscan);
shifts<<<GS3d1,BS3d>>>(g, shiftx[igpu], shifty[igpu], ptheta, nscan, ndet*ndet);
}
// Adjoint of fwd() with respect to the object, on GPU `igpu`: apply the
// adjoint phase shifts (`takeshifts` + `shiftsa`), run the batched inverse
// 2D FFT (CUFFT_INVERSE), then accumulate probe-weighted patches back into
// the object (`mula`). Mirrors fwd()'s steps in reverse order.
void ptychofft::adj(size_t f_, size_t g_, size_t prb_, size_t scan_, size_t igpu)
{
cudaSetDevice(igpu);
dim3 BS3d(32,32,1);
// grids cover: probe pixels x scans x angles, detector pixels x scans x
// angles, and scans x angles, respectively
dim3 GS3d0(ceil(nprb*nprb/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS3d1(ceil(ndet*ndet/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS2d0(ceil(nscan/(float)BS3d.x),ceil(ptheta/(float)BS3d.y));
float2* f = (float2*)f_;
float2* g = (float2*)g_;
float2* prb = (float2*)prb_;
// scan buffer layout: all y coordinates first, then all x coordinates
float* scany = (float*)&((float*)scan_)[0];
float* scanx = (float*)&((float*)scan_)[ptheta*nscan];
takeshifts<<<GS2d0,BS3d>>>(shiftx[igpu],shifty[igpu],scanx,scany,ptheta,nscan);
shiftsa<<<GS3d1,BS3d>>>(g, shiftx[igpu], shifty[igpu], ptheta, nscan, ndet*ndet);
cufftExecC2C(plan2d[igpu], (cufftComplex*)g,(cufftComplex*)g,CUFFT_INVERSE);
mula<<<GS3d0,BS3d>>>(f,g,prb,scanx,scany,ptheta,nz,n,nscan,nprb,ndet,ndet);
}
// Adjoint of fwd() with respect to the probe, on GPU `igpu`: identical
// shift/inverse-FFT steps as adj(), but the final kernel is `mulaprb`, which
// accumulates object-weighted patches into the probe.
// NOTE(review): mulaprb receives (f, g, prb, ...) in the same order as mula
// even though prb_ is this method's first parameter — presumably the kernel
// writes prb; confirm in kernels_ptycho.cu.
void ptychofft::adjprb(size_t prb_, size_t g_, size_t f_, size_t scan_, size_t igpu)
{
cudaSetDevice(igpu);
dim3 BS3d(32,32,1);
dim3 GS3d0(ceil(nprb*nprb/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS3d1(ceil(ndet*ndet/(float)BS3d.x),ceil(nscan/(float)BS3d.y),ceil(ptheta/(float)BS3d.z));
dim3 GS2d0(ceil(nscan/(float)BS3d.x),ceil(ptheta/(float)BS3d.y));
float2* f = (float2*)f_;
float2* g = (float2*)g_;
float2* prb = (float2*)prb_;
// scan buffer layout: all y coordinates first, then all x coordinates
float* scany = (float*)&((float*)scan_)[0];
float* scanx = (float*)&((float*)scan_)[ptheta*nscan];
takeshifts<<<GS2d0,BS3d>>>(shiftx[igpu],shifty[igpu],scanx,scany,ptheta,nscan);
shiftsa<<<GS3d1,BS3d>>>(g, shiftx[igpu], shifty[igpu], ptheta, nscan, ndet*ndet);
cufftExecC2C(plan2d[igpu], (cufftComplex*)g,(cufftComplex*)g,CUFFT_INVERSE);
mulaprb<<<GS3d0,BS3d>>>(f,g,prb,scanx,scany,ptheta,nz,n,nscan,nprb,ndet,ndet);
}
|
3be96e1c00c453ca90231e0f4ab47ac5453a3fc3.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_FUNC cxx11_tensor_complex
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <hip/hip_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
// Checks device-side nullary/unary tensor expressions on complex floats
// (HIP port): fills gpu_in1 with a constant complex value and computes
// |gpu_in2| into a float tensor, then copies both back and compares against
// host expectations.
void test_cuda_nullary() {
Tensor<std::complex<float>, 1, 0, int> in1(2);
Tensor<std::complex<float>, 1, 0, int> in2(2);
in1.setRandom();
in2.setRandom();
std::size_t float_bytes = in1.size() * sizeof(float);
std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>);
std::complex<float>* d_in1;
std::complex<float>* d_in2;
float* d_out2;
hipMalloc((void**)(&d_in1), complex_bytes);
hipMalloc((void**)(&d_in2), complex_bytes);
hipMalloc((void**)(&d_out2), float_bytes);
hipMemcpy(d_in1, in1.data(), complex_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2.data(), complex_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, 2);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, 2);
Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2(
d_out2, 2);
// nullary constant assignment and unary abs(), both evaluated on device
gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f));
gpu_out2.device(gpu_device) = gpu_in2.abs();
Tensor<std::complex<float>, 1, 0, int> new1(2);
Tensor<float, 1, 0, int> new2(2);
// async copies on the device's stream; results valid after the sync below
assert(hipMemcpyAsync(new1.data(), d_in1, complex_bytes, hipMemcpyDeviceToHost,
gpu_device.stream()) == hipSuccess);
assert(hipMemcpyAsync(new2.data(), d_out2, float_bytes, hipMemcpyDeviceToHost,
gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (int i = 0; i < 2; ++i) {
VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f));
VERIFY_IS_APPROX(new2(i), std::abs(in2(i)));
}
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out2);
}
// Full sum-reduction of a random complex matrix on the GPU must match the
// host-side Tensor::sum() result (HIP port).
static void test_cuda_sum_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
// host reference value
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.sum();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.sum();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
// Full product-reduction of a random complex matrix on the GPU must match
// the host-side Tensor::prod() result (HIP port).
static void test_cuda_product_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
// host reference value
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.prod();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.prod();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
// Entry point invoked by Eigen's test harness (named via EIGEN_TEST_FUNC);
// runs each GPU complex-tensor subtest.
void test_cxx11_tensor_complex()
{
CALL_SUBTEST(test_cuda_nullary());
CALL_SUBTEST(test_cuda_sum_reductions());
CALL_SUBTEST(test_cuda_product_reductions());
}
| 3be96e1c00c453ca90231e0f4ab47ac5453a3fc3.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_FUNC cxx11_tensor_complex
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <cuda_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
// Checks device-side nullary/unary tensor expressions on complex floats:
// fills gpu_in1 with a constant complex value and computes |gpu_in2| into a
// float tensor, then copies both back and compares against host
// expectations.
void test_cuda_nullary() {
Tensor<std::complex<float>, 1, 0, int> in1(2);
Tensor<std::complex<float>, 1, 0, int> in2(2);
in1.setRandom();
in2.setRandom();
std::size_t float_bytes = in1.size() * sizeof(float);
std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>);
std::complex<float>* d_in1;
std::complex<float>* d_in2;
float* d_out2;
cudaMalloc((void**)(&d_in1), complex_bytes);
cudaMalloc((void**)(&d_in2), complex_bytes);
cudaMalloc((void**)(&d_out2), float_bytes);
cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, 2);
Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, 2);
Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2(
d_out2, 2);
// nullary constant assignment and unary abs(), both evaluated on device
gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f));
gpu_out2.device(gpu_device) = gpu_in2.abs();
Tensor<std::complex<float>, 1, 0, int> new1(2);
Tensor<float, 1, 0, int> new2(2);
// async copies on the device's stream; results valid after the sync below
assert(cudaMemcpyAsync(new1.data(), d_in1, complex_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
assert(cudaMemcpyAsync(new2.data(), d_out2, float_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (int i = 0; i < 2; ++i) {
VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f));
VERIFY_IS_APPROX(new2(i), std::abs(in2(i)));
}
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out2);
}
// Full sum-reduction of a random complex matrix on the GPU must match the
// host-side Tensor::sum() result.
static void test_cuda_sum_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
// host reference value
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.sum();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.sum();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
// Full product-reduction of a random complex matrix on the GPU must match
// the host-side Tensor::prod() result.
static void test_cuda_product_reductions() {
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
const int num_rows = internal::random<int>(1024, 5*1024);
const int num_cols = internal::random<int>(1024, 5*1024);
Tensor<std::complex<float>, 2> in(num_rows, num_cols);
in.setRandom();
// host reference value
Tensor<std::complex<float>, 0> full_redux;
full_redux = in.prod();
std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
out_gpu.device(gpu_device) = in_gpu.prod();
Tensor<std::complex<float>, 0> full_redux_gpu;
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
// Entry point invoked by Eigen's test harness (named via EIGEN_TEST_FUNC);
// runs each GPU complex-tensor subtest.
void test_cxx11_tensor_complex()
{
CALL_SUBTEST(test_cuda_nullary());
CALL_SUBTEST(test_cuda_sum_reductions());
CALL_SUBTEST(test_cuda_product_reductions());
}
|
2ccab8c013dbd35491866694c726623b98a7f36d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
// Element-wise product: result[id] = x[id] * y[id] for id < n.
// One thread per element; threads past n exit via the bounds guard.
// NOTE(review): `id` is int while `n` is size_t — the comparison promotes id
// to unsigned and the kernel cannot index past INT_MAX; confirm callers never
// pass larger n.
__global__ void vec_mulf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
} | 2ccab8c013dbd35491866694c726623b98a7f36d.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return the next representable single-precision floating-point value after the argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
// Element-wise vector product: result[i] = x[i] * y[i] for every i in [0, n).
// Launch with at least ceil(n / blockDim.x) blocks; surplus threads are masked off.
__global__ void vec_mulf (size_t n, float *result, float *x, float *y)
{
    // BUGFIX: the index was "int", which overflows (undefined behavior) once
    // blockIdx.x * blockDim.x exceeds INT_MAX, and "id < n" compared a signed
    // int against a size_t. Compute the flat global index in size_t instead.
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = x[id] * y[id];
    }
} |
09c336a61e01509044464f88c3fea9095b836b96.hip | // !!! This is a file automatically generated by hipify!!!
// Histogram Equalization
/*
usage:
*/
#define TIMER_OK
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
#include "../include/wb.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/scan.h>
/* general define for cuda, OBS: maybe not suitable for 3D */
#define Grid_width gridDim.x
#define Grid_height gridDim.y
#define Grid_pages gridDim.z
#define Grid_size (Grid_width * Grid_height)
#define Block_width blockDim.x
#define Block_height blockDim.y
#define Block_pages blockDim.z
#define BLOCK_SIZE (Block_width * Block_height)
#define localId_colIndex threadIdx.x
#define localId_rowIndex threadIdx.y
#define localId_overallIndex2D (localId_rowIndex * Block_width + localId_colIndex)
#define blockId_colIndex blockIdx.x
#define blockId_rowIndex blockIdx.y
#define blockId_overallIndex2D (blockId_rowIndex * Grid_width + blockId_colIndex)
#define globalId_colIndex (blockId_colIndex * Block_width + localId_colIndex)
#define globalId_rowIndex (blockId_rowIndex * Block_height + localId_rowIndex)
#define globalId_overallIndex2D (globalId_rowIndex * Block_width*Grid_width + globalId_colIndex)
#define __syncdevice hipDeviceSynchronize
/* specific define for this project */
#define Tile_width 16 // obs: related with HISTOGRAM_LENGTH
#define HISTOGRAM_LENGTH 256 // set to 256 to easy hist related thread actions. when HISTOGRAM_LENGTH < 256, hard to code. when HISTOGRAM_LENGTH > 256, need to add "if" statements
#define Channel_nr 3
#define Debug 1
#define Scan_alternative 0
//@@ insert kernel code here
// Stage 1: build a per-block grayscale histogram in shared memory, then
// atomically accumulate it into the global PDF pd_hist_dividedToFloatProbility.
// Expects 256 threads per block (Tile_width^2), one thread per histogram bin.
// pd_minMax[1] is a debug bitmask of kernel stages reached (bit 0 set here).
__global__
void preScan_global(float *pd_inputData, float *pd_outputData, float * pd_hist_dividedToFloatProbility, float * pd_minMax, int imageRowWidth, int imageColHeight) {
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] = 0.0f;
    // Per-block histogram: 256 threads each zero their own bin.
    __shared__ int pd_shared_hist[256]; pd_shared_hist[localId_overallIndex2D] = 0;
    float d_onePixelAllChannels[3];
    // load input and convert to unsigned char & calculate to local pd_shared_hist
    __syncthreads();
    if ((globalId_rowIndex < imageColHeight) && (globalId_colIndex < imageRowWidth)){
        int onePixelStartIndex = (globalId_rowIndex * imageRowWidth + globalId_colIndex) * Channel_nr;
        for (int channelIndex = 0; channelIndex < Channel_nr; channelIndex++){
            d_onePixelAllChannels[channelIndex] = (unsigned char)(255 * pd_inputData[onePixelStartIndex + channelIndex]);
        }
        // Luma-style weighting; NOTE(review): coefficients sum to 0.99, not 1.0 -- kept as-is.
        unsigned char t_grayValueAsCharAsIndex = (unsigned char)(0.21 * d_onePixelAllChannels[0] + 0.71 * d_onePixelAllChannels[1] + 0.07 * d_onePixelAllChannels[2]);
        //calculate to local pd_shared_hist
        atomicAdd(&pd_shared_hist[t_grayValueAsCharAsIndex], 1);
    }
    // division ; then atom-write to global hist probility pd_hist_dividedToFloatProbility pdf
    __syncthreads();
    float t_localHistOneFloatValue = ((float)(pd_shared_hist[localId_overallIndex2D])) / (imageRowWidth * imageColHeight);
    atomicAdd(&pd_hist_dividedToFloatProbility[localId_overallIndex2D], t_localHistOneFloatValue);
    // scan global hist probility pd_hist_dividedToFloatProbility to cdf
    __syncthreads();
    // BUGFIX: "2^0" is XOR (evaluates to 2), not exponentiation; use 1.0f for bit 0.
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 1.0f;
}
// Stage 2: block 0 performs a Brent-Kung style inclusive scan of the 256-bin
// PDF into a CDF (in shared memory), records the minimum non-zero CDF value
// in pd_minMax[0], and writes the CDF back to global memory.
// Expects 256 threads per block; all other blocks only touch the debug flags.
// pd_minMax[1] debug bitmask: bits 1..3 mark stages reached.
__global__
void scan_global(float *pd_inputData, float *pd_outputData, float * pd_hist_dividedToFloatProbility, float * pd_minMax, int imageRowWidth, int imageColHeight) {
    __shared__ float pd_shared_histFloat[256];
    // BUGFIX: "2^N" is XOR, not a power; use explicit float bit values (2, 4, 8).
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 2.0f;
    if (blockId_overallIndex2D == 0){
        if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 4.0f;
        int stride;
        int perThread_index;
        // load hist from global to local shared mem
        pd_shared_histFloat[localId_overallIndex2D] = pd_hist_dividedToFloatProbility[localId_overallIndex2D];
        __syncthreads();
        // use only 128 threads to do scan
        //if (localId_overallIndex2D < 128){ // __syncthreads() can NOT be inside if statement. // http://stackoverflow.com/questions/12519573/cuda-syncthreads-inside-if-statements
        // scan-> reduce
        for (stride = 1; stride <= 128; stride *= 2){
            __syncthreads();
            perThread_index = (localId_overallIndex2D + 1) * (stride * 2) - 1;
            if (perThread_index < 256){
                if (localId_overallIndex2D < 128)
                    pd_shared_histFloat[perThread_index] += pd_shared_histFloat[perThread_index - stride];
            }
        }
        // scan-> reverse reduce
        __syncthreads();
        for (stride = 128 / 2; stride >= 1; stride /= 2){
            __syncthreads();
            perThread_index = (localId_overallIndex2D + 1) * (stride * 2) - 1;
            if (perThread_index + stride < 256){
                if (localId_overallIndex2D < 128)
                    pd_shared_histFloat[perThread_index + stride] += pd_shared_histFloat[perThread_index];
            }
        }
        //} // if (localId_overallIndex2D < 128) // __syncthreads() can NOT be inside if statement. // http://stackoverflow.com/questions/12519573/cuda-syncthreads-inside-if-statements
        // save back to global mem
        __syncthreads();
        if (pd_shared_histFloat[localId_overallIndex2D] > 0.0f){
            // BUGFIX: atomicMin has no float overload. For positive IEEE-754 floats
            // the bit pattern orders identically to a signed int, so reinterpret the
            // value and use the int overload (all values here are > 0).
            atomicMin((int*)pd_minMax, __float_as_int(pd_shared_histFloat[localId_overallIndex2D]));
        }
        __syncthreads();
        pd_hist_dividedToFloatProbility[localId_overallIndex2D] = pd_shared_histFloat[localId_overallIndex2D];
    } // if (block == 0){
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 8.0f;
    __syncthreads();
}
// Stage 3: remap every channel of every pixel through the equalization CDF:
// new = 255 * (cdf[v] - min) / (cdf[255] - min), then stored back as a float in [0, 1).
// Expects 256 threads per block; pd_minMax[0] holds the minimum CDF value,
// pd_minMax[1] is the debug bitmask (bit 4 set here).
__global__ // normalization
void normalization_global(float *pd_inputData, float *pd_outputData, float * pd_hist_dividedToFloatProbility, float * pd_minMax, int imageRowWidth, int imageColHeight) {
    // BUGFIX: "2^4" is XOR (evaluates to 6), not 16; use the intended bit-4 value.
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 16.0f;
    __shared__ float pd_shared_histFloat[256];
    //// load cdf hist from global to local shared mem
    pd_shared_histFloat[localId_overallIndex2D] = pd_hist_dividedToFloatProbility[localId_overallIndex2D];
    __syncthreads();
    //// normalize
    if ((globalId_rowIndex < imageColHeight) && (globalId_colIndex < imageRowWidth)){
        int onePixelStartIndex = (globalId_rowIndex * imageRowWidth + globalId_colIndex) * Channel_nr;
        for (int channelIndex = 0; channelIndex < Channel_nr; channelIndex++){
            unsigned char colorValueAsCharAsIndex = (unsigned char)(255 * pd_inputData[onePixelStartIndex + channelIndex]);
            unsigned char newColorValue = 255 * (pd_shared_histFloat[colorValueAsCharAsIndex] - pd_minMax[0]) / (pd_shared_histFloat[255] - pd_minMax[0]);
            pd_outputData[onePixelStartIndex + channelIndex] = (float)newColorValue / 256.0f;
        }
    }
    __syncthreads();
}
// Host driver (HIP build): import the input image, allocate and populate device
// buffers, run the three-kernel equalization pipeline (PDF histogram -> CDF scan
// -> remap), copy the result back, and submit it via wbSolution.
int main(int argc, char ** argv) {
    wbArg_t args;
    int imageWidth;
    int imageHeight;
    int imageChannels;
    wbImage_t inputImage_struct;
    wbImage_t outputImage_struct;
    const char * inputImage_structFile;
    //@@ Insert more code here
    float * ph_inputData;
    float * ph_outputData;
    float * pd_hist_dividedToFloatProbility;
    float ph_hist_dividedToFloatProbility[256];
    float * pd_inputData;
    float * pd_outputData;
    // ph_minMax[0]: minimum CDF value used by normalization; ph_minMax[1]: kernel debug bitmask.
    float ph_minMax[2]; ph_minMax[0] = 1.0; ph_minMax[1] = 0.0;
    float * pd_minMax;
    args = wbArg_read(argc, argv); /* parse the input arguments */
    inputImage_structFile = wbArg_getInputFile(args, 0);
    wbTime_start(Generic, "Importing data and creating memory on host");
    inputImage_struct = wbImport(inputImage_structFile);
    imageWidth = wbImage_getWidth(inputImage_struct);
    imageHeight = wbImage_getHeight(inputImage_struct);
    imageChannels = wbImage_getChannels(inputImage_struct);
    outputImage_struct = wbImage_new(imageWidth, imageHeight, imageChannels);
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "imageWidth: ", imageWidth);
    wbLog(TRACE, "imageHeight: ", imageHeight);
    wbLog(TRACE, "imageChannels: ", imageChannels);
    //@@ insert code here
    ph_inputData = wbImage_getData(inputImage_struct);
    ph_outputData = wbImage_getData(outputImage_struct);
    wbTime_start(GPU, "Doing GPU/CPU Computation (memory + compute)");
    wbTime_start(GPU, "Doing GPU memory allocation");
    wbCheck(hipMalloc((void **)&pd_inputData, imageWidth * imageHeight * imageChannels * sizeof(float)));
    wbCheck(hipMalloc((void **)&pd_outputData, imageWidth * imageHeight * imageChannels * sizeof(float)));
    wbCheck(hipMalloc((void **)&pd_hist_dividedToFloatProbility, 256 * sizeof(float)));
    wbCheck(hipMalloc((void **)&pd_minMax, 2 * sizeof(float)));
    wbTime_stop(GPU, "Doing GPU memory allocation");
    wbTime_start(Copy, "Copying data to the GPU");
    wbCheck(hipMemcpy(pd_inputData, ph_inputData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice));
    wbCheck(hipMemcpy(pd_minMax, ph_minMax, 2 * sizeof(float), hipMemcpyHostToDevice));
    // Histogram/PDF buffer must start zeroed: preScan_global accumulates into it atomically.
    wbCheck(hipMemset(pd_hist_dividedToFloatProbility, 0, 256 * sizeof(float)));
    wbTime_stop(Copy, "Copying data to the GPU");
    wbTime_start(Compute, "Doing the computation on the GPU preScan_global");
    //@@ INSERT CODE HERE
    // One thread per pixel; Tile_width^2 == 256 threads per block (one per histogram bin).
    dim3 DimBlock(Tile_width, Tile_width, 1);
    dim3 DimGrid((imageWidth - 1) / Tile_width + 1, (imageHeight - 1) / Tile_width + 1, 1);
    hipLaunchKernelGGL(( preScan_global) , dim3(DimGrid), dim3(DimBlock) , 0, 0, pd_inputData, pd_outputData, pd_hist_dividedToFloatProbility, pd_minMax, imageWidth, imageHeight); __syncdevice();
    wbTime_stop(Compute, "Doing the computation on the GPU preScan_global");
    // NOTE(review): duplicated synchronize calls are hipify artifacts
    // (cudaThreadSynchronize + cudaDeviceSynchronize both mapped to the same call).
    hipDeviceSynchronize();
    hipDeviceSynchronize();
#if Debug
    wbCheck(hipMemcpy(ph_hist_dividedToFloatProbility, pd_hist_dividedToFloatProbility, 256 * sizeof(float), hipMemcpyDeviceToHost));
    for (int i = 0; i < 256; i++){
        wbLog(TRACE, "pdf hist ", i, ": ", ph_hist_dividedToFloatProbility[i]);
    }
#endif
#if Scan_alternative
    // Alternative host-side CDF path via Thrust (disabled: Scan_alternative == 0).
    wbCheck(hipMemcpy(ph_hist_dividedToFloatProbility, pd_hist_dividedToFloatProbility, 256 * sizeof(float), hipMemcpyDeviceToHost));
    wbTime_start(Compute, "Scan by thrust");
    thrust::inclusive_scan(ph_hist_dividedToFloatProbility, ph_hist_dividedToFloatProbility + 256, ph_hist_dividedToFloatProbility);
    wbTime_stop(Compute, "Scan by thrust");
    wbCheck(hipMemcpy(pd_hist_dividedToFloatProbility, ph_hist_dividedToFloatProbility, 256 * sizeof(float), hipMemcpyHostToDevice));
    hipDeviceSynchronize();
    hipDeviceSynchronize();
    ph_minMax[0] = 1.0;
    for (int i = 0; i < 256; i++){
        if (ph_hist_dividedToFloatProbility[i] > 0){
            ph_minMax[0] = min(ph_minMax[0], ph_hist_dividedToFloatProbility[i]);
        }
        wbLog(TRACE, "ph_minMax[0] ", ph_minMax[0]);
    }
    wbCheck(hipMemcpy(pd_minMax, ph_minMax, 1 * sizeof(float), hipMemcpyHostToDevice));
#else
    // GPU scan path: reset the minimum, then let scan_global compute CDF and min.
    ph_minMax[0] = 0.0f;
    wbCheck(hipMemcpy(pd_minMax, ph_minMax, 1 * sizeof(float), hipMemcpyHostToDevice));
    hipDeviceSynchronize();
    hipDeviceSynchronize();
    wbTime_start(Compute, "Scan by GPU");
    hipLaunchKernelGGL(( scan_global) , dim3(DimGrid), dim3(DimBlock) , 0, 0, pd_inputData, pd_outputData, pd_hist_dividedToFloatProbility, pd_minMax, imageWidth, imageHeight); __syncdevice();
    wbTime_stop(Compute, "Scan by GPU");
#endif
#if Debug
    wbCheck(hipMemcpy(ph_hist_dividedToFloatProbility, pd_hist_dividedToFloatProbility, 256 * sizeof(float), hipMemcpyDeviceToHost));
    for (int i = 0; i < 256; i++){
        wbLog(TRACE, "cdf hist ", i, ": ", ph_hist_dividedToFloatProbility[i]);
    }
#endif
    hipDeviceSynchronize();
    hipDeviceSynchronize();
    wbTime_start(Compute, "Doing the computation on the GPU normalization_global");
    hipLaunchKernelGGL(( normalization_global) , dim3(DimGrid), dim3(DimBlock) , 0, 0, pd_inputData, pd_outputData, pd_hist_dividedToFloatProbility, pd_minMax, imageWidth, imageHeight); __syncdevice();
    wbTime_stop(Compute, "Doing the computation on the GPU normalization_global");
    hipDeviceSynchronize();
    hipDeviceSynchronize();
    wbTime_start(Copy, "Copying data from the GPU");
    wbCheck(hipMemcpy(ph_outputData, pd_outputData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying data from the GPU");
    wbTime_stop(GPU, "Doing GPU/CPU Computation (memory + compute)");
#if Debug
    // Fetch only the debug bitmask (element [1]); element [0] was logged host-side.
    wbCheck(hipMemcpy(&(ph_minMax[1]), &(pd_minMax[1]), 1 * sizeof(float), hipMemcpyDeviceToHost));
    wbLog(TRACE, "ph_minMax[0]: ", ph_minMax[0]);
    wbLog(TRACE, "ph_minMax[1]: ", ph_minMax[1]);
#endif
    wbSolution(args, outputImage_struct);
    //@@ insert code here
    wbCheck(hipFree(pd_inputData));
    wbCheck(hipFree(pd_outputData));
    wbCheck(hipFree(pd_hist_dividedToFloatProbility));
    wbCheck(hipFree(pd_minMax));
    return 0;
}
| 09c336a61e01509044464f88c3fea9095b836b96.cu | // Histogram Equalization
/*
usage:
*/
#define TIMER_OK
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
#include "../include/wb.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/scan.h>
/* general define for cuda, OBS: maybe not suitable for 3D */
#define Grid_width gridDim.x
#define Grid_height gridDim.y
#define Grid_pages gridDim.z
#define Grid_size (Grid_width * Grid_height)
#define Block_width blockDim.x
#define Block_height blockDim.y
#define Block_pages blockDim.z
#define BLOCK_SIZE (Block_width * Block_height)
#define localId_colIndex threadIdx.x
#define localId_rowIndex threadIdx.y
#define localId_overallIndex2D (localId_rowIndex * Block_width + localId_colIndex)
#define blockId_colIndex blockIdx.x
#define blockId_rowIndex blockIdx.y
#define blockId_overallIndex2D (blockId_rowIndex * Grid_width + blockId_colIndex)
#define globalId_colIndex (blockId_colIndex * Block_width + localId_colIndex)
#define globalId_rowIndex (blockId_rowIndex * Block_height + localId_rowIndex)
#define globalId_overallIndex2D (globalId_rowIndex * Block_width*Grid_width + globalId_colIndex)
#define __syncdevice cudaDeviceSynchronize
/* specific define for this project */
#define Tile_width 16 // obs: related with HISTOGRAM_LENGTH
#define HISTOGRAM_LENGTH 256 // set to 256 to easy hist related thread actions. when HISTOGRAM_LENGTH < 256, hard to code. when HISTOGRAM_LENGTH > 256, need to add "if" statements
#define Channel_nr 3
#define Debug 1
#define Scan_alternative 0
//@@ insert kernel code here
// Stage 1: build a per-block grayscale histogram in shared memory, then
// atomically accumulate it into the global PDF pd_hist_dividedToFloatProbility.
// Expects 256 threads per block (Tile_width^2), one thread per histogram bin.
// pd_minMax[1] is a debug bitmask of kernel stages reached (bit 0 set here).
__global__
void preScan_global(float *pd_inputData, float *pd_outputData, float * pd_hist_dividedToFloatProbility, float * pd_minMax, int imageRowWidth, int imageColHeight) {
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] = 0.0f;
    // Per-block histogram: 256 threads each zero their own bin.
    __shared__ int pd_shared_hist[256]; pd_shared_hist[localId_overallIndex2D] = 0;
    float d_onePixelAllChannels[3];
    // load input and convert to unsigned char & calculate to local pd_shared_hist
    __syncthreads();
    if ((globalId_rowIndex < imageColHeight) && (globalId_colIndex < imageRowWidth)){
        int onePixelStartIndex = (globalId_rowIndex * imageRowWidth + globalId_colIndex) * Channel_nr;
        for (int channelIndex = 0; channelIndex < Channel_nr; channelIndex++){
            d_onePixelAllChannels[channelIndex] = (unsigned char)(255 * pd_inputData[onePixelStartIndex + channelIndex]);
        }
        // Luma-style weighting; NOTE(review): coefficients sum to 0.99, not 1.0 -- kept as-is.
        unsigned char t_grayValueAsCharAsIndex = (unsigned char)(0.21 * d_onePixelAllChannels[0] + 0.71 * d_onePixelAllChannels[1] + 0.07 * d_onePixelAllChannels[2]);
        //calculate to local pd_shared_hist
        atomicAdd(&pd_shared_hist[t_grayValueAsCharAsIndex], 1);
    }
    // division ; then atom-write to global hist probility pd_hist_dividedToFloatProbility pdf
    __syncthreads();
    float t_localHistOneFloatValue = ((float)(pd_shared_hist[localId_overallIndex2D])) / (imageRowWidth * imageColHeight);
    atomicAdd(&pd_hist_dividedToFloatProbility[localId_overallIndex2D], t_localHistOneFloatValue);
    // scan global hist probility pd_hist_dividedToFloatProbility to cdf
    __syncthreads();
    // BUGFIX: "2^0" is XOR (evaluates to 2), not exponentiation; use 1.0f for bit 0.
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 1.0f;
}
// Stage 2: block 0 performs a Brent-Kung style inclusive scan of the 256-bin
// PDF into a CDF (in shared memory), records the minimum non-zero CDF value
// in pd_minMax[0], and writes the CDF back to global memory.
// Expects 256 threads per block; all other blocks only touch the debug flags.
// pd_minMax[1] debug bitmask: bits 1..3 mark stages reached.
__global__
void scan_global(float *pd_inputData, float *pd_outputData, float * pd_hist_dividedToFloatProbility, float * pd_minMax, int imageRowWidth, int imageColHeight) {
    __shared__ float pd_shared_histFloat[256];
    // BUGFIX: "2^N" is XOR, not a power; use explicit float bit values (2, 4, 8).
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 2.0f;
    if (blockId_overallIndex2D == 0){
        if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 4.0f;
        int stride;
        int perThread_index;
        // load hist from global to local shared mem
        pd_shared_histFloat[localId_overallIndex2D] = pd_hist_dividedToFloatProbility[localId_overallIndex2D];
        __syncthreads();
        // use only 128 threads to do scan
        //if (localId_overallIndex2D < 128){ // __syncthreads() can NOT be inside if statement. // http://stackoverflow.com/questions/12519573/cuda-syncthreads-inside-if-statements
        // scan-> reduce
        for (stride = 1; stride <= 128; stride *= 2){
            __syncthreads();
            perThread_index = (localId_overallIndex2D + 1) * (stride * 2) - 1;
            if (perThread_index < 256){
                if (localId_overallIndex2D < 128)
                    pd_shared_histFloat[perThread_index] += pd_shared_histFloat[perThread_index - stride];
            }
        }
        // scan-> reverse reduce
        __syncthreads();
        for (stride = 128 / 2; stride >= 1; stride /= 2){
            __syncthreads();
            perThread_index = (localId_overallIndex2D + 1) * (stride * 2) - 1;
            if (perThread_index + stride < 256){
                if (localId_overallIndex2D < 128)
                    pd_shared_histFloat[perThread_index + stride] += pd_shared_histFloat[perThread_index];
            }
        }
        //} // if (localId_overallIndex2D < 128) // __syncthreads() can NOT be inside if statement. // http://stackoverflow.com/questions/12519573/cuda-syncthreads-inside-if-statements
        // save back to global mem
        __syncthreads();
        if (pd_shared_histFloat[localId_overallIndex2D] > 0.0f){
            // BUGFIX: atomicMin has no float overload. For positive IEEE-754 floats
            // the bit pattern orders identically to a signed int, so reinterpret the
            // value and use the int overload (all values here are > 0).
            atomicMin((int*)pd_minMax, __float_as_int(pd_shared_histFloat[localId_overallIndex2D]));
        }
        __syncthreads();
        pd_hist_dividedToFloatProbility[localId_overallIndex2D] = pd_shared_histFloat[localId_overallIndex2D];
    } // if (block == 0){
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 8.0f;
    __syncthreads();
}
// Stage 3: remap every channel of every pixel through the equalization CDF:
// new = 255 * (cdf[v] - min) / (cdf[255] - min), then stored back as a float in [0, 1).
// Expects 256 threads per block; pd_minMax[0] holds the minimum CDF value,
// pd_minMax[1] is the debug bitmask (bit 4 set here).
__global__ // normalization
void normalization_global(float *pd_inputData, float *pd_outputData, float * pd_hist_dividedToFloatProbility, float * pd_minMax, int imageRowWidth, int imageColHeight) {
    // BUGFIX: "2^4" is XOR (evaluates to 6), not 16; use the intended bit-4 value.
    if (Debug && globalId_overallIndex2D == 0) pd_minMax[1] += 16.0f;
    __shared__ float pd_shared_histFloat[256];
    //// load cdf hist from global to local shared mem
    pd_shared_histFloat[localId_overallIndex2D] = pd_hist_dividedToFloatProbility[localId_overallIndex2D];
    __syncthreads();
    //// normalize
    if ((globalId_rowIndex < imageColHeight) && (globalId_colIndex < imageRowWidth)){
        int onePixelStartIndex = (globalId_rowIndex * imageRowWidth + globalId_colIndex) * Channel_nr;
        for (int channelIndex = 0; channelIndex < Channel_nr; channelIndex++){
            unsigned char colorValueAsCharAsIndex = (unsigned char)(255 * pd_inputData[onePixelStartIndex + channelIndex]);
            unsigned char newColorValue = 255 * (pd_shared_histFloat[colorValueAsCharAsIndex] - pd_minMax[0]) / (pd_shared_histFloat[255] - pd_minMax[0]);
            pd_outputData[onePixelStartIndex + channelIndex] = (float)newColorValue / 256.0f;
        }
    }
    __syncthreads();
}
// Host driver (CUDA build): import the input image, allocate and populate device
// buffers, run the three-kernel equalization pipeline (PDF histogram -> CDF scan
// -> remap), copy the result back, and submit it via wbSolution.
int main(int argc, char ** argv) {
    wbArg_t args;
    int imageWidth;
    int imageHeight;
    int imageChannels;
    wbImage_t inputImage_struct;
    wbImage_t outputImage_struct;
    const char * inputImage_structFile;
    //@@ Insert more code here
    float * ph_inputData;
    float * ph_outputData;
    float * pd_hist_dividedToFloatProbility;
    float ph_hist_dividedToFloatProbility[256];
    float * pd_inputData;
    float * pd_outputData;
    // ph_minMax[0]: minimum CDF value used by normalization; ph_minMax[1]: kernel debug bitmask.
    float ph_minMax[2]; ph_minMax[0] = 1.0; ph_minMax[1] = 0.0;
    float * pd_minMax;
    args = wbArg_read(argc, argv); /* parse the input arguments */
    inputImage_structFile = wbArg_getInputFile(args, 0);
    wbTime_start(Generic, "Importing data and creating memory on host");
    inputImage_struct = wbImport(inputImage_structFile);
    imageWidth = wbImage_getWidth(inputImage_struct);
    imageHeight = wbImage_getHeight(inputImage_struct);
    imageChannels = wbImage_getChannels(inputImage_struct);
    outputImage_struct = wbImage_new(imageWidth, imageHeight, imageChannels);
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "imageWidth: ", imageWidth);
    wbLog(TRACE, "imageHeight: ", imageHeight);
    wbLog(TRACE, "imageChannels: ", imageChannels);
    //@@ insert code here
    ph_inputData = wbImage_getData(inputImage_struct);
    ph_outputData = wbImage_getData(outputImage_struct);
    wbTime_start(GPU, "Doing GPU/CPU Computation (memory + compute)");
    wbTime_start(GPU, "Doing GPU memory allocation");
    wbCheck(cudaMalloc((void **)&pd_inputData, imageWidth * imageHeight * imageChannels * sizeof(float)));
    wbCheck(cudaMalloc((void **)&pd_outputData, imageWidth * imageHeight * imageChannels * sizeof(float)));
    wbCheck(cudaMalloc((void **)&pd_hist_dividedToFloatProbility, 256 * sizeof(float)));
    wbCheck(cudaMalloc((void **)&pd_minMax, 2 * sizeof(float)));
    wbTime_stop(GPU, "Doing GPU memory allocation");
    wbTime_start(Copy, "Copying data to the GPU");
    wbCheck(cudaMemcpy(pd_inputData, ph_inputData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice));
    wbCheck(cudaMemcpy(pd_minMax, ph_minMax, 2 * sizeof(float), cudaMemcpyHostToDevice));
    // Histogram/PDF buffer must start zeroed: preScan_global accumulates into it atomically.
    wbCheck(cudaMemset(pd_hist_dividedToFloatProbility, 0, 256 * sizeof(float)));
    wbTime_stop(Copy, "Copying data to the GPU");
    wbTime_start(Compute, "Doing the computation on the GPU preScan_global");
    //@@ INSERT CODE HERE
    // One thread per pixel; Tile_width^2 == 256 threads per block (one per histogram bin).
    dim3 DimBlock(Tile_width, Tile_width, 1);
    dim3 DimGrid((imageWidth - 1) / Tile_width + 1, (imageHeight - 1) / Tile_width + 1, 1);
    preScan_global <<< DimGrid, DimBlock >>> (pd_inputData, pd_outputData, pd_hist_dividedToFloatProbility, pd_minMax, imageWidth, imageHeight); __syncdevice();
    wbTime_stop(Compute, "Doing the computation on the GPU preScan_global");
    // BUGFIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12); a single
    // cudaDeviceSynchronize() replaces each old thread-sync + device-sync pair.
    cudaDeviceSynchronize();
#if Debug
    wbCheck(cudaMemcpy(ph_hist_dividedToFloatProbility, pd_hist_dividedToFloatProbility, 256 * sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < 256; i++){
        wbLog(TRACE, "pdf hist ", i, ": ", ph_hist_dividedToFloatProbility[i]);
    }
#endif
#if Scan_alternative
    // Alternative host-side CDF path via Thrust (disabled: Scan_alternative == 0).
    wbCheck(cudaMemcpy(ph_hist_dividedToFloatProbility, pd_hist_dividedToFloatProbility, 256 * sizeof(float), cudaMemcpyDeviceToHost));
    wbTime_start(Compute, "Scan by thrust");
    thrust::inclusive_scan(ph_hist_dividedToFloatProbility, ph_hist_dividedToFloatProbility + 256, ph_hist_dividedToFloatProbility);
    wbTime_stop(Compute, "Scan by thrust");
    wbCheck(cudaMemcpy(pd_hist_dividedToFloatProbility, ph_hist_dividedToFloatProbility, 256 * sizeof(float), cudaMemcpyHostToDevice));
    cudaDeviceSynchronize(); // BUGFIX: was deprecated cudaThreadSynchronize() plus a duplicate sync
    ph_minMax[0] = 1.0;
    for (int i = 0; i < 256; i++){
        if (ph_hist_dividedToFloatProbility[i] > 0){
            ph_minMax[0] = min(ph_minMax[0], ph_hist_dividedToFloatProbility[i]);
        }
        wbLog(TRACE, "ph_minMax[0] ", ph_minMax[0]);
    }
    wbCheck(cudaMemcpy(pd_minMax, ph_minMax, 1 * sizeof(float), cudaMemcpyHostToDevice));
#else
    // GPU scan path: reset the minimum, then let scan_global compute CDF and min.
    ph_minMax[0] = 0.0f;
    wbCheck(cudaMemcpy(pd_minMax, ph_minMax, 1 * sizeof(float), cudaMemcpyHostToDevice));
    cudaDeviceSynchronize(); // BUGFIX: was deprecated cudaThreadSynchronize() plus a duplicate sync
    wbTime_start(Compute, "Scan by GPU");
    scan_global <<< DimGrid, DimBlock >>> (pd_inputData, pd_outputData, pd_hist_dividedToFloatProbility, pd_minMax, imageWidth, imageHeight); __syncdevice();
    wbTime_stop(Compute, "Scan by GPU");
#endif
#if Debug
    wbCheck(cudaMemcpy(ph_hist_dividedToFloatProbility, pd_hist_dividedToFloatProbility, 256 * sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < 256; i++){
        wbLog(TRACE, "cdf hist ", i, ": ", ph_hist_dividedToFloatProbility[i]);
    }
#endif
    cudaDeviceSynchronize(); // BUGFIX: was deprecated cudaThreadSynchronize() plus a duplicate sync
    wbTime_start(Compute, "Doing the computation on the GPU normalization_global");
    normalization_global <<< DimGrid, DimBlock >>> (pd_inputData, pd_outputData, pd_hist_dividedToFloatProbility, pd_minMax, imageWidth, imageHeight); __syncdevice();
    wbTime_stop(Compute, "Doing the computation on the GPU normalization_global");
    cudaDeviceSynchronize(); // BUGFIX: was deprecated cudaThreadSynchronize() plus a duplicate sync
    wbTime_start(Copy, "Copying data from the GPU");
    wbCheck(cudaMemcpy(ph_outputData, pd_outputData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying data from the GPU");
    wbTime_stop(GPU, "Doing GPU/CPU Computation (memory + compute)");
#if Debug
    // Fetch only the debug bitmask (element [1]); element [0] was logged host-side.
    wbCheck(cudaMemcpy(&(ph_minMax[1]), &(pd_minMax[1]), 1 * sizeof(float), cudaMemcpyDeviceToHost));
    wbLog(TRACE, "ph_minMax[0]: ", ph_minMax[0]);
    wbLog(TRACE, "ph_minMax[1]: ", ph_minMax[1]);
#endif
    wbSolution(args, outputImage_struct);
    //@@ insert code here
    wbCheck(cudaFree(pd_inputData));
    wbCheck(cudaFree(pd_outputData));
    wbCheck(cudaFree(pd_hist_dividedToFloatProbility));
    wbCheck(cudaFree(pd_minMax));
    return 0;
}
|
251641c2b9763152a58ecac3425949443bdf6bcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "LatinSquares.cuh"
#include "Utils.h"
#include "MIN.h"
#include <math.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>
constexpr auto OUTFILE1 = "./Results/results-ls.txt";
constexpr auto OUTFILE2 = "./Results/results-mols.txt";
constexpr auto DEBUG_LOG = "./Results/log.txt";
constexpr auto CROSS_SEARCH = true;
#define CUDA_ERROR_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Backend for CUDA_ERROR_CHECK: print the HIP error name/description together
// with the call site, and (by default) terminate using the error code as the
// process exit status. No-op when the call succeeded.
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
    if (code == hipSuccess) return;
    fprintf(stderr, "%s: %s %s %d\n", hipGetErrorName(code), hipGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Check the last asynchronous HIP error (e.g. from a kernel launch) without
// aborting. Previously this printed "hipSuccess: no error" on every call,
// flooding the debug log; now only actual failures are reported.
void cuda_handle_error() {
    auto err = hipGetLastError();
    if (err != hipSuccess)
        printf("%s: %s\n", hipGetErrorName(err), hipGetErrorString(err));
}
// Search for mutually orthogonal Latin squares (MOLS) across the two matrix
// families: each candidate from the characteristic-matrix family is compared
// against each candidate from the rotation-matrix family ("cross" method).
// Phase 1 generates NUM_LATIN_SQUARES random configurations per family on the
// GPU and tests each for the Latin-square property; phase 2 tests all
// NUM_LATIN_SQUARES^2 cross pairs for mutual orthogonality.
// Results go to OUTFILE1/OUTFILE2; stdout is redirected to DEBUG_LOG.
void cross_method_search() {
    const unsigned int N = 16;
    // NOTE(review): SWITCHES and STAGES are computed but never used below.
    const unsigned int SWITCHES = N / 2;
    const unsigned int STAGES = (2 * log2(N)) - 1;
    const unsigned int MOLS_GRID_PARALLEL = 200;
    const unsigned int MOLS_SEQ_CUTOFF = 100;
    const unsigned int NUM_LATIN_SQUARES = MOLS_GRID_PARALLEL * MOLS_SEQ_CUTOFF;
    const bool DEBUG = false;
    // Redirect all printf output to the debug log for the rest of the run.
    freopen(DEBUG_LOG, "w+", stdout);
    /// COMPUTE LATIN SQUARES
    printf("Computing latin squares...\n");
    auto start_ls = clock();
    // 16-node, degree-6 butterfly-butterfly topology (16 x 6 adjacency table).
    int* topology = new int[16 * 6];
    make_butterfly_butterfly_topology(topology);
    // 16 matrices of 8 x 7 booleans each, for the two candidate families.
    bool* char_mat = new bool[16 * 8 * 7];
    bool* rot_mat = new bool[16 * 8 * 7];
    make_characteristic_matrices(0, char_mat);
    generate_rotation_configurations(topology, rot_mat);
    bool* dev_char_mat;
    bool* dev_rot_mat;
    int* dev_topology;
    bool* dev_conf_char;       // per-candidate switch configuration (8 x 7 bools)
    bool* dev_conf_rot;
    bool* dev_is_latin_square_char;  // one verdict flag per candidate
    bool* dev_is_latin_square_rot;
    int* dev_perm_char;        // per-candidate 16 x 16 permutation/square
    int* dev_perm_rot;
    hiprandState_t* dev_states1;   // one RNG state per candidate
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_char_mat, (16 * 8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_rot_mat, (16 * 8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_topology, (16 * 6) * sizeof(int)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_conf_char, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_is_latin_square_char, (NUM_LATIN_SQUARES) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_perm_char, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_conf_rot, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_is_latin_square_rot, (NUM_LATIN_SQUARES) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_perm_rot, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_states1, NUM_LATIN_SQUARES * sizeof(hiprandState_t)));
    CUDA_ERROR_CHECK(hipMemcpy(dev_char_mat, char_mat, (16 * 8 * 7) * sizeof(bool), hipMemcpyHostToDevice));
    CUDA_ERROR_CHECK(hipMemcpy(dev_rot_mat, rot_mat, (16 * 8 * 7) * sizeof(bool), hipMemcpyHostToDevice));
    CUDA_ERROR_CHECK(hipMemcpy(dev_topology, topology, (16 * 6) * sizeof(int), hipMemcpyHostToDevice));
    // One block of one thread per candidate: seed RNG, then test both families.
    setup_rand_state << <NUM_LATIN_SQUARES, 1 >> > (dev_states1);
    cuda_handle_error();
    check_latin_square << <NUM_LATIN_SQUARES, 1 >> > (
        dev_states1, dev_char_mat, dev_topology, dev_conf_char, dev_is_latin_square_char, dev_perm_char
        );
    cuda_handle_error();
    check_latin_square << <NUM_LATIN_SQUARES, 1 >> > (
        dev_states1, dev_rot_mat, dev_topology, dev_conf_rot, dev_is_latin_square_rot, dev_perm_rot
        );
    cuda_handle_error();
    // Inputs are no longer needed on either side.
    delete[] topology;
    delete[] char_mat;
    delete[] rot_mat;
    CUDA_ERROR_CHECK(hipFree(dev_char_mat));
    CUDA_ERROR_CHECK(hipFree(dev_rot_mat));
    CUDA_ERROR_CHECK(hipFree(dev_topology));
    CUDA_ERROR_CHECK(hipFree(dev_states1));
    bool* out_is_ls_char = new bool[NUM_LATIN_SQUARES];
    bool* out_is_ls_rot = new bool[NUM_LATIN_SQUARES];
    bool* out_conf_char = new bool[(NUM_LATIN_SQUARES) * (8 * 7)];
    bool* out_conf_rot = new bool[(NUM_LATIN_SQUARES) * (8 * 7)];
    int* out_perm_char = new int[(NUM_LATIN_SQUARES) * (16 * 16)];
    int* out_perm_rot = new int[(NUM_LATIN_SQUARES) * (16 * 16)];
    CUDA_ERROR_CHECK(hipMemcpy(out_is_ls_char, dev_is_latin_square_char, NUM_LATIN_SQUARES * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_conf_char, dev_conf_char, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_perm_char, dev_perm_char, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_is_ls_rot, dev_is_latin_square_rot, NUM_LATIN_SQUARES * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_conf_rot, dev_conf_rot, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_perm_rot, dev_perm_rot, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipFree(dev_conf_char));
    CUDA_ERROR_CHECK(hipFree(dev_conf_rot));
    FILE* fd_ls = fopen(OUTFILE1, "w+");
    fprintf(fd_ls, "Latin squares for characteristic matrices:\n\n");
    write_output_latin_square(fd_ls, out_is_ls_char, out_conf_char, out_perm_char, NUM_LATIN_SQUARES);
    fprintf(fd_ls, "Latin squares for rotation matrices:\n\n");
    write_output_latin_square(fd_ls, out_is_ls_rot, out_conf_rot, out_perm_rot, NUM_LATIN_SQUARES);
    fclose(fd_ls);
    delete[] out_conf_char;
    delete[] out_conf_rot;
    delete[] out_is_ls_char;
    delete[] out_is_ls_rot;
    auto end_ls = clock();
    // NOTE(review): clock() deltas divided by CLOCKS_PER_SEC are seconds, not ms.
    printf("Done in %6.4f ms.\n", (double)(end_ls - start_ls) / CLOCKS_PER_SEC);
    /// END: COMPUTE LATIN SQUARES
    /// COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    printf("Computing MOLS...\n");
    auto start_mols = clock();
    bool* dev_mols;    // orthogonality verdict per (char, rot) pair
    int* dev_pairs;    // the two candidate indices of each pair
    hiprandState_t* dev_states2;
    dim3 grid_size;
    unsigned int NUM_COMPARISONS = NUM_LATIN_SQUARES * NUM_LATIN_SQUARES;
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_mols, (NUM_COMPARISONS) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_pairs, (NUM_COMPARISONS * 2) * sizeof(int)));
    // Grid (NUM_LATIN_SQUARES x MOLS_GRID_PARALLEL) blocks x MOLS_SEQ_CUTOFF threads
    // covers exactly NUM_COMPARISONS pairs.
    grid_size = dim3(NUM_LATIN_SQUARES, MOLS_GRID_PARALLEL, 1);
    check_mols_complete << < grid_size, MOLS_SEQ_CUTOFF >> > (
        dev_perm_char, dev_perm_rot, dev_is_latin_square_char, dev_is_latin_square_rot, dev_mols, dev_pairs, DEBUG
        );
    cuda_handle_error();
    // NOTE(review): missing ';' after this macro call -- harmless because
    // CUDA_ERROR_CHECK expands to a braced block, but add one for consistency.
    CUDA_ERROR_CHECK(hipFree(dev_is_latin_square_char))
    CUDA_ERROR_CHECK(hipFree(dev_is_latin_square_rot));
    CUDA_ERROR_CHECK(hipFree(dev_perm_char));
    CUDA_ERROR_CHECK(hipFree(dev_perm_rot));
    bool* out_mols = new bool[NUM_COMPARISONS];
    int* out_pairs = new int[NUM_COMPARISONS * 2];
    CUDA_ERROR_CHECK(hipMemcpy(out_mols, dev_mols, NUM_COMPARISONS * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_pairs, dev_pairs, NUM_COMPARISONS * 2 * sizeof(int), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipFree(dev_mols));
    CUDA_ERROR_CHECK(hipFree(dev_pairs));
    FILE* fd_mols = fopen(OUTFILE2, "w+");
    write_output_mols(fd_mols, out_mols, out_perm_char, out_perm_rot, out_pairs, NUM_COMPARISONS);
    fclose(fd_mols);
    auto end_mols = clock();
    printf("Done in %6.4f ms.\n", (double)(end_mols - start_mols) / CLOCKS_PER_SEC);
    /// END: COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    // release memory
    delete[] out_perm_char;
    delete[] out_perm_rot;
    delete[] out_mols;
    delete[] out_pairs;
}
/**
 * Latin-square / MOLS search for a single construction method (HIP build).
 * METHOD selects how candidate configurations are generated (0 = characteristic
 * matrices, 1 = rotation configurations). Stage 1 generates NUM_LATIN_SQUARES
 * candidates on the GPU and flags which ones form latin squares (written to
 * OUTFILE1); stage 2 checks candidate pairs for mutual orthogonality, either
 * exhaustively (DO_COMPLETE_CHECK) or on LS_SAMPLES random partners per square,
 * and writes the result to OUTFILE2. All stdout output goes to DEBUG_LOG.
 */
void single_method_search() {
    const unsigned int METHOD = 0;
    const unsigned int N = 16;
    // Network dimensions; unused below but kept to document the setup.
    const unsigned int SWITCHES = N / 2;
    const unsigned int STAGES = (2 * log2(N)) - 1;
    const unsigned int MOLS_GRID_PARALLEL = 10;
    const unsigned int MOLS_SEQ_CUTOFF = 8;
    const unsigned int NUM_LATIN_SQUARES = MOLS_GRID_PARALLEL * MOLS_SEQ_CUTOFF;
    // const unsigned int BLOCK = 10;
    const unsigned int LS_SAMPLES = 200;
    const bool DO_COMPLETE_CHECK = true;
    const bool DEBUG = true;
    // Redirect stdout to the debug log; on failure keep logging to the console.
    if (freopen(DEBUG_LOG, "w+", stdout) == nullptr) {
        fprintf(stderr, "warning: could not redirect stdout to %s\n", DEBUG_LOG);
    }
    /// COMPUTE LATIN SQUARES
    printf("Computing latin squares...\n");
    auto start_ls = clock();
    int* topology = new int[16 * 6];
    make_butterfly_butterfly_topology(topology);
    bool* char_mat = new bool[16 * 8 * 7];
    switch (METHOD) {
    case 0: make_characteristic_matrices(0, char_mat); break;
    case 1: generate_rotation_configurations(topology, char_mat); break;
    }
    // Device buffers: inputs, per-candidate configurations, latin-square flags
    // and the resulting permutation matrices.
    bool* dev_char_mat;
    int* dev_topology;
    bool* dev_conf;
    bool* dev_is_latin_square;
    int* dev_perm;
    hiprandState_t* dev_states1;
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_char_mat, (16 * 8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_topology, (16 * 6) * sizeof(int)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_conf, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_is_latin_square, (NUM_LATIN_SQUARES) * sizeof(bool)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_perm, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int)));
    CUDA_ERROR_CHECK(hipMalloc((void**)&dev_states1, NUM_LATIN_SQUARES * sizeof(hiprandState_t)));
    CUDA_ERROR_CHECK(hipMemcpy(dev_char_mat, char_mat, (16 * 8 * 7) * sizeof(bool), hipMemcpyHostToDevice));
    CUDA_ERROR_CHECK(hipMemcpy(dev_topology, topology, (16 * 6) * sizeof(int), hipMemcpyHostToDevice));
    // One RNG state per candidate square.
    setup_rand_state<<<NUM_LATIN_SQUARES, 1>>>(dev_states1);
    cuda_handle_error();
    check_latin_square<<<NUM_LATIN_SQUARES, 1>>>(dev_states1, dev_char_mat, dev_topology, dev_conf, dev_is_latin_square, dev_perm);
    cuda_handle_error();
    delete[] topology;
    delete[] char_mat;
    CUDA_ERROR_CHECK(hipFree(dev_char_mat));
    CUDA_ERROR_CHECK(hipFree(dev_topology));
    CUDA_ERROR_CHECK(hipFree(dev_states1));
    bool* out_is_ls = new bool[NUM_LATIN_SQUARES];
    bool* out_conf = new bool[(NUM_LATIN_SQUARES) * (8 * 7)];
    int* out_perm = new int[(NUM_LATIN_SQUARES) * (16 * 16)];
    CUDA_ERROR_CHECK(hipMemcpy(out_is_ls, dev_is_latin_square, NUM_LATIN_SQUARES * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_conf, dev_conf, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_perm, dev_perm, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipFree(dev_conf));
    // Persist latin-square results; fail fast on a null stream (fprintf on
    // NULL would be undefined behaviour).
    FILE* fd_ls = fopen(OUTFILE1, "w+");
    if (fd_ls == nullptr) {
        fprintf(stderr, "error: cannot open %s for writing\n", OUTFILE1);
        exit(EXIT_FAILURE);
    }
    write_output_latin_square(fd_ls, out_is_ls, out_conf, out_perm, NUM_LATIN_SQUARES);
    fclose(fd_ls);
    delete[] out_conf;
    delete[] out_is_ls;
    auto end_ls = clock();
    // clock() yields CPU ticks; scale by 1000/CLOCKS_PER_SEC so the value
    // actually is milliseconds as the message claims.
    printf("Done in %6.4f ms.\n", (double)(end_ls - start_ls) * 1000.0 / CLOCKS_PER_SEC);
    /// END: COMPUTE LATIN SQUARES
    /// COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    printf("Computing MOLS, complete check: %d...\n", DO_COMPLETE_CHECK);
    auto start_mols = clock();
    bool* dev_mols;
    int* dev_pairs;
    hiprandState_t* dev_states2;
    dim3 grid_size;
    unsigned int NUM_COMPARISONS;
    if (DO_COMPLETE_CHECK) {
        // Exhaustive pairwise check over all candidate squares.
        NUM_COMPARISONS = NUM_LATIN_SQUARES * NUM_LATIN_SQUARES;
        CUDA_ERROR_CHECK(hipMalloc((void**)&dev_mols, (NUM_COMPARISONS) * sizeof(bool)));
        CUDA_ERROR_CHECK(hipMalloc((void**)&dev_pairs, (NUM_COMPARISONS * 2) * sizeof(int)));
        grid_size = dim3(NUM_LATIN_SQUARES, MOLS_GRID_PARALLEL, 1);
        check_mols_complete<<<grid_size, MOLS_SEQ_CUTOFF>>>(dev_perm, dev_is_latin_square, dev_mols, dev_pairs, DEBUG);
        cuda_handle_error();
        CUDA_ERROR_CHECK(hipDeviceSynchronize());
    }
    else {
        // Randomized check: LS_SAMPLES random partners per candidate square.
        NUM_COMPARISONS = NUM_LATIN_SQUARES * LS_SAMPLES;
        CUDA_ERROR_CHECK(hipMalloc((void**)&dev_states2, (NUM_COMPARISONS) * sizeof(hiprandState_t)));
        CUDA_ERROR_CHECK(hipMalloc((void**)&dev_mols, (NUM_COMPARISONS) * sizeof(bool)));
        CUDA_ERROR_CHECK(hipMalloc((void**)&dev_pairs, (NUM_COMPARISONS * 2) * sizeof(int)));
        setup_rand_state<<<NUM_LATIN_SQUARES, LS_SAMPLES>>>(dev_states2);
        cuda_handle_error();
        check_mols_random<<<NUM_LATIN_SQUARES, LS_SAMPLES>>>(dev_states2, dev_perm, dev_is_latin_square, dev_mols, dev_pairs, DEBUG);
        cuda_handle_error();
        CUDA_ERROR_CHECK(hipFree(dev_states2));
    }
    CUDA_ERROR_CHECK(hipFree(dev_is_latin_square));
    CUDA_ERROR_CHECK(hipFree(dev_perm));
    bool* out_mols = new bool[NUM_COMPARISONS];
    int* out_pairs = new int[NUM_COMPARISONS * 2];
    CUDA_ERROR_CHECK(hipMemcpy(out_mols, dev_mols, NUM_COMPARISONS * sizeof(bool), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipMemcpy(out_pairs, dev_pairs, NUM_COMPARISONS * 2 * sizeof(int), hipMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(hipFree(dev_mols));
    CUDA_ERROR_CHECK(hipFree(dev_pairs));
    FILE* fd_mols = fopen(OUTFILE2, "w+");
    if (fd_mols == nullptr) {
        fprintf(stderr, "error: cannot open %s for writing\n", OUTFILE2);
        exit(EXIT_FAILURE);
    }
    write_output_mols(fd_mols, out_mols, out_perm, out_pairs, NUM_COMPARISONS);
    fclose(fd_mols);
    auto end_mols = clock();
    printf("Done in %6.4f ms.\n", (double)(end_mols - start_mols) * 1000.0 / CLOCKS_PER_SEC);
    /// END: COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    // release memory
    delete[] out_perm;
    delete[] out_mols;
    delete[] out_pairs;
}
/// Entry point for the GPU search: dispatches to the cross-method or the
/// single-method strategy according to the compile-time CROSS_SEARCH flag.
void cuda_main() {
    CROSS_SEARCH ? cross_method_search() : single_method_search();
}
| 251641c2b9763152a58ecac3425949443bdf6bcb.cu | #include "LatinSquares.cuh"
#include "Utils.h"
#include "MIN.h"
#include <math.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>
constexpr auto OUTFILE1 = "./Results/results-ls.txt";
constexpr auto OUTFILE2 = "./Results/results-mols.txt";
constexpr auto DEBUG_LOG = "./Results/log.txt";
constexpr auto CROSS_SEARCH = true;
// Checks a CUDA runtime API return code at the call site.
// NOTE: this expands to a bare brace block rather than the conventional
// do { ... } while (0), so a trailing semicolon is optional (some call sites
// in this file omit it); do not use it as the body of an un-braced if/else.
#define CUDA_ERROR_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error (name, description, source location) to stderr and,
// unless abort == false, terminates the process with the error code.
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "%s: %s %s %d\n", cudaGetErrorName(code), cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Logs the most recent CUDA runtime status (name + description) to stdout.
// Called after every kernel launch; it prints even on success, which feeds the
// debug log, and cudaGetLastError() clears the sticky error it reads.
void cuda_handle_error() {
    cudaError_t status = cudaGetLastError();
    printf("%s: %s\n", cudaGetErrorName(status), cudaGetErrorString(status));
}
/**
 * Cross-method latin-square / MOLS search.
 * Generates NUM_LATIN_SQUARES candidate squares with BOTH construction methods
 * (characteristic matrices and rotation configurations), writes the per-method
 * latin-square results to OUTFILE1, then exhaustively tests (characteristic,
 * rotation) pairs for mutual orthogonality and writes the result to OUTFILE2.
 * All stdout output is redirected to DEBUG_LOG.
 */
void cross_method_search() {
    const unsigned int N = 16;
    // Network dimensions; unused below but kept to document the setup.
    const unsigned int SWITCHES = N / 2;
    const unsigned int STAGES = (2 * log2(N)) - 1;
    const unsigned int MOLS_GRID_PARALLEL = 200;
    const unsigned int MOLS_SEQ_CUTOFF = 100;
    const unsigned int NUM_LATIN_SQUARES = MOLS_GRID_PARALLEL * MOLS_SEQ_CUTOFF;
    const bool DEBUG = false;
    // Redirect stdout to the debug log; on failure keep logging to the console.
    if (freopen(DEBUG_LOG, "w+", stdout) == nullptr) {
        fprintf(stderr, "warning: could not redirect stdout to %s\n", DEBUG_LOG);
    }
    /// COMPUTE LATIN SQUARES
    printf("Computing latin squares...\n");
    auto start_ls = clock();
    int* topology = new int[16 * 6];
    make_butterfly_butterfly_topology(topology);
    bool* char_mat = new bool[16 * 8 * 7];
    bool* rot_mat = new bool[16 * 8 * 7];
    make_characteristic_matrices(0, char_mat);
    generate_rotation_configurations(topology, rot_mat);
    // Device buffers: inputs, per-candidate configurations, latin-square flags
    // and permutation matrices for both construction methods.
    bool* dev_char_mat;
    bool* dev_rot_mat;
    int* dev_topology;
    bool* dev_conf_char;
    bool* dev_conf_rot;
    bool* dev_is_latin_square_char;
    bool* dev_is_latin_square_rot;
    int* dev_perm_char;
    int* dev_perm_rot;
    curandState* dev_states1;
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_char_mat, (16 * 8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_rot_mat, (16 * 8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_topology, (16 * 6) * sizeof(int)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_conf_char, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_is_latin_square_char, (NUM_LATIN_SQUARES) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_perm_char, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_conf_rot, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_is_latin_square_rot, (NUM_LATIN_SQUARES) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_perm_rot, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_states1, NUM_LATIN_SQUARES * sizeof(curandState)));
    CUDA_ERROR_CHECK(cudaMemcpy(dev_char_mat, char_mat, (16 * 8 * 7) * sizeof(bool), cudaMemcpyHostToDevice));
    CUDA_ERROR_CHECK(cudaMemcpy(dev_rot_mat, rot_mat, (16 * 8 * 7) * sizeof(bool), cudaMemcpyHostToDevice));
    CUDA_ERROR_CHECK(cudaMemcpy(dev_topology, topology, (16 * 6) * sizeof(int), cudaMemcpyHostToDevice));
    // One RNG state per candidate square, shared by both check passes.
    setup_rand_state<<<NUM_LATIN_SQUARES, 1>>>(dev_states1);
    cuda_handle_error();
    check_latin_square<<<NUM_LATIN_SQUARES, 1>>>(
        dev_states1, dev_char_mat, dev_topology, dev_conf_char, dev_is_latin_square_char, dev_perm_char
    );
    cuda_handle_error();
    check_latin_square<<<NUM_LATIN_SQUARES, 1>>>(
        dev_states1, dev_rot_mat, dev_topology, dev_conf_rot, dev_is_latin_square_rot, dev_perm_rot
    );
    cuda_handle_error();
    delete[] topology;
    delete[] char_mat;
    delete[] rot_mat;
    CUDA_ERROR_CHECK(cudaFree(dev_char_mat));
    CUDA_ERROR_CHECK(cudaFree(dev_rot_mat));
    CUDA_ERROR_CHECK(cudaFree(dev_topology));
    CUDA_ERROR_CHECK(cudaFree(dev_states1));
    bool* out_is_ls_char = new bool[NUM_LATIN_SQUARES];
    bool* out_is_ls_rot = new bool[NUM_LATIN_SQUARES];
    bool* out_conf_char = new bool[(NUM_LATIN_SQUARES) * (8 * 7)];
    bool* out_conf_rot = new bool[(NUM_LATIN_SQUARES) * (8 * 7)];
    int* out_perm_char = new int[(NUM_LATIN_SQUARES) * (16 * 16)];
    int* out_perm_rot = new int[(NUM_LATIN_SQUARES) * (16 * 16)];
    CUDA_ERROR_CHECK(cudaMemcpy(out_is_ls_char, dev_is_latin_square_char, NUM_LATIN_SQUARES * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_conf_char, dev_conf_char, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_perm_char, dev_perm_char, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_is_ls_rot, dev_is_latin_square_rot, NUM_LATIN_SQUARES * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_conf_rot, dev_conf_rot, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_perm_rot, dev_perm_rot, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaFree(dev_conf_char));
    CUDA_ERROR_CHECK(cudaFree(dev_conf_rot));
    // Persist latin-square results; fail fast on a null stream (fprintf on
    // NULL would be undefined behaviour).
    FILE* fd_ls = fopen(OUTFILE1, "w+");
    if (fd_ls == nullptr) {
        fprintf(stderr, "error: cannot open %s for writing\n", OUTFILE1);
        exit(EXIT_FAILURE);
    }
    fprintf(fd_ls, "Latin squares for characteristic matrices:\n\n");
    write_output_latin_square(fd_ls, out_is_ls_char, out_conf_char, out_perm_char, NUM_LATIN_SQUARES);
    fprintf(fd_ls, "Latin squares for rotation matrices:\n\n");
    write_output_latin_square(fd_ls, out_is_ls_rot, out_conf_rot, out_perm_rot, NUM_LATIN_SQUARES);
    fclose(fd_ls);
    delete[] out_conf_char;
    delete[] out_conf_rot;
    delete[] out_is_ls_char;
    delete[] out_is_ls_rot;
    auto end_ls = clock();
    // clock() yields CPU ticks; scale by 1000/CLOCKS_PER_SEC so the value
    // actually is milliseconds as the message claims.
    printf("Done in %6.4f ms.\n", (double)(end_ls - start_ls) * 1000.0 / CLOCKS_PER_SEC);
    /// END: COMPUTE LATIN SQUARES
    /// COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    printf("Computing MOLS...\n");
    auto start_mols = clock();
    bool* dev_mols;
    int* dev_pairs;
    dim3 grid_size;
    unsigned int NUM_COMPARISONS = NUM_LATIN_SQUARES * NUM_LATIN_SQUARES;
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_mols, (NUM_COMPARISONS) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_pairs, (NUM_COMPARISONS * 2) * sizeof(int)));
    grid_size = dim3(NUM_LATIN_SQUARES, MOLS_GRID_PARALLEL, 1);
    check_mols_complete<<<grid_size, MOLS_SEQ_CUTOFF>>>(
        dev_perm_char, dev_perm_rot, dev_is_latin_square_char, dev_is_latin_square_rot, dev_mols, dev_pairs, DEBUG
    );
    cuda_handle_error();
    CUDA_ERROR_CHECK(cudaFree(dev_is_latin_square_char)); // semicolon was previously missing here
    CUDA_ERROR_CHECK(cudaFree(dev_is_latin_square_rot));
    CUDA_ERROR_CHECK(cudaFree(dev_perm_char));
    CUDA_ERROR_CHECK(cudaFree(dev_perm_rot));
    bool* out_mols = new bool[NUM_COMPARISONS];
    int* out_pairs = new int[NUM_COMPARISONS * 2];
    CUDA_ERROR_CHECK(cudaMemcpy(out_mols, dev_mols, NUM_COMPARISONS * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_pairs, dev_pairs, NUM_COMPARISONS * 2 * sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaFree(dev_mols));
    CUDA_ERROR_CHECK(cudaFree(dev_pairs));
    FILE* fd_mols = fopen(OUTFILE2, "w+");
    if (fd_mols == nullptr) {
        fprintf(stderr, "error: cannot open %s for writing\n", OUTFILE2);
        exit(EXIT_FAILURE);
    }
    write_output_mols(fd_mols, out_mols, out_perm_char, out_perm_rot, out_pairs, NUM_COMPARISONS);
    fclose(fd_mols);
    auto end_mols = clock();
    printf("Done in %6.4f ms.\n", (double)(end_mols - start_mols) * 1000.0 / CLOCKS_PER_SEC);
    /// END: COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    // release memory
    delete[] out_perm_char;
    delete[] out_perm_rot;
    delete[] out_mols;
    delete[] out_pairs;
}
/**
 * Latin-square / MOLS search for a single construction method.
 * METHOD selects how candidate configurations are generated (0 = characteristic
 * matrices, 1 = rotation configurations). Stage 1 generates NUM_LATIN_SQUARES
 * candidates on the GPU and flags which ones form latin squares (written to
 * OUTFILE1); stage 2 checks candidate pairs for mutual orthogonality, either
 * exhaustively (DO_COMPLETE_CHECK) or on LS_SAMPLES random partners per square,
 * and writes the result to OUTFILE2. All stdout output goes to DEBUG_LOG.
 */
void single_method_search() {
    const unsigned int METHOD = 0;
    const unsigned int N = 16;
    // Network dimensions; unused below but kept to document the setup.
    const unsigned int SWITCHES = N / 2;
    const unsigned int STAGES = (2 * log2(N)) - 1;
    const unsigned int MOLS_GRID_PARALLEL = 10;
    const unsigned int MOLS_SEQ_CUTOFF = 8;
    const unsigned int NUM_LATIN_SQUARES = MOLS_GRID_PARALLEL * MOLS_SEQ_CUTOFF;
    // const unsigned int BLOCK = 10;
    const unsigned int LS_SAMPLES = 200;
    const bool DO_COMPLETE_CHECK = true;
    const bool DEBUG = true;
    // Redirect stdout to the debug log; on failure keep logging to the console.
    if (freopen(DEBUG_LOG, "w+", stdout) == nullptr) {
        fprintf(stderr, "warning: could not redirect stdout to %s\n", DEBUG_LOG);
    }
    /// COMPUTE LATIN SQUARES
    printf("Computing latin squares...\n");
    auto start_ls = clock();
    int* topology = new int[16 * 6];
    make_butterfly_butterfly_topology(topology);
    bool* char_mat = new bool[16 * 8 * 7];
    switch (METHOD) {
    case 0: make_characteristic_matrices(0, char_mat); break;
    case 1: generate_rotation_configurations(topology, char_mat); break;
    }
    // Device buffers: inputs, per-candidate configurations, latin-square flags
    // and the resulting permutation matrices.
    bool* dev_char_mat;
    int* dev_topology;
    bool* dev_conf;
    bool* dev_is_latin_square;
    int* dev_perm;
    curandState* dev_states1;
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_char_mat, (16 * 8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_topology, (16 * 6) * sizeof(int)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_conf, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_is_latin_square, (NUM_LATIN_SQUARES) * sizeof(bool)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_perm, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int)));
    CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_states1, NUM_LATIN_SQUARES * sizeof(curandState)));
    CUDA_ERROR_CHECK(cudaMemcpy(dev_char_mat, char_mat, (16 * 8 * 7) * sizeof(bool), cudaMemcpyHostToDevice));
    CUDA_ERROR_CHECK(cudaMemcpy(dev_topology, topology, (16 * 6) * sizeof(int), cudaMemcpyHostToDevice));
    // One RNG state per candidate square.
    setup_rand_state<<<NUM_LATIN_SQUARES, 1>>>(dev_states1);
    cuda_handle_error();
    check_latin_square<<<NUM_LATIN_SQUARES, 1>>>(dev_states1, dev_char_mat, dev_topology, dev_conf, dev_is_latin_square, dev_perm);
    cuda_handle_error();
    delete[] topology;
    delete[] char_mat;
    CUDA_ERROR_CHECK(cudaFree(dev_char_mat));
    CUDA_ERROR_CHECK(cudaFree(dev_topology));
    CUDA_ERROR_CHECK(cudaFree(dev_states1));
    bool* out_is_ls = new bool[NUM_LATIN_SQUARES];
    bool* out_conf = new bool[(NUM_LATIN_SQUARES) * (8 * 7)];
    int* out_perm = new int[(NUM_LATIN_SQUARES) * (16 * 16)];
    CUDA_ERROR_CHECK(cudaMemcpy(out_is_ls, dev_is_latin_square, NUM_LATIN_SQUARES * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_conf, dev_conf, (NUM_LATIN_SQUARES) * (8 * 7) * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_perm, dev_perm, (NUM_LATIN_SQUARES) * (16 * 16) * sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaFree(dev_conf));
    // Persist latin-square results; fail fast on a null stream (fprintf on
    // NULL would be undefined behaviour).
    FILE* fd_ls = fopen(OUTFILE1, "w+");
    if (fd_ls == nullptr) {
        fprintf(stderr, "error: cannot open %s for writing\n", OUTFILE1);
        exit(EXIT_FAILURE);
    }
    write_output_latin_square(fd_ls, out_is_ls, out_conf, out_perm, NUM_LATIN_SQUARES);
    fclose(fd_ls);
    delete[] out_conf;
    delete[] out_is_ls;
    auto end_ls = clock();
    // clock() yields CPU ticks; scale by 1000/CLOCKS_PER_SEC so the value
    // actually is milliseconds as the message claims.
    printf("Done in %6.4f ms.\n", (double)(end_ls - start_ls) * 1000.0 / CLOCKS_PER_SEC);
    /// END: COMPUTE LATIN SQUARES
    /// COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    printf("Computing MOLS, complete check: %d...\n", DO_COMPLETE_CHECK);
    auto start_mols = clock();
    bool* dev_mols;
    int* dev_pairs;
    curandState* dev_states2;
    dim3 grid_size;
    unsigned int NUM_COMPARISONS;
    if (DO_COMPLETE_CHECK) {
        // Exhaustive pairwise check over all candidate squares.
        NUM_COMPARISONS = NUM_LATIN_SQUARES * NUM_LATIN_SQUARES;
        CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_mols, (NUM_COMPARISONS) * sizeof(bool)));
        CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_pairs, (NUM_COMPARISONS * 2) * sizeof(int)));
        grid_size = dim3(NUM_LATIN_SQUARES, MOLS_GRID_PARALLEL, 1);
        check_mols_complete<<<grid_size, MOLS_SEQ_CUTOFF>>>(dev_perm, dev_is_latin_square, dev_mols, dev_pairs, DEBUG);
        cuda_handle_error();
        CUDA_ERROR_CHECK(cudaDeviceSynchronize());
    }
    else {
        // Randomized check: LS_SAMPLES random partners per candidate square.
        NUM_COMPARISONS = NUM_LATIN_SQUARES * LS_SAMPLES;
        CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_states2, (NUM_COMPARISONS) * sizeof(curandState)));
        CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_mols, (NUM_COMPARISONS) * sizeof(bool)));
        CUDA_ERROR_CHECK(cudaMalloc((void**)&dev_pairs, (NUM_COMPARISONS * 2) * sizeof(int)));
        setup_rand_state<<<NUM_LATIN_SQUARES, LS_SAMPLES>>>(dev_states2);
        cuda_handle_error();
        check_mols_random<<<NUM_LATIN_SQUARES, LS_SAMPLES>>>(dev_states2, dev_perm, dev_is_latin_square, dev_mols, dev_pairs, DEBUG);
        cuda_handle_error();
        CUDA_ERROR_CHECK(cudaFree(dev_states2));
    }
    CUDA_ERROR_CHECK(cudaFree(dev_is_latin_square));
    CUDA_ERROR_CHECK(cudaFree(dev_perm));
    bool* out_mols = new bool[NUM_COMPARISONS];
    int* out_pairs = new int[NUM_COMPARISONS * 2];
    CUDA_ERROR_CHECK(cudaMemcpy(out_mols, dev_mols, NUM_COMPARISONS * sizeof(bool), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaMemcpy(out_pairs, dev_pairs, NUM_COMPARISONS * 2 * sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_ERROR_CHECK(cudaFree(dev_mols));
    CUDA_ERROR_CHECK(cudaFree(dev_pairs));
    FILE* fd_mols = fopen(OUTFILE2, "w+");
    if (fd_mols == nullptr) {
        fprintf(stderr, "error: cannot open %s for writing\n", OUTFILE2);
        exit(EXIT_FAILURE);
    }
    write_output_mols(fd_mols, out_mols, out_perm, out_pairs, NUM_COMPARISONS);
    fclose(fd_mols);
    auto end_mols = clock();
    printf("Done in %6.4f ms.\n", (double)(end_mols - start_mols) * 1000.0 / CLOCKS_PER_SEC);
    /// END: COMPUTE MUTUALLY ORTHOGONAL LATIN SQUARES
    // release memory
    delete[] out_perm;
    delete[] out_mols;
    delete[] out_pairs;
}
/// Entry point for the GPU search: chooses between the cross-method and the
/// single-method strategies based on the compile-time CROSS_SEARCH flag.
void cuda_main() {
    if (!CROSS_SEARCH) {
        single_method_search();
    }
    else {
        cross_method_search();
    }
}
|
b70bba1fea43ab11e04b31046b47170566c2049e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../data/data.hpp"
#include "../../utils/gpu/cuda_parameters.hpp"
namespace SD {
namespace OPT {
namespace TLD {
/**histogram_build_L1_tile(data *, long, data, int *)
 * First-level histogram kernel (tiled variant): counts how many tuples of the
 * relation fall into each of the hashKey partitions. Each block accumulates
 * counts in shared memory first, then merges them into the global histogram,
 * which is laid out bucket-major: globalHisto[bucket * gridDim.x + block].
 * key : Device array of key values used for building the histogram.
 * len : Size of the relation for which the histogram is being built.
 * hashKey : Hash fan-out (number of partitions; also the shared histogram size).
 * globalHisto : Global histogram of hashKey * gridDim.x entries; merged with
 *               atomicAdd, so it must be zero-initialized before the launch.
 * Launch requirement: dynamic shared memory of hashKey * sizeof(int) bytes.
 */
__global__ void histogram_build_L1_tile(data *key, long len, data hashKey, int *globalHisto) {
    // Per-block histogram in dynamic shared memory (hashKey entries).
    extern __shared__ int blockHisto[];

    const int globalTid = blockIdx.x * blockDim.x + threadIdx.x;
    const int totalThreads = gridDim.x * blockDim.x;

    // Clear the shared histogram; shared memory is not zero-initialized and
    // may hold stale values from a previous resident block.
    for (int bucket = threadIdx.x; bucket < hashKey; bucket += blockDim.x) {
        blockHisto[bucket] = 0;
    }
    __syncthreads();

    // Grid-stride pass over the input: count tuples per hash bucket.
    for (long i = globalTid; i < len; i += totalThreads) {
        const int bucket = key[i] % hashKey;
        atomicAdd(&blockHisto[bucket], 1);
    }
    __syncthreads();

    // Merge this block's counts into the bucket-major global histogram.
    for (int bucket = threadIdx.x; bucket < hashKey; bucket += blockDim.x) {
        atomicAdd(&globalHisto[bucket * gridDim.x + blockIdx.x], blockHisto[bucket]);
    }
}
/**reorder_L1_tile(data *, data *, long, data, int *, data *, data *)
 * First-level re-order (scatter) kernel: moves every tuple to its hash
 * partition, using the histogram built by histogram_build_L1_tile.
 * NOTE(review): for the scattered positions to be valid global offsets, the
 * entries of globalHisto must already hold each (bucket, block) start offset,
 * i.e. an exclusive prefix sum over the raw counts — presumably computed
 * between the two passes; confirm against the host code.
 * key : The key values of the relation to be re-ordered.
 * id : The id values of the relation to be re-ordered.
 * len : Size of the relation which is being re-ordered.
 * hashKey : The hash fan-out based on which the data is re-ordered.
 * globalHisto : Per-(bucket, block) offsets, laid out bucket-major as
 *               globalHisto[bucket * gridDim.x + block]. On exit each entry has
 *               been advanced past the tuples this block scattered; the level-2
 *               kernels later read these as partition boundaries.
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * Launch requirement: dynamic shared memory of hashKey * sizeof(int) bytes and
 * the same gridDim.x as the histogram pass (the layout depends on it).
 */
__global__ void reorder_L1_tile(data *key,
data *id,
long len,
data hashKey,
int *globalHisto,
data *keyOut,
data *idOut) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int hashValue; //variable for storing the hash value of each tuple
int pos; //variable for storing the destination for re-ordering
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[i * gridDim.x + blockIdx.x];
}
//barrier
__syncthreads();
//re-ordering the data (grid-stride loop; atomicAdd claims a unique slot)
for (long i = tid; i < len; i += numWorkItems) {
hashValue = key[i] % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1); //getting the destination position
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
//barrier
__syncthreads();
//writing the histogram back into the global memory
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
globalHisto[i * gridDim.x + blockIdx.x] = sharedHisto[i];
}
}
/**reorder_L1_tile_opt(data *, data *, long, data, int *, data *, data *, int *, int)
 * Variant of reorder_L1_tile for the tiled/optimized pipeline: identical
 * re-ordering logic, except every destination is offset by rHistoFinal[index]
 * (the base offset of the tile being processed) and the advanced histogram is
 * NOT written back to global memory (no trailing barrier/write-back).
 * key : The key values of the relation to be re-ordered.
 * id : The id values of the relation to be re-ordered.
 * len : Size of the relation (tile) which is being re-ordered.
 * hashKey : The hash fan-out based on which the data is re-ordered.
 * globalHisto : Per-(bucket, block) offsets, bucket-major; presumably already
 *               prefix-summed to start offsets — TODO confirm against host code.
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * rHistoFinal : Per-tile base offsets into the output arrays; presumably an
 *               exclusive prefix sum over tile sizes — confirm with the caller.
 * index : Index of the tile currently being re-ordered (selects the base offset).
 */
__global__ void reorder_L1_tile_opt(data *key,
data *id,
long len,
data hashKey,
int *globalHisto,
data *keyOut,
data *idOut,
int *rHistoFinal,
int index) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int si = rHistoFinal[index]; //base offset of this tile in the output
int hashValue; //variable for storing the hash value of each tuple
int pos; //variable for storing the destination for re-ordering
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[i * gridDim.x + blockIdx.x];
}
//barrier
__syncthreads();
//re-ordering the data (destinations shifted by the tile base offset)
for (long i = tid; i < len; i += numWorkItems) {
hashValue = key[i] % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1); //getting the destination position
keyOut[si + pos] = key[i];
idOut[si + pos] = id[i];
}
}
/**histogram_build_L2_tile(data *, int, int, int *, int *, int)
 * Builds the per-partition histograms for the second level of partitioning.
 * Each thread block processes one level-1 partition, whose [start, end) range
 * it reads from the level-1 histogram, and counts that partition's tuples into
 * hashKeyL2 second-level buckets.
 * key : Key values already partitioned by the level-1 re-order kernel.
 * hashKeyL2 : Fan-out of the second pass (also the shared histogram size).
 * hashKeyL1 : Fan-out used by the first pass; the second-level bucket is
 *             (key / hashKeyL1) % hashKeyL2, so the passes use distinct digits.
 * globalHisto : Output histogram, partition-major:
 *               globalHisto[partition * hashKeyL2 + bucket]; entries are
 *               overwritten (no pre-zeroing needed for this variant).
 * globalHistoL1 : Level-1 histogram after the re-order pass; entry
 *                 [p * gridDimL1] presumably holds the start offset of
 *                 partition p — TODO confirm the prefix-sum convention.
 * gridDimL1 : gridDim.x used during the level-1 pass (row stride of globalHistoL1).
 * NOTE(review): the last block reads globalHistoL1[gridDim.x * gridDimL1]; the
 * level-1 histogram must carry that extra (sentinel) entry, or this reads out
 * of bounds — verify the allocation on the host side.
 * Launch requirement: one block per level-1 partition and dynamic shared
 * memory of hashKeyL2 * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile(data *key,
int hashKeyL2,
int hashKeyL1,
int *globalHisto,
int *globalHistoL1,
int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//writing the histogram back into the global memory (plain store: one block owns each row)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
globalHisto[blockIdx.x * hashKeyL2 + i] = sharedHisto[i];
}
}
/**histogram_build_L2_tile_opt(data *, int, int, int *, int *, int)
 * Second-level histogram kernel, optimized-pipeline variant. Identical to
 * histogram_build_L2_tile except the global write-back uses atomicAdd instead
 * of a plain store, so globalHisto must be zero-initialized beforehand; this
 * allows multiple invocations (or multiple blocks mapped to the same partition
 * slot) to accumulate into one histogram — presumably how the tiled pipeline
 * calls it; confirm intended usage with the caller.
 * key : Key values already partitioned by the level-1 re-order kernel.
 * hashKeyL2 : Fan-out of the second pass (also the shared histogram size).
 * hashKeyL1 : Fan-out used by the first pass; the second-level bucket is
 *             (key / hashKeyL1) % hashKeyL2.
 * globalHisto : Accumulated output histogram, partition-major:
 *               globalHisto[partition * hashKeyL2 + bucket]; must be pre-zeroed.
 * globalHistoL1 : Level-1 histogram after the re-order pass (start offsets —
 *                 TODO confirm the prefix-sum convention).
 * gridDimL1 : gridDim.x used during the level-1 pass (row stride of globalHistoL1).
 * NOTE(review): like the non-opt variant, the last block reads
 * globalHistoL1[gridDim.x * gridDimL1] — the buffer needs that sentinel entry.
 * Launch requirement: dynamic shared memory of hashKeyL2 * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile_opt(data *key,
int hashKeyL2,
int hashKeyL1,
int *globalHisto,
int *globalHistoL1,
int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//writing the histogram back into the global memory (accumulated atomically)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
atomicAdd(&globalHisto[blockIdx.x * hashKeyL2 + i], sharedHisto[i]);
}
}
/**reorder_L2_tile(data *, data *, int, int, int *, int *, data *, data *, int)
 * Second-level re-order (scatter) kernel: each block scatters the tuples of
 * one level-1 partition into their level-2 buckets, taking destinations from
 * globalHisto via atomicAdd on a shared copy. The advanced histogram is not
 * written back (see reorder_L2_tile_opt for the variant that persists it).
 * NOTE(review): globalHisto entries must already be global start offsets
 * (prefix-summed counts) for the scattered positions to be valid — presumably
 * computed between the histogram and re-order passes; TODO confirm.
 * key : Key values already partitioned by reorder_L1.
 * id : Id values already partitioned by reorder_L1.
 * hashKeyL2 : Fan-out of the second pass (also the shared histogram size).
 * hashKeyL1 : Fan-out used by the first pass; the second-level bucket is
 *             (key / hashKeyL1) % hashKeyL2.
 * globalHisto : Per-(partition, bucket) start offsets, partition-major:
 *               globalHisto[partition * hashKeyL2 + bucket].
 * globalHistoL1 : Level-1 histogram giving each partition's [start, end) range
 *                 (last block reads the sentinel entry — see the L2 histogram kernels).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * gridDimL1 : gridDim.x used during the level-1 pass (row stride of globalHistoL1).
 * Launch requirement: one block per level-1 partition and dynamic shared
 * memory of hashKeyL2 * sizeof(int) bytes.
 */
__global__ void reorder_L2_tile(data *key,
data *id,
int hashKeyL2,
int hashKeyL1,
int *globalHisto,
int *globalHistoL1,
data *keyOut,
data *idOut,
int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = globalHisto[blockIdx.x * hashKeyL2 + i];
}
//barrier
__syncthreads();
//re-ordering the data (atomicAdd claims a unique destination slot per tuple)
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
pos = atomicAdd(&sharedHisto[hashValue], 1);
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
}
/**reorder_L2_tile_opt(data *, data *, int, int, int *, int *, data *, data *, int)
 * Second-level re-order kernel, optimized-pipeline variant. Identical to
 * reorder_L2_tile, but it additionally writes the advanced histogram (i.e. the
 * end offsets after scattering) back to globalHisto so downstream consumers
 * can read the final partition boundaries — presumably the join/probe stage;
 * confirm with the caller.
 * NOTE(review): globalHisto entries must already be global start offsets
 * (prefix-summed counts) for the scattered positions to be valid — TODO confirm.
 * key : Key values already partitioned by reorder_L1.
 * id : Id values already partitioned by reorder_L1.
 * hashKeyL2 : Fan-out of the second pass (also the shared histogram size).
 * hashKeyL1 : Fan-out used by the first pass; the second-level bucket is
 *             (key / hashKeyL1) % hashKeyL2.
 * globalHisto : Per-(partition, bucket) offsets, partition-major:
 *               globalHisto[partition * hashKeyL2 + bucket]; updated in place.
 * globalHistoL1 : Level-1 histogram giving each partition's [start, end) range
 *                 (last block reads the sentinel entry — see the L2 histogram kernels).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * gridDimL1 : gridDim.x used during the level-1 pass (row stride of globalHistoL1).
 * Launch requirement: one block per level-1 partition and dynamic shared
 * memory of hashKeyL2 * sizeof(int) bytes.
 */
__global__ void reorder_L2_tile_opt(data *key,
data *id,
int hashKeyL2,
int hashKeyL1,
int *globalHisto,
int *globalHistoL1,
data *keyOut,
data *idOut,
int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = globalHisto[blockIdx.x * hashKeyL2 + i];
}
//barrier
__syncthreads();
//re-ordering the data (atomicAdd claims a unique destination slot per tuple)
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
pos = atomicAdd(&sharedHisto[hashValue], 1);
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
//barrier
__syncthreads();
//writing the histogram back into the global memory (now holds end offsets)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
globalHisto[blockIdx.x * hashKeyL2 + i] = sharedHisto[i];
}
}
/**histogram_build_L2_tile_multiplied(data *key, int hashKey, int hashKeyL1,
 *                                    int *globalHisto, int *globalHistoL1, int gridDimL1)
 * Builds the histogram for the second level of partitioning, with GRID_SIZE_MULTIPLIER
 * thread blocks cooperating on each level-1 partition (each block counts a strided slice).
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * hashKey : The hash fan-out based on which the data will be partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : The level-2 histogram in global memory. Stored interleaved: the
 *               GRID_SIZE_MULTIPLIER per-sub-block counters of the same bucket are adjacent.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1), used to locate
 *                 the input range of the level-1 partition shared by this group of blocks.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: gridDim.x = (#level-1 partitions) * GRID_SIZE_MULTIPLIER; dynamic shared memory
 * of at least hashKey * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile_multiplied(data *key,
                                                   int hashKey,
                                                   int hashKeyL1,
                                                   int *globalHisto,
                                                   int *globalHistoL1,
                                                   int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the level-1 partition shared by this group of blocks
startIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER) * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory; each of the GRID_SIZE_MULTIPLIER sibling blocks
//walks a disjoint strided slice of the partition
for (long i = (blockIdx.x % GRID_SIZE_MULTIPLIER) * blockDim.x + threadIdx.x + startIndex; i < endIndex;
i += GRID_SIZE_MULTIPLIER * blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKey;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//writing the histogram back into the global memory in the interleaved layout:
//bucket-major within the partition, sub-block index as the innermost dimension
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
globalHisto[((int) (blockIdx.x / GRID_SIZE_MULTIPLIER)) * hashKey * GRID_SIZE_MULTIPLIER
+ (i * GRID_SIZE_MULTIPLIER) + (blockIdx.x % GRID_SIZE_MULTIPLIER)] = sharedHisto[i];
}
}
/**reorder_L2_tile_multiplied(data *key, data *id, int hashKey, int hashKeyL1,
 *                            int *globalHisto, int *globalHistoL1, data *keyOut, data *idOut, int gridDimL1)
 * Second-pass re-order with GRID_SIZE_MULTIPLIER thread blocks cooperating on each
 * level-1 partition; must be launched with the same grid shape as
 * histogram_build_L2_tile_multiplied so each sub-block replays the slice it counted.
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * id : The array containing the id values which has already been partitioned by reorder_L1.
 * hashKey : The hash fan-out based on which the data is partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : Interleaved level-2 histogram (see histogram_build_L2_tile_multiplied layout).
 *               Entries are used directly as destination indices, so each must hold the start
 *               output offset for its (bucket, sub-block) slice.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 */
__global__ void reorder_L2_tile_multiplied(data *key,
                                           data *id,
                                           int hashKey,
                                           int hashKeyL1,
                                           int *globalHisto,
                                           int *globalHistoL1,
                                           data *keyOut,
                                           data *idOut,
                                           int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the level-1 partition shared by this group of blocks
startIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER) * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER + 1) * gridDimL1];
//reading this sub-block's interleaved histogram entries from global memory
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[((int) (blockIdx.x / GRID_SIZE_MULTIPLIER)) * hashKey * GRID_SIZE_MULTIPLIER
+ (i * GRID_SIZE_MULTIPLIER) + (blockIdx.x % GRID_SIZE_MULTIPLIER)];
}
//barrier: the shared histogram must be fully loaded before any thread scatters through it
__syncthreads();
//re-ordering the data over the same strided slice this sub-block counted
for (long i = (blockIdx.x % GRID_SIZE_MULTIPLIER) * blockDim.x + threadIdx.x + startIndex; i < endIndex;
i += GRID_SIZE_MULTIPLIER * blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1);
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
}
/**probe_tile(data *rKey, data *rId, data *sKey, data *sId, int *rHisto, int *sHisto,
 *            int pCount, int *globalPtr, data *output, int pidStartIndex)
 * Performs the actual join by matching partitions of R and S with the same hash value.
 * blockDim.x must be >= the number of tuples in the largest S partition (one thread per
 * S tuple); dynamic shared memory must hold 2 * (largest R partition) elements of `data`
 * (keys followed by ids).
 * rKey / rId : key and id values of partitioned relation R.
 * sKey / sId : key and id values of partitioned relation S.
 * rHisto / sHisto : prefix-summed partition-boundary histograms of R and S.
 * pCount : total number of partitions of each relation.
 * globalPtr : global cursor used to reserve space in `output` (in `data` elements).
 * output : result buffer; each match is written as the pair (S id, R id).
 * pidStartIndex : index of the first partition this launch should process.
 *
 * Fixes vs. the previous version:
 *  - __syncthreads() calls were inside the divergent `if (threadIdx.x < ...)` branch,
 *    which is undefined behavior when some threads of the block skip them;
 *  - posLocal/matchedRValue were written to `output` even when a thread found no match
 *    (uninitialized reads unless the 100%-match assumption holds exactly);
 *  - no barrier before the next pid iteration reloaded sharedPartitionR/sharedPtr,
 *    racing with threads still reading them.
 */
__global__ void probe_tile(data *rKey,
                           data *rId,
                           data *sKey,
                           data *sId,
                           int *rHisto,
                           int *sHisto,
                           int pCount,
                           int *globalPtr,
                           data *output,
                           int pidStartIndex) {
//shared memory holding one R partition: keys in the first half, ids in the second half
extern __shared__ data sharedPartitionR[];
//block-local output cursor; rebased to a global offset by thread 0 after probing
__shared__ int sharedPtr;
long startIndex,
endIndex; //start/end index of the R partition being processed
int posLocal; //destination slot of this thread's output pair within the block's output
data sKeyVal; //relation S key value for this thread
data sIdVal; //relation S id value for this thread
data matchedRValue; //matched relation R id value
for (int pid = pidStartIndex + blockIdx.x; pid < pCount; pid += gridDim.x) {
    //getting the start and end index of the relation R partition
    startIndex = rHisto[pid];
    endIndex = rHisto[(pid + 1)];
    //loading the relation R partition into shared memory (keys, then ids)
    for (int i = startIndex + threadIdx.x; i < endIndex; i += blockDim.x) {
        sharedPartitionR[i - startIndex] = rKey[i];
        sharedPartitionR[(endIndex - startIndex) + i - startIndex] = rId[i];
    }
    if (threadIdx.x == 0) sharedPtr = 0;
    //barrier: partition load and cursor reset must complete before probing starts
    __syncthreads();
    //threads mapped to a tuple of the S partition probe the cached R partition
    bool active = threadIdx.x < sHisto[(pid + 1)] - sHisto[pid];
    posLocal = -1; //-1 marks "no match found" so inactive/unmatched threads write nothing
    if (active) {
        sKeyVal = sKey[sHisto[pid] + threadIdx.x];
        sIdVal = sId[sHisto[pid] + threadIdx.x];
        //probing the R partition using the S partition
        for (int j = 0; j < endIndex - startIndex; j++) {
            if (sKeyVal == sharedPartitionR[j]) {
                posLocal = atomicAdd(&sharedPtr, 2);
                matchedRValue = sharedPartitionR[endIndex - startIndex + j];
            }
        }
    }
    //barrier outside divergent control flow: every thread of the block reaches it
    __syncthreads();
    if (threadIdx.x == 0) {
        //reserving space in the global buffer for this block's output
        sharedPtr = atomicAdd(globalPtr, sharedPtr);
    }
    //barrier: the rebased sharedPtr must be visible to all threads before writing
    __syncthreads();
    //only threads that actually found a match emit an output pair
    if (posLocal >= 0) {
        output[sharedPtr + posLocal] = sIdVal;
        output[sharedPtr + posLocal + 1] = matchedRValue;
    }
    //barrier: the next iteration overwrites sharedPartitionR/sharedPtr
    __syncthreads();
}
}
namespace RCD {
/**histogram_build_L1_tile_rcd(Record *records, long len, data hashKey, int *globalHisto)
 * Builds the first-level histogram over the key field of a relation stored as an array of
 * Record structs. The histogram is later used to partition the relation by the matching
 * re-order kernel.
 * records : The array of Record tuples whose keys are counted.
 * len : Size of the relation for which the histogram is being built.
 * hashKey : The hash fan-out (number of buckets) based on which the data is partitioned.
 * globalHisto : The histogram in global memory, laid out bucket-major with one column per
 *               thread block (entry for bucket i of this block at i * gridDim.x + blockIdx.x).
 * Launch: dynamic shared memory of at least hashKey * sizeof(int) bytes.
 */
__global__ void histogram_build_L1_tile_rcd(Record *records, long len, data hashKey, int *globalHisto) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int hashValue; //variable for storing the hash value of each tuple
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory (grid-stride loop over the whole relation)
for (long i = tid; i < len; i += numWorkItems) {
hashValue = records[i].key % hashKey;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//writing the histogram back into the global memory (plain store: each block owns its column)
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
globalHisto[i * gridDim.x + blockIdx.x] = sharedHisto[i];
}
}
/**reorder_L1_tile_opt_rcd(Record *in, long len, data hashKey, int *globalHisto,
 *                         Record *out, int startIndex)
 * First-pass re-order for Record-struct relations: scatters tuples into their level-1
 * partitions based on the histogram built by histogram_build_L1_tile_rcd.
 * in : The array of Record tuples to be re-ordered.
 * len : Size of the relation which is being re-ordered.
 * hashKey : The hash fan-out based on which the data is re-ordered.
 * globalHisto : The level-1 histogram in global memory (bucket-major, one column per block);
 *               entries are used directly as destination indices, so they must hold the start
 *               offset of each (bucket, block) slice.
 * out : The array storing the re-ordered Record tuples.
 * startIndex : Base offset added to every destination index (lets the caller place the
 *              re-ordered relation at an arbitrary position inside `out`).
 * Launch: must use the same grid/block shape as the histogram kernel; dynamic shared
 * memory of at least hashKey * sizeof(int) bytes.
 */
__global__ void reorder_L1_tile_opt_rcd(Record *in,
                                        long len,
                                        data hashKey,
                                        int *globalHisto,
                                        Record *out,
                                        int startIndex) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int hashValue; //variable for storing the hash value of each tuple
int pos; //variable for storing the destination for re-ordering
//reading the histogram data from global memory (this block's column of offsets)
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[i * gridDim.x + blockIdx.x];
}
//barrier
__syncthreads();
//re-ordering the data
for (long i = tid; i < len; i += numWorkItems) {
hashValue = in[i].key % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1); //getting the destination position
out[startIndex + pos] = in[i];
}
}
/**histogram_build_L2_tile_opt_rcd(Record *records, int hashKeyL2, int hashKeyL1,
 *                                 int *globalHisto, int *globalHistoL1, int gridDimL1)
 * Builds the second-level histogram for one level-1 partition of a Record-struct relation.
 * records : The Record array which has already been partitioned at level 1.
 * hashKeyL2 : The hash fan-out based on which the data will be partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : The level-2 histogram in global memory; counts are accumulated atomically,
 *               so the buffer is expected to be zeroed (or hold partial counts) beforehand
 *               — TODO confirm against the caller.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1), used to locate
 *                 the input range owned by this thread block.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: one thread block per level-1 partition; dynamic shared memory of at least
 * hashKeyL2 * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile_opt_rcd(Record *records,
                                                int hashKeyL2,
                                                int hashKeyL1,
                                                int *globalHisto,
                                                int *globalHistoL1,
                                                int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (records[i].key / hashKeyL1) % hashKeyL2;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//accumulating the histogram into global memory (atomic, unlike the plain-store variants)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
atomicAdd(&globalHisto[blockIdx.x * hashKeyL2 + i], sharedHisto[i]);
}
}
/**reorder_L2_tile_opt_rcd(Record *in, int hashKeyL2, int hashKeyL1, int *globalHisto,
 *                         int *globalHistoL1, Record *out, int gridDimL1)
 * Second-pass re-order for Record-struct relations: scatters one level-1 partition into
 * its level-2 sub-partitions using the histogram built by histogram_build_L2_tile_opt_rcd.
 * in : The Record array which has already been partitioned at level 1.
 * hashKeyL2 : The hash fan-out based on which the data is partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : Per-block level-2 histogram; entries are used directly as destination
 *               indices (start offsets on entry) and, after write-back, mark the end
 *               offsets of the level-2 sub-partitions.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1).
 * out : The array storing the re-ordered Record tuples.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: one thread block per level-1 partition; dynamic shared memory of at least
 * hashKeyL2 * sizeof(int) bytes.
 */
__global__ void reorder_L2_tile_opt_rcd(Record *in,
                                        int hashKeyL2,
                                        int hashKeyL1,
                                        int *globalHisto,
                                        int *globalHistoL1,
                                        Record *out,
                                        int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = globalHisto[blockIdx.x * hashKeyL2 + i];
}
//barrier
__syncthreads();
//re-ordering the data
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (in[i].key / hashKeyL1) % hashKeyL2;
pos = atomicAdd(&sharedHisto[hashValue], 1);
out[pos] = in[i];
}
//barrier
__syncthreads();
//writing the advanced histogram back into global memory (entries become end offsets)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
globalHisto[blockIdx.x * hashKeyL2 + i] = sharedHisto[i];
}
}
}
}
}
} | b70bba1fea43ab11e04b31046b47170566c2049e.cu | #include "../../data/data.hpp"
#include "../../utils/gpu/cuda_parameters.hpp"
namespace SD {
namespace OPT {
namespace TLD {
/**histogram_build_L1_tile(data *key, long len, data hashKey, int *globalHisto)
 * Builds the first-level histogram based on the key values of the relation. This
 * histogram is later used to partition the relation using the re-order kernel.
 * key : The array containing the key values used for building the histogram.
 * len : Size of the relation for which the histogram is being built.
 * hashKey : The hash fan-out (number of buckets) based on which the data is partitioned.
 * globalHisto : The histogram in global memory, laid out bucket-major with one column per
 *               thread block (entry for bucket i of this block at i * gridDim.x + blockIdx.x).
 *               Counts are accumulated atomically, so the buffer is expected to be zeroed
 *               (or hold partial counts) beforehand — TODO confirm against the caller.
 * Launch: dynamic shared memory of at least hashKey * sizeof(int) bytes.
 */
__global__ void histogram_build_L1_tile(data *key, long len, data hashKey, int *globalHisto) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int hashValue; //variable for storing the hash value of each tuple
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory (grid-stride loop over the whole relation)
for (long i = tid; i < len; i += numWorkItems) {
hashValue = key[i] % hashKey;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//accumulating the histogram into global memory (atomic, unlike the plain-store rcd variant)
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
atomicAdd(&globalHisto[i * gridDim.x + blockIdx.x], sharedHisto[i]);
}
}
/**reorder_L1_tile(data *key, data *id, long len, data hashKey, int *globalHisto,
 *                 data *keyOut, data *idOut)
 * First-pass re-order: scatters a relation into its level-1 partitions based on the
 * histogram built by histogram_build_L1_tile.
 * key : The key values of the relation to be re-ordered.
 * id : The id values of the relation to be re-ordered.
 * len : Size of the relation which is being re-ordered.
 * hashKey : The hash fan-out based on which the data is re-ordered.
 * globalHisto : The level-1 histogram in global memory (bucket-major, one column per block);
 *               entries are used directly as destination indices, so they must hold the
 *               start offset of each (bucket, block) slice. On exit each entry has been
 *               advanced past its slice (end offset).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * Launch: must use the same grid/block shape as the histogram kernel; dynamic shared
 * memory of at least hashKey * sizeof(int) bytes.
 */
__global__ void reorder_L1_tile(data *key,
                                data *id,
                                long len,
                                data hashKey,
                                int *globalHisto,
                                data *keyOut,
                                data *idOut) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int hashValue; //variable for storing the hash value of each tuple
int pos; //variable for storing the destination for re-ordering
//reading the histogram data from global memory (this block's column of offsets)
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[i * gridDim.x + blockIdx.x];
}
//barrier
__syncthreads();
//re-ordering the data
for (long i = tid; i < len; i += numWorkItems) {
hashValue = key[i] % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1); //getting the destination position
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
//barrier
__syncthreads();
//writing the advanced histogram back into global memory (entries become end offsets)
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
globalHisto[i * gridDim.x + blockIdx.x] = sharedHisto[i];
}
}
/**reorder_L1_tile_opt(data *key, data *id, long len, data hashKey, int *globalHisto,
 *                     data *keyOut, data *idOut, int *rHistoFinal, int index)
 * First-pass re-order, "opt" variant: like reorder_L1_tile but offsets every destination
 * by a base index read from rHistoFinal, and does not write the histogram back.
 * key : The key values of the relation to be re-ordered.
 * id : The id values of the relation to be re-ordered.
 * len : Size of the relation which is being re-ordered.
 * hashKey : The hash fan-out based on which the data is re-ordered.
 * globalHisto : The level-1 histogram in global memory (bucket-major, one column per block);
 *               entries must hold the start offset of each (bucket, block) slice.
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * rHistoFinal : Array of base offsets; rHistoFinal[index] is added to every destination,
 *               letting the caller place this relation's output at an arbitrary position.
 * index : Which entry of rHistoFinal to use as the base offset.
 * Launch: must use the same grid/block shape as the histogram kernel; dynamic shared
 * memory of at least hashKey * sizeof(int) bytes.
 */
__global__ void reorder_L1_tile_opt(data *key,
                                    data *id,
                                    long len,
                                    data hashKey,
                                    int *globalHisto,
                                    data *keyOut,
                                    data *idOut,
                                    int *rHistoFinal,
                                    int index) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
//getting thread id and work item count
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numWorkItems = gridDim.x * blockDim.x;
int si = rHistoFinal[index]; //base output offset for this relation
int hashValue; //variable for storing the hash value of each tuple
int pos; //variable for storing the destination for re-ordering
//reading the histogram data from global memory (this block's column of offsets)
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[i * gridDim.x + blockIdx.x];
}
//barrier
__syncthreads();
//re-ordering the data, shifted by the base offset
for (long i = tid; i < len; i += numWorkItems) {
hashValue = key[i] % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1); //getting the destination position
keyOut[si + pos] = key[i];
idOut[si + pos] = id[i];
}
}
/**histogram_build_L2_tile(data *key, int hashKeyL2, int hashKeyL1,
 *                         int *globalHisto, int *globalHistoL1, int gridDimL1)
 * Builds the second-level histogram for one level-1 partition.
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * hashKeyL2 : The hash fan-out based on which the data will be partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : The level-2 histogram in global memory, one contiguous row of hashKeyL2
 *               counters per thread block (plain store — each block owns its row).
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1), used to locate
 *                 the input range owned by this thread block.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: one thread block per level-1 partition; dynamic shared memory of at least
 * hashKeyL2 * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile(data *key,
                                        int hashKeyL2,
                                        int hashKeyL1,
                                        int *globalHisto,
                                        int *globalHistoL1,
                                        int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//writing the histogram back into the global memory (plain store: each block owns its row)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
globalHisto[blockIdx.x * hashKeyL2 + i] = sharedHisto[i];
}
}
/**histogram_build_L2_tile_opt(data *key, int hashKeyL2, int hashKeyL1,
 *                             int *globalHisto, int *globalHistoL1, int gridDimL1)
 * Builds the second-level histogram for one level-1 partition; "opt" variant that
 * accumulates into globalHisto atomically instead of overwriting it.
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * hashKeyL2 : The hash fan-out based on which the data will be partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : The level-2 histogram in global memory; counts are accumulated atomically,
 *               so the buffer is expected to be zeroed (or hold partial counts) beforehand
 *               — TODO confirm against the caller.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1), used to locate
 *                 the input range owned by this thread block.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: one thread block per level-1 partition; dynamic shared memory of at least
 * hashKeyL2 * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile_opt(data *key,
                                            int hashKeyL2,
                                            int hashKeyL1,
                                            int *globalHisto,
                                            int *globalHistoL1,
                                            int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//accumulating the histogram into global memory (atomic, unlike the plain-store variant)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
atomicAdd(&globalHisto[blockIdx.x * hashKeyL2 + i], sharedHisto[i]);
}
}
/**reorder_L2_tile(data *key, data *id, int hashKeyL2, int hashKeyL1,
 *                 int *globalHisto, int *globalHistoL1, data *keyOut, data *idOut, int gridDimL1)
 * Second partitioning pass: re-orders one level-1 partition into its level-2
 * sub-partitions, based on the histogram built by the matching histogram_build_L2 kernel.
 * Unlike reorder_L2_tile_opt, this variant does not write the advanced histogram back.
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * id : The array containing the id values which has already been partitioned by reorder_L1.
 * hashKeyL2 : The hash fan-out based on which the data is partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : Per-block level-2 histogram; entries are used directly as destination
 *               indices, so they must hold the start offset of each sub-partition.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: one thread block per level-1 partition; dynamic shared memory of at least
 * hashKeyL2 * sizeof(int) bytes.
 */
__global__ void reorder_L2_tile(data *key,
                                data *id,
                                int hashKeyL2,
                                int hashKeyL1,
                                int *globalHisto,
                                int *globalHistoL1,
                                data *keyOut,
                                data *idOut,
                                int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = globalHisto[blockIdx.x * hashKeyL2 + i];
}
//barrier
__syncthreads();
//re-ordering the data
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
pos = atomicAdd(&sharedHisto[hashValue], 1);
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
}
/**reorder_L2_tile_opt(data *key, data *id, int hashKeyL2, int hashKeyL1,
 *                     int *globalHisto, int *globalHistoL1, data *keyOut, data *idOut, int gridDimL1)
 * Second partitioning pass: re-orders one level-1 partition into its level-2
 * sub-partitions, based on the histogram built by the matching histogram_build_L2 kernel.
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * id : The array containing the id values which has already been partitioned by reorder_L1.
 * hashKeyL2 : The hash fan-out based on which the data is partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : Per-block level-2 histogram. Entries are used directly as destination
 *               indices (start offsets on entry); on exit each entry has been advanced
 *               past its sub-partition (end offset).
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: one thread block per level-1 partition; dynamic shared memory of at least
 * hashKeyL2 * sizeof(int) bytes.
 */
__global__ void reorder_L2_tile_opt(data *key,
                                    data *id,
                                    int hashKeyL2,
                                    int hashKeyL1,
                                    int *globalHisto,
                                    int *globalHistoL1,
                                    data *keyOut,
                                    data *idOut,
                                    int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the partition to be processed by the current thread block
startIndex = globalHistoL1[blockIdx.x * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
//reading the histogram data from global memory
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
sharedHisto[i] = globalHisto[blockIdx.x * hashKeyL2 + i];
}
//barrier
__syncthreads();
//re-ordering the data
for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKeyL2;
pos = atomicAdd(&sharedHisto[hashValue], 1);
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
//barrier
__syncthreads();
//writing the advanced histogram back into global memory (entries become end offsets)
for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
globalHisto[blockIdx.x * hashKeyL2 + i] = sharedHisto[i];
}
}
/**histogram_build_L2_tile_multiplied(data *key, int hashKey, int hashKeyL1,
 *                                    int *globalHisto, int *globalHistoL1, int gridDimL1)
 * Builds the histogram for the second level of partitioning, with GRID_SIZE_MULTIPLIER
 * thread blocks cooperating on each level-1 partition (each block counts a strided slice).
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * hashKey : The hash fan-out based on which the data will be partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : The level-2 histogram in global memory. Stored interleaved: the
 *               GRID_SIZE_MULTIPLIER per-sub-block counters of the same bucket are adjacent.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1), used to locate
 *                 the input range of the level-1 partition shared by this group of blocks.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 * Launch: gridDim.x = (#level-1 partitions) * GRID_SIZE_MULTIPLIER; dynamic shared memory
 * of at least hashKey * sizeof(int) bytes.
 */
__global__ void histogram_build_L2_tile_multiplied(data *key,
                                                   int hashKey,
                                                   int hashKeyL1,
                                                   int *globalHisto,
                                                   int *globalHistoL1,
                                                   int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
//getting the start and end index of the level-1 partition shared by this group of blocks
startIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER) * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER + 1) * gridDimL1];
//initializing all histogram entries in shared memory to 0. Otherwise there could be left over values from another thread block.
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = 0;
}
//barrier
__syncthreads();
//building the histogram in shared memory; each of the GRID_SIZE_MULTIPLIER sibling blocks
//walks a disjoint strided slice of the partition
for (long i = (blockIdx.x % GRID_SIZE_MULTIPLIER) * blockDim.x + threadIdx.x + startIndex; i < endIndex;
i += GRID_SIZE_MULTIPLIER * blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKey;
atomicAdd(&sharedHisto[hashValue], 1);
}
//barrier
__syncthreads();
//writing the histogram back into the global memory in the interleaved layout:
//bucket-major within the partition, sub-block index as the innermost dimension
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
globalHisto[((int) (blockIdx.x / GRID_SIZE_MULTIPLIER)) * hashKey * GRID_SIZE_MULTIPLIER
+ (i * GRID_SIZE_MULTIPLIER) + (blockIdx.x % GRID_SIZE_MULTIPLIER)] = sharedHisto[i];
}
}
/**reorder_L2_tile_multiplied(data *key, data *id, int hashKey, int hashKeyL1,
 *                            int *globalHisto, int *globalHistoL1, data *keyOut, data *idOut, int gridDimL1)
 * Second-pass re-order with GRID_SIZE_MULTIPLIER thread blocks cooperating on each
 * level-1 partition; must be launched with the same grid shape as
 * histogram_build_L2_tile_multiplied so each sub-block replays the slice it counted.
 * key : The array containing the key values which has already been partitioned by reorder_L1.
 * id : The array containing the id values which has already been partitioned by reorder_L1.
 * hashKey : The hash fan-out based on which the data is partitioned in the second pass.
 * hashKeyL1 : The hash fan-out based on which the data was partitioned in the first pass.
 * globalHisto : Interleaved level-2 histogram (see histogram_build_L2_tile_multiplied layout).
 *               Entries are used directly as destination indices, so each must hold the start
 *               output offset for its (bucket, sub-block) slice.
 * globalHistoL1 : The level-1 histogram in global memory (stride gridDimL1).
 * keyOut : The array storing the re-ordered key values.
 * idOut : The array storing the re-ordered id values.
 * gridDimL1 : Grid size used during the level-1 pass (row stride into globalHistoL1).
 */
__global__ void reorder_L2_tile_multiplied(data *key,
                                           data *id,
                                           int hashKey,
                                           int hashKeyL1,
                                           int *globalHisto,
                                           int *globalHistoL1,
                                           data *keyOut,
                                           data *idOut,
                                           int gridDimL1) {
//allocating shared memory for storing the histogram
extern __shared__ int sharedHisto[];
int hashValue; //variable for storing the hash value of each tuple
long startIndex,
endIndex; //used to store the start index or end index of each partition generated in the previous stage
int pos; //variable for storing the destination for re-ordering
//getting the start and end index of the level-1 partition shared by this group of blocks
startIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER) * gridDimL1];
endIndex = globalHistoL1[(blockIdx.x / GRID_SIZE_MULTIPLIER + 1) * gridDimL1];
//reading this sub-block's interleaved histogram entries from global memory
for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
sharedHisto[i] = globalHisto[((int) (blockIdx.x / GRID_SIZE_MULTIPLIER)) * hashKey * GRID_SIZE_MULTIPLIER
+ (i * GRID_SIZE_MULTIPLIER) + (blockIdx.x % GRID_SIZE_MULTIPLIER)];
}
//barrier
__syncthreads();
//re-ordering the data over the same strided slice this sub-block counted
for (long i = (blockIdx.x % GRID_SIZE_MULTIPLIER) * blockDim.x + threadIdx.x + startIndex; i < endIndex;
i += GRID_SIZE_MULTIPLIER * blockDim.x) {
hashValue = (key[i] / hashKeyL1) % hashKey;
pos = atomicAdd(&sharedHisto[hashValue], 1);
keyOut[pos] = key[i];
idOut[pos] = id[i];
}
}
/**probe(data *, data *, data *, data *, int *, int *, data *)
* The function performs the actual join operation by joining partitions with the same hash value.
* The number of threads in a block must be greater than or equalt to the number of tuples in the
* largest partition. We also assume a 100% match rate for this kernel.
* rKey : The array containing the key values of partitioned relation R.
* rID : The array containing the id values of partitioned relation R.
* sKey : The array containing the key values of partitioned relation S.
* sID : The array containing id key values of partitioned relation S.
* rHisto : The histogram of relation R.
* sHisto : The histogram of relation S.
* pCount : The total number of partitions of each realtion.
* globalPtr : The global pointer that is used to get the index of the output tuple.
* output : The array used for storing the output of the probe operation.
*/
/**
 * Joins matching partitions of R and S (same hash bucket).  Assumes the
 * largest R partition fits in shared memory, blockDim.x >= size of the
 * largest S partition, and (per the original design) a 100% match rate with
 * at most one match per S tuple.
 * rKey/rId, sKey/sId : partitioned key/id arrays of relations R and S.
 * rHisto / sHisto    : partition start offsets (histogram prefix sums).
 * pCount             : total number of partitions per relation.
 * globalPtr          : global output cursor, advanced atomically per block.
 * output             : result buffer; each match emits an (S id, R id) pair.
 * pidStartIndex      : first partition id handled by this launch.
 * Requires 2 * maxRPartitionSize * sizeof(data) bytes of dynamic shared mem.
 *
 * Fixes vs. the original: the block-wide barriers used to sit inside the
 * divergent `threadIdx.x < |S partition|` branch (undefined behaviour when
 * the block is larger than the partition); a trailing barrier now prevents
 * the next iteration from clobbering sharedPtr while it is still being read;
 * and unmatched threads no longer write through an uninitialized posLocal.
 */
__global__ void probe_tile(data *rKey,
                           data *rId,
                           data *sKey,
                           data *sId,
                           int *rHisto,
                           int *sHisto,
                           int pCount,
                           int *globalPtr,
                           data *output,
                           int pidStartIndex) {
    // Shared staging area for the R partition: keys in [0, len), ids in [len, 2*len).
    extern __shared__ data sharedPartitionR[];
    // Block-local output cursor; after the global reservation it holds the
    // block's base offset inside `output`.
    __shared__ int sharedPtr;
    long startIndex, endIndex; // bounds of the current R partition
    int posLocal;              // this thread's slot within the block's output chunk
    data sKeyVal;              // S key probed by this thread
    data sIdVal;               // S id carried alongside that key
    int matchedRValue;         // R id of the matched tuple
    for (int pid = pidStartIndex + blockIdx.x; pid < pCount; pid += gridDim.x) {
        startIndex = rHisto[pid];
        endIndex = rHisto[(pid + 1)];
        // Stage the R partition (keys, then ids) into shared memory.
        for (int i = startIndex + threadIdx.x; i < endIndex; i += blockDim.x) {
            sharedPartitionR[i - startIndex] = rKey[i];
            sharedPartitionR[(endIndex - startIndex) + i - startIndex] = rId[i];
        }
        if (threadIdx.x == 0) sharedPtr = 0;
        __syncthreads();
        // One thread per S tuple; inactive threads still reach every barrier.
        bool active = threadIdx.x < sHisto[(pid + 1)] - sHisto[pid];
        posLocal = -1; // stays -1 when this thread found no match
        if (active) {
            sKeyVal = sKey[sHisto[pid] + threadIdx.x];
            sIdVal = sId[sHisto[pid] + threadIdx.x];
            // Linear probe of the staged R partition.
            for (int j = 0; j < endIndex - startIndex; j++) {
                if (sKeyVal == sharedPartitionR[j]) {
                    posLocal = atomicAdd(&sharedPtr, 2); // reserve two output slots
                    matchedRValue = sharedPartitionR[endIndex - startIndex + j];
                }
            }
        }
        __syncthreads();
        // Reserve this block's chunk of the global output buffer.
        if (threadIdx.x == 0) {
            sharedPtr = atomicAdd(globalPtr, sharedPtr);
        }
        __syncthreads();
        // Emit the (S id, matched R id) pair for threads that found a match.
        if (posLocal >= 0) {
            output[sharedPtr + posLocal] = sIdVal;
            output[sharedPtr + posLocal + 1] = matchedRValue;
        }
        // Keep the next iteration's writes to sharedPtr / sharedPartitionR
        // from racing with the reads above.
        __syncthreads();
    }
}
namespace RCD {
/**histogram_build_L1(data * , data , int *)
* The function builds the histogram based on the key values of the relation. This
* histogram is later used to partition the relation using the re-order kernel.
* key : The array containing the key values used for building the histogram.
* len : Size of the relation for which the histogram is being built.
* hashKey : The hash value based on which the data is partitioned.
* globalHisto : The histogram data structure in global memory.
*/
/**
 * Builds a per-block histogram of records[i].key % hashKey in shared memory
 * and publishes it to global memory in column-major layout
 * (globalHisto[bucket * gridDim.x + blockIdx.x]).
 * records     : relation to histogram.
 * len         : number of records.
 * hashKey     : partitioning fan-out (number of buckets).
 * globalHisto : per-block histogram output, gridDim.x entries per bucket.
 * Requires hashKey * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void histogram_build_L1_tile_rcd(Record *records, long len, data hashKey, int *globalHisto) {
    extern __shared__ int sharedHisto[];
    const int globalTid = blockIdx.x * blockDim.x + threadIdx.x;
    const int totalThreads = gridDim.x * blockDim.x;
    // Shared memory is uninitialized: clear this block's counters first.
    for (int bucket = threadIdx.x; bucket < hashKey; bucket += blockDim.x)
        sharedHisto[bucket] = 0;
    __syncthreads();
    // Count keys with a grid-stride loop over the whole relation.
    for (long i = globalTid; i < len; i += totalThreads)
        atomicAdd(&sharedHisto[records[i].key % hashKey], 1);
    __syncthreads();
    // Publish this block's counts, one column per block.
    for (int bucket = threadIdx.x; bucket < hashKey; bucket += blockDim.x)
        globalHisto[bucket * gridDim.x + blockIdx.x] = sharedHisto[bucket];
}
/**reorder_L1(data *, data *, long , data , int *, data *, data *)
* This function re-orders a relation based on the histogram built using 'histogram_build_L1' kernel.
* key : The key values of the relation to be re-ordered.
* id : The id values of the relation to be re-ordered.
* len : Size of the relation which is being re-ordered.
* hashKey : The hash value based on which the data is re-ordered.
* globalHisto : The histogram data structure in the global memory.
* keyOut : The array storing the re-ordered key values.
* idOut : The array storing the re-ordered id values.
*/
/**
 * First-pass scatter for Record tuples: re-orders `in` into `out` using the
 * per-block histogram produced by histogram_build_L1_tile_rcd.
 * in          : relation to re-order.
 * len         : number of records.
 * hashKey     : partitioning fan-out.
 * globalHisto : per-block histogram in column-major layout.
 *               NOTE(review): assumed to already hold prefix-summed start
 *               offsets, not raw counts -- confirm against the host-side
 *               prefix-sum step.
 * out         : destination array.
 * startIndex  : base offset of this relation's region inside `out`.
 * Requires hashKey * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void reorder_L1_tile_opt_rcd(Record *in,
                                        long len,
                                        data hashKey,
                                        int *globalHisto,
                                        Record *out,
                                        int startIndex) {
    // Dynamic shared memory for this block's slice of the histogram.
    extern __shared__ int sharedHisto[];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int numWorkItems = gridDim.x * blockDim.x;
    int hashValue; // partition bucket of the current tuple
    int pos; // scatter destination within the output
    // Load this block's column of the histogram.
    for (int i = threadIdx.x; i < hashKey; i += blockDim.x) {
        sharedHisto[i] = globalHisto[i * gridDim.x + blockIdx.x];
    }
    // Offsets must be fully staged before scattering begins.
    __syncthreads();
    // Grid-stride scatter; atomicAdd hands out unique destinations, so the
    // order of tuples within a bucket is nondeterministic.
    for (long i = tid; i < len; i += numWorkItems) {
        hashValue = in[i].key % hashKey;
        pos = atomicAdd(&sharedHisto[hashValue], 1); //getting the destination position
        out[startIndex + pos] = in[i];
    }
}
/**histogram_build_L2(data *, data *, long , int , int *, int *)
* This function builds the histogram for second level of partitioning.
* key : The array containing the key values which has already been partitioned by reorder_L1.
* hashKey : The hash value based on which the data will be partitioned in the second pass.
* hashKeyL1 : The hash value based on which the data was partitioned in the first pass.
* globalHisto : The histogram data structure for the second partitioning pass, located in the global memory.
* globalHistoL1 : The histogram data structure generated during level 1 partitioning, also located in global memory.
*/
/**
 * Builds the level-2 histogram for one level-1 partition per thread block.
 * records       : data already partitioned by the level-1 pass.
 * hashKeyL2     : fan-out of this (second) pass.
 * hashKeyL1     : fan-out of the first pass (keys are divided by it first).
 * globalHisto   : level-2 histogram output, hashKeyL2 entries per block;
 *                 accumulated with atomicAdd, so the caller must zero it.
 * globalHistoL1 : level-1 histogram (start offsets of the L1 partitions).
 * gridDimL1     : grid size used during the level-1 pass.
 * Requires hashKeyL2 * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void histogram_build_L2_tile_opt_rcd(Record *records,
                                                int hashKeyL2,
                                                int hashKeyL1,
                                                int *globalHisto,
                                                int *globalHistoL1,
                                                int gridDimL1) {
    // Dynamic shared memory for this block's bucket counters.
    extern __shared__ int sharedHisto[];
    int hashValue; // level-2 bucket of the current tuple
    long startIndex,
        endIndex; // bounds of the level-1 partition handled by this block
    // Block b processes level-1 partition b.
    startIndex = globalHistoL1[blockIdx.x * gridDimL1];
    endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
    // Shared memory is uninitialized: clear the counters first.
    for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
        sharedHisto[i] = 0;
    }
    __syncthreads();
    // Count keys within this partition only.
    for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
        hashValue = (records[i].key / hashKeyL1) % hashKeyL2;
        atomicAdd(&sharedHisto[hashValue], 1);
    }
    __syncthreads();
    // Accumulate into global memory (atomic: multiple launches/blocks may
    // target the same row).
    for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
        atomicAdd(&globalHisto[blockIdx.x * hashKeyL2 + i], sharedHisto[i]);
    }
}
/**reorder_L2(data *, data *, data , data , int *, int *, data *, data *)
* This function re-orders a relation based on the histogram built using 'histogram_build_L2' kernel.
* key : The array containing the key values which has already been partitioned by reorder_L1.
* id : The array containing the id values which has already been partitioned by reorder_L1.
* hashKey : The hash value based on which the data will be partitioned in the second pass.
* hashKeyL1 : The hash value based on which the data was partitioned in the first pass.
* globalHisto : The histogram data structure for the second partitioning pass, located in the global memory.
* globalHistoL1 : The histogram data structure generated during level 1 partitioning, also located in global memory.
* keyOut : The array storing the re-ordered key values.
* idOut : The array storing the re-ordered id values.
*/
/**
 * Second-pass scatter: re-orders one level-1 partition per thread block into
 * its level-2 buckets, using the histogram built by
 * histogram_build_L2_tile_opt_rcd (assumed prefix-summed into start offsets).
 * in            : data already partitioned by the level-1 pass.
 * hashKeyL2     : fan-out of this pass.
 * hashKeyL1     : fan-out of the first pass.
 * globalHisto   : level-2 start offsets, hashKeyL2 entries per block.
 * globalHistoL1 : level-1 histogram (start offsets of the L1 partitions).
 * out           : destination array.
 * gridDimL1     : grid size used during the level-1 pass.
 * Side effect: globalHisto is overwritten with the consumed counters, i.e.
 * each entry becomes the END offset of its bucket after this kernel.
 * Requires hashKeyL2 * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void reorder_L2_tile_opt_rcd(Record *in,
                                        int hashKeyL2,
                                        int hashKeyL1,
                                        int *globalHisto,
                                        int *globalHistoL1,
                                        Record *out,
                                        int gridDimL1) {
    // Dynamic shared memory for this block's offset counters.
    extern __shared__ int sharedHisto[];
    int hashValue; // level-2 bucket of the current tuple
    long startIndex,
        endIndex; // bounds of the level-1 partition handled by this block
    int pos; // scatter destination handed out by the atomic counter
    // Block b processes level-1 partition b.
    startIndex = globalHistoL1[blockIdx.x * gridDimL1];
    endIndex = globalHistoL1[(blockIdx.x + 1) * gridDimL1];
    // Load this block's row of start offsets.
    for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
        sharedHisto[i] = globalHisto[blockIdx.x * hashKeyL2 + i];
    }
    // Offsets must be fully staged before scattering begins.
    __syncthreads();
    // Scatter the partition; within a bucket, tuple order is nondeterministic.
    for (long i = threadIdx.x + startIndex; i < endIndex; i += blockDim.x) {
        hashValue = (in[i].key / hashKeyL1) % hashKeyL2;
        pos = atomicAdd(&sharedHisto[hashValue], 1);
        out[pos] = in[i];
    }
    // All scatters must finish before the counters are published.
    __syncthreads();
    // Write the advanced counters back (now the end offset of each bucket).
    for (int i = threadIdx.x; i < hashKeyL2; i += blockDim.x) {
        globalHisto[blockIdx.x * hashKeyL2 + i] = sharedHisto[i];
    }
}
}
}
}
} |
46f85cc1c9dd88dbf5944aabc1fedbc73cd98897.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#include <cstdio>
#include <omp.h>
#define N 300
#define NSTREAM 4
// Four identical dummy kernels used to demonstrate concurrent kernel
// execution on separate streams; each just burns time in a tan() loop.
// NOTE(review): `sum` is never stored, so an optimizing compiler may remove
// the loop entirely and skew the timing demo -- confirm in the generated code.
__global__ void kernel_1()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Same busy-loop as kernel_1; a distinct symbol so profilers show four kernels.
__global__ void kernel_2()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Same busy-loop as kernel_1.
__global__ void kernel_3()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Same busy-loop as kernel_1.
__global__ void kernel_4()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Host driver: launches the four dummy kernels into NSTREAM streams and
// times the whole batch with HIP events.  Demonstrates Hyper-Q style
// concurrency.  API return codes are not checked (demo code).
int main(int argc, char **argv)
{
    int n_streams = NSTREAM;
    int isize = 1;  // total threads (one per grid)
    int iblock = 1; // threads per block
    float elapsed_time;
    int dev = 0;
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
    hipSetDevice(dev);
    // check if device support hyper-q
    if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
    {
        if (deviceProp.concurrentKernels == 0)
        {
            printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
            printf("> CUDA kernel runs will be serialized\n");
        }
        else
        {
            printf("> GPU does not support HyperQ\n");
            printf("> CUDA kernel runs will have limited concurrency\n");
        }
    }
    printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Allocate and initialize an array of stream handles
    hipStream_t *streams = (hipStream_t *)malloc(n_streams * sizeof(hipStream_t));
    for (int i = 0; i < n_streams; i++)
    {
        hipStreamCreate(&(streams[i]));
    }
    // set up execution configuration (a single 1x1 launch per kernel)
    dim3 block(iblock);
    dim3 grid(isize / iblock);
    printf("> grid %d block %d\n", grid.x, block.x);
    // creat events
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // record start event
    hipEventRecord(start, 0);
    // dispatch job with depth first ordering
    omp_set_num_threads(n_streams);
    // NOTE(review): the `#pragma omp parallel` region is the BODY of this
    // for loop, so every iteration spawns n_streams OpenMP threads that all
    // launch into the same streams[i] (n_streams^2 launches per kernel).
    // The usual form of this example uses one parallel region with
    // i = omp_get_thread_num() -- confirm which behaviour is intended.
    for (int i = 0; i < n_streams; ++i)
#pragma omp parallel
    {
        hipLaunchKernelGGL(( kernel_1) , dim3(grid), dim3(block), 0, streams[i] , );
        hipLaunchKernelGGL(( kernel_2) , dim3(grid), dim3(block), 0, streams[i] , );
        hipLaunchKernelGGL(( kernel_3) , dim3(grid), dim3(block), 0, streams[i] , );
        hipLaunchKernelGGL(( kernel_4) , dim3(grid), dim3(block), 0, streams[i] , );
    }
    // record stop event (hipEventSynchronize blocks until all work finished)
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    // calculate elapsed time (event time is in ms; printed as seconds)
    hipEventElapsedTime(&elapsed_time, start, stop);
    printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
    // release all stream
    for (int i = 0; i < n_streams; i++)
    {
        hipStreamDestroy(streams[i]);
    }
    free(streams);
    // destroy events
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // reset device
    hipDeviceReset();
    system("Pause");
    return 0;
}
| 46f85cc1c9dd88dbf5944aabc1fedbc73cd98897.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#include <cstdio>
#include <omp.h>
#define N 300
#define NSTREAM 4
// Four identical dummy kernels used to demonstrate concurrent kernel
// execution on separate streams; each just burns time in a tan() loop.
// NOTE(review): `sum` is never stored, so an optimizing compiler may remove
// the loop entirely and skew the timing demo -- confirm in the generated code.
__global__ void kernel_1()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Same busy-loop as kernel_1; a distinct symbol so profilers show four kernels.
__global__ void kernel_2()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Same busy-loop as kernel_1.
__global__ void kernel_3()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Same busy-loop as kernel_1.
__global__ void kernel_4()
{
    double sum = 0.0;
    for (int i = 0; i < N; i++)
    {
        sum = sum + tan(0.1) * tan(0.1);
    }
}
// Host driver: launches the four dummy kernels into NSTREAM streams and
// times the whole batch with CUDA events.  Demonstrates Hyper-Q style
// concurrency.  API return codes are not checked (demo code).
int main(int argc, char **argv)
{
    int n_streams = NSTREAM;
    int isize = 1;  // total threads (one per grid)
    int iblock = 1; // threads per block
    float elapsed_time;
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
    cudaSetDevice(dev);
    // check if device support hyper-q
    if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
    {
        if (deviceProp.concurrentKernels == 0)
        {
            printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
            printf("> CUDA kernel runs will be serialized\n");
        }
        else
        {
            printf("> GPU does not support HyperQ\n");
            printf("> CUDA kernel runs will have limited concurrency\n");
        }
    }
    printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Allocate and initialize an array of stream handles
    cudaStream_t *streams = (cudaStream_t *)malloc(n_streams * sizeof(cudaStream_t));
    for (int i = 0; i < n_streams; i++)
    {
        cudaStreamCreate(&(streams[i]));
    }
    // set up execution configuration (a single 1x1 launch per kernel)
    dim3 block(iblock);
    dim3 grid(isize / iblock);
    printf("> grid %d block %d\n", grid.x, block.x);
    // creat events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // record start event
    cudaEventRecord(start, 0);
    // dispatch job with depth first ordering
    omp_set_num_threads(n_streams);
    // NOTE(review): the `#pragma omp parallel` region is the BODY of this
    // for loop, so every iteration spawns n_streams OpenMP threads that all
    // launch into the same streams[i] (n_streams^2 launches per kernel).
    // The usual form of this example uses one parallel region with
    // i = omp_get_thread_num() -- confirm which behaviour is intended.
    for (int i = 0; i < n_streams; ++i)
#pragma omp parallel
    {
        kernel_1 <<<grid, block, 0, streams[i] >>>();
        kernel_2 <<<grid, block, 0, streams[i] >>>();
        kernel_3 <<<grid, block, 0, streams[i] >>>();
        kernel_4 <<<grid, block, 0, streams[i] >>>();
    }
    // record stop event (cudaEventSynchronize blocks until all work finished)
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // calculate elapsed time (event time is in ms; printed as seconds)
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
    // release all stream
    for (int i = 0; i < n_streams; i++)
    {
        cudaStreamDestroy(streams[i]);
    }
    free(streams);
    // destroy events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // reset device
    cudaDeviceReset();
    system("Pause");
    return 0;
}
|
605155a82547a8a7aa7259d8ac0fe633ddb826ac.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
// Block-level sum reduction.  Each block loads blockDim.x floats into shared
// memory, reduces them tree-wise, and writes its partial sum to
// g_Intermediat[blockIdx.x].
// Preconditions (not checked): the input holds at least
// gridDim.x * blockDim.x elements, and blockDim.x is a power of two
// (VERSION 3 halves the active stride each step).
__global__ void Array_Reduction_Kernel(float * g_iData, float *g_Intermediat)
{
    extern __shared__ float sData[]; //Size is determined by the host
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int tId = threadIdx.x;
    //First every thread in the block puts its value intor the shared memory
    sData[tId] = g_iData[index];
    __syncthreads();
    //VERSION 1
    //This mod makes it inefficient
    //for (int t = 1; t < blockDim.x; t *= 2)
    //{
    //	if (tId % (2 * t) == 0) //Even threads or even elements of the array. i.e. thread0, thread2, ...
    //	{
    //		sData[tId] += sData[tId + t];
    //	}
    //	//At each iteration of the for loop. Thread wait at this barrier so other threads will read here.
    //	__syncthreads();
    //}
    //VERSION 2, this has bank conflict problem
    //	//Bank conflict means different threads are accessing
    //	//adjacent values in the memory. How far the threads can access
    //	//depends on the Bank Size. For example, if the bank size is 4 bytes
    //	//then the memory access would be as follow:
    //	//Bank		 |	 1	   |	 2	   |	3	    | ...
    //	//Address	 |	0 1 2 3	 |  4 5 6 7  |	8 9 10 11 | ...
    //	//Address	 | 64 65 66 67 | 68 69 70 71 | 72 73 74 75 | ...
    //	//Now if two threads aceess the adderesses 0, 1, this is inefficient
    //	//cause GPU will serialize these two threads. They have to be on two different banks
    //	//so we can aceess the memory in parallel.
    //for (int t = 1; t < blockDim.x; t *= 2)
    //{
    //	//We should do this
    //	int index2 = 2 * tId * t;
    //	if (index2 < blockDim.x)
    //	{
    //		sData[index2] += sData[index2 + t];
    //	}
    //	//At each iteration of the for loop. Thread wait at this barrier so other threads will read here.
    //	__syncthreads();
    //}
    //VERSION 3
    //Fixing the bank conflicts. Make the threads to acess further threads.
    //Sequential addressing: each step the active half adds the upper half.
    //Requires blockDim.x to be a power of two.
    for (unsigned int idx = blockDim.x / 2; idx > 0; idx >>= 1)
    {
        if (tId < idx)
        {
            sData[tId] += sData[idx + tId];
        }
        __syncthreads();
    }
    //Move the summation to the global memory now
    if (tId == 0)
    {
        g_Intermediat[blockIdx.x] = sData[0];
    }
}
// Host-side two-pass sum reduction.
// h_ArrayReduction : input array; element 0 is overwritten with the result.
// ArraySize        : number of floats; must equal blocks * threadPerBlock
//                    (1024 * 1024) -- the kernel has no bounds check.
// lSum             : receives the (truncated-to-long) total sum.
// Timing uses the Windows QueryPerformanceCounter API.
// Fix: all three device allocations are now freed (the intermediate and
// output buffers leaked in the original).
extern "C"
void Array_Reduction(float *h_ArrayReduction, unsigned int ArraySize, long &lSum)
{
    int blocks = 1024;
    int threadPerBlock = 1024; // 1024 blocks of 1024 threads per each block
    int iMemSize = threadPerBlock * sizeof(float); // dynamic shared memory per block
    // Device buffers: input, per-block partial sums, final scalar.
    float *d_ArrayReduction = nullptr;
    checkCudaErrors(hipMalloc((void **)&d_ArrayReduction, ArraySize * sizeof(float)));
    float *d_ArrayReductionIntermediate = nullptr;
    checkCudaErrors(hipMalloc((void **)&d_ArrayReductionIntermediate, blocks * sizeof(float)));
    float *d_ArrayReductionOut = nullptr;
    checkCudaErrors(hipMalloc((void **)&d_ArrayReductionOut, 1 * sizeof(float)));
    // Move the data from host to device
    checkCudaErrors(hipMemcpy(d_ArrayReduction, h_ArrayReduction, ArraySize * sizeof(float), hipMemcpyHostToDevice));
    long long lStart, lEnd, lFreq;
    QueryPerformanceFrequency((LARGE_INTEGER*)&lFreq);
    QueryPerformanceCounter((LARGE_INTEGER*)&lStart);
    // First pass: each block reduces its 1024 elements into one partial sum.
    Array_Reduction_Kernel << <blocks, threadPerBlock, iMemSize >> >(d_ArrayReduction, d_ArrayReductionIntermediate);
    // Second pass: a single block reduces the 1024 partial sums.
    blocks = 1;
    threadPerBlock = 1024;
    hipLaunchKernelGGL(( Array_Reduction_Kernel) , dim3(blocks), dim3(threadPerBlock), iMemSize , 0, d_ArrayReductionIntermediate, d_ArrayReductionOut);
    QueryPerformanceCounter((LARGE_INTEGER*)&lEnd);
    double dbTime = (lEnd - lStart)* 1000;
    dbTime /= lFreq;
    std::cout << "Total time: " << dbTime << " ms" << std::endl;
    // Blocking copy of the final scalar also synchronizes with the kernels.
    checkCudaErrors(hipMemcpy(h_ArrayReduction, d_ArrayReductionOut, 1 * sizeof(float), hipMemcpyDeviceToHost));
    lSum = h_ArrayReduction[0];
    // Release every device allocation (not just the input buffer).
    checkCudaErrors(hipFree(d_ArrayReduction));
    checkCudaErrors(hipFree(d_ArrayReductionIntermediate));
    checkCudaErrors(hipFree(d_ArrayReductionOut));
}
| 605155a82547a8a7aa7259d8ac0fe633ddb826ac.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
// Block-level sum reduction.  Each block loads blockDim.x floats into shared
// memory, reduces them tree-wise, and writes its partial sum to
// g_Intermediat[blockIdx.x].
// Preconditions (not checked): the input holds at least
// gridDim.x * blockDim.x elements, and blockDim.x is a power of two
// (VERSION 3 halves the active stride each step).
__global__ void Array_Reduction_Kernel(float * g_iData, float *g_Intermediat)
{
    extern __shared__ float sData[]; //Size is determined by the host
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int tId = threadIdx.x;
    //First every thread in the block puts its value intor the shared memory
    sData[tId] = g_iData[index];
    __syncthreads();
    //VERSION 1
    //This mod makes it inefficient
    //for (int t = 1; t < blockDim.x; t *= 2)
    //{
    //	if (tId % (2 * t) == 0) //Even threads or even elements of the array. i.e. thread0, thread2, ...
    //	{
    //		sData[tId] += sData[tId + t];
    //	}
    //	//At each iteration of the for loop. Thread wait at this barrier so other threads will read here.
    //	__syncthreads();
    //}
    //VERSION 2, this has bank conflict problem
    //	//Bank conflict means different threads are accessing
    //	//adjacent values in the memory. How far the threads can access
    //	//depends on the Bank Size. For example, if the bank size is 4 bytes
    //	//then the memory access would be as follow:
    //	//Bank		 |	 1	   |	 2	   |	3	    | ...
    //	//Address	 |	0 1 2 3	 |  4 5 6 7  |	8 9 10 11 | ...
    //	//Address	 | 64 65 66 67 | 68 69 70 71 | 72 73 74 75 | ...
    //	//Now if two threads aceess the adderesses 0, 1, this is inefficient
    //	//cause GPU will serialize these two threads. They have to be on two different banks
    //	//so we can aceess the memory in parallel.
    //for (int t = 1; t < blockDim.x; t *= 2)
    //{
    //	//We should do this
    //	int index2 = 2 * tId * t;
    //	if (index2 < blockDim.x)
    //	{
    //		sData[index2] += sData[index2 + t];
    //	}
    //	//At each iteration of the for loop. Thread wait at this barrier so other threads will read here.
    //	__syncthreads();
    //}
    //VERSION 3
    //Fixing the bank conflicts. Make the threads to acess further threads.
    //Sequential addressing: each step the active half adds the upper half.
    //Requires blockDim.x to be a power of two.
    for (unsigned int idx = blockDim.x / 2; idx > 0; idx >>= 1)
    {
        if (tId < idx)
        {
            sData[tId] += sData[idx + tId];
        }
        __syncthreads();
    }
    //Move the summation to the global memory now
    if (tId == 0)
    {
        g_Intermediat[blockIdx.x] = sData[0];
    }
}
// Host-side two-pass sum reduction.
// h_ArrayReduction : input array; element 0 is overwritten with the result.
// ArraySize        : number of floats; must equal blocks * threadPerBlock
//                    (1024 * 1024) -- the kernel has no bounds check.
// lSum             : receives the (truncated-to-long) total sum.
// Timing uses the Windows QueryPerformanceCounter API.
// Fix: all three device allocations are now freed (the intermediate and
// output buffers leaked in the original).
extern "C"
void Array_Reduction(float *h_ArrayReduction, unsigned int ArraySize, long &lSum)
{
    int blocks = 1024;
    int threadPerBlock = 1024; // 1024 blocks of 1024 threads per each block
    int iMemSize = threadPerBlock * sizeof(float); // dynamic shared memory per block
    // Device buffers: input, per-block partial sums, final scalar.
    float *d_ArrayReduction = nullptr;
    checkCudaErrors(cudaMalloc((void **)&d_ArrayReduction, ArraySize * sizeof(float)));
    float *d_ArrayReductionIntermediate = nullptr;
    checkCudaErrors(cudaMalloc((void **)&d_ArrayReductionIntermediate, blocks * sizeof(float)));
    float *d_ArrayReductionOut = nullptr;
    checkCudaErrors(cudaMalloc((void **)&d_ArrayReductionOut, 1 * sizeof(float)));
    // Move the data from host to device
    checkCudaErrors(cudaMemcpy(d_ArrayReduction, h_ArrayReduction, ArraySize * sizeof(float), cudaMemcpyHostToDevice));
    long long lStart, lEnd, lFreq;
    QueryPerformanceFrequency((LARGE_INTEGER*)&lFreq);
    QueryPerformanceCounter((LARGE_INTEGER*)&lStart);
    // First pass: each block reduces its 1024 elements into one partial sum.
    Array_Reduction_Kernel << <blocks, threadPerBlock, iMemSize >> >(d_ArrayReduction, d_ArrayReductionIntermediate);
    // Second pass: a single block reduces the 1024 partial sums.
    blocks = 1;
    threadPerBlock = 1024;
    Array_Reduction_Kernel <<<blocks, threadPerBlock, iMemSize >>> (d_ArrayReductionIntermediate, d_ArrayReductionOut);
    QueryPerformanceCounter((LARGE_INTEGER*)&lEnd);
    double dbTime = (lEnd - lStart)* 1000;
    dbTime /= lFreq;
    std::cout << "Total time: " << dbTime << " ms" << std::endl;
    // Blocking copy of the final scalar also synchronizes with the kernels.
    checkCudaErrors(cudaMemcpy(h_ArrayReduction, d_ArrayReductionOut, 1 * sizeof(float), cudaMemcpyDeviceToHost));
    lSum = h_ArrayReduction[0];
    // Release every device allocation (not just the input buffer).
    checkCudaErrors(cudaFree(d_ArrayReduction));
    checkCudaErrors(cudaFree(d_ArrayReductionIntermediate));
    checkCudaErrors(cudaFree(d_ArrayReductionOut));
}
|
c1f922beba75edb27ccd912d3981b89d35cdf746.hip | // !!! This is a file automatically generated by hipify!!!
/* Vector addition deom on GPU
To compile: nvcc -o testprog1 testprog1.cu
*/
#include <iostream>
#include <hip/hip_runtime.h>
// Kernel that executes on the CUDA device. This is executed by ONE
// stream processor
// Element-wise vector addition: C[i] = A[i] + B[i].
// One thread handles one element; a guard clause retires threads in the
// partial block at the end of the grid.
__global__ void vec_add(float* A, float* B, float* C, int N)
{
    // Flat global index of the element owned by this thread.
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// main routine that executes on the host
// Host driver: builds two 10M-element vectors, adds them on the GPU, and
// prints the first/last ten results.  API return codes are not checked
// (demo code).
int main(void)
{
    int n;
    int N = 10000000;
    size_t size = N * sizeof(float);
    // Allocate in HOST memory
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    // Initialize vectors
    for (n = 0; n < N; ++n) {
        h_A[n] = 3.2333 * n;
        h_B[n] = 8.09287 * n;
    }
    // Allocate in DEVICE memory
    float *d_A, *d_B, *d_C;
    hipMalloc(&d_A, size);
    hipMalloc(&d_B, size);
    hipMalloc(&d_C, size);
    // Copy vectors from host to device memory
    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    // Invoke kernel: ceil-divide N by the block size so every element is covered
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "Launching a grid of "
              << blocksPerGrid << " "
              << threadsPerBlock * blocksPerGrid
              << " threads" << std::endl;
    hipLaunchKernelGGL(( vec_add), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
    // Copy result from device memory into host memory
    // (blocking copy on the default stream also waits for the kernel)
    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    // Free device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    // Print the first and last 10 elements of the arrays
    for (n = 0; n < N; ++n) {
        if (n < 10 || n >= N - 10)
            std::cout << n << " " << h_A[n] << " " << h_B[n]
                      << " " << h_C[n] << std::endl;
    }
    free(h_A);
    free(h_B);
    free(h_C);
}
| c1f922beba75edb27ccd912d3981b89d35cdf746.cu | /* Vector addition deom on GPU
To compile: nvcc -o testprog1 testprog1.cu
*/
#include <iostream>
#include <cuda.h>
// Kernel that executes on the CUDA device. This is executed by ONE
// stream processor
// Element-wise vector addition: C[i] = A[i] + B[i].
// One thread handles one element; a guard clause retires threads in the
// partial block at the end of the grid.
__global__ void vec_add(float* A, float* B, float* C, int N)
{
    // Flat global index of the element owned by this thread.
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// main routine that executes on the host
// Host driver: builds two 10M-element vectors, adds them on the GPU, and
// prints the first/last ten results.  API return codes are not checked
// (demo code).
int main(void)
{
    int n;
    int N = 10000000;
    size_t size = N * sizeof(float);
    // Allocate in HOST memory
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    // Initialize vectors
    for (n = 0; n < N; ++n) {
        h_A[n] = 3.2333 * n;
        h_B[n] = 8.09287 * n;
    }
    // Allocate in DEVICE memory
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, size);
    cudaMalloc(&d_B, size);
    cudaMalloc(&d_C, size);
    // Copy vectors from host to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    // Invoke kernel: ceil-divide N by the block size so every element is covered
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "Launching a grid of "
              << blocksPerGrid << " "
              << threadsPerBlock * blocksPerGrid
              << " threads" << std::endl;
    vec_add<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    // Copy result from device memory into host memory
    // (blocking copy on the default stream also waits for the kernel)
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Print the first and last 10 elements of the arrays
    for (n = 0; n < N; ++n) {
        if (n < 10 || n >= N - 10)
            std::cout << n << " " << h_A[n] << " " << h_B[n]
                      << " " << h_C[n] << std::endl;
    }
    free(h_A);
    free(h_B);
    free(h_C);
}
|
1c8dbf6734aa7e95fb7dd3820c57f0a9d509684d.hip | // !!! This is a file automatically generated by hipify!!!
// Cdigo Matrix multiplication: C = A * B. Super simplificado.
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <chrono>
#define BLOCK_SIZE 32
#ifndef VECTOR_SIZE
#define VECTOR_SIZE 128
#endif
// Naive square matrix multiply: P = M * N, all Width x Width, row-major.
// One thread computes one output element; out-of-range threads retire early.
__global__ void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
    // Output coordinates of the element owned by this thread.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= Width || col >= Width)
        return;
    // Dot product of row `row` of M with column `col` of N.
    float acc = 0;
    for (int k = 0; k < Width; ++k)
        acc += M[row * Width + k] * N[k * Width + col];
    P[row * Width + col] = acc;
}
// Host driver: multiplies two all-ones VECTOR_SIZE x VECTOR_SIZE matrices on
// the GPU, times the kernel with HIP events, and checks every output element
// against the expected value (= VECTOR_SIZE) with a relative-error test.
// Fix: the timing events are now destroyed (they leaked in the original).
int main(int argc, char **argv)
{
    const int size = VECTOR_SIZE;
    // const int nIter = 100000;
    float *h_A = (float *)malloc(sizeof(float) * size * size);
    float *h_B = (float *)malloc(sizeof(float) * size * size);
    float *h_C = (float *)malloc(sizeof(float) * size * size);
    // All-ones inputs make the expected product trivially size * 1.0f.
    for (int i = 0; i < size * size; ++i) { h_A[i] = 1.0f; h_B[i] = 1.0f; }
    float *d_A, *d_B, *d_C;
    hipMalloc((void **) &d_A, sizeof(float) * size * size);
    hipMalloc((void **) &d_B, sizeof(float) * size * size);
    hipMalloc((void **) &d_C, sizeof(float) * size * size);
    hipMemcpy(d_A, h_A, sizeof(float) * size * size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, sizeof(float) * size * size, hipMemcpyHostToDevice);
    // One BLOCK_SIZE x BLOCK_SIZE block per output tile; assumes size is a
    // multiple of BLOCK_SIZE (the kernel guards out-of-range threads anyway).
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(size / threads.x, size / threads.y);
    // -------------------------------------------------------------------------------
    // MATRIXMULKERNEL
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, NULL);
    // for (int j = 0; j < nIter; j++)
    hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid),dim3(threads), 0, 0, d_A, d_B, d_C, size);
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    // Compute and print the performance (milliseconds for the single launch)
    // float msecPerMatrixMul = msecTotal / nIter;
    float msecPerMatrixMul = msecTotal;
    printf("Time= %f\n", msecPerMatrixMul);
    // Copy result from device to host
    hipMemcpy(h_C, d_C, sizeof(float) * size * size, hipMemcpyDeviceToHost);
    // test relative error by the formula
    // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(size * size); i++)
    {
        double abs_err = fabs(h_C[i] - (size * 1.0f));
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/size ;
        if (rel_err > eps)
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], size*1.0f, eps);
    }
    // Release timing events and all host/device memory.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    free(h_A); free(h_B); free(h_C);
    hipFree(d_A); hipFree(d_B); hipFree(d_C);
    return(0);
}
| 1c8dbf6734aa7e95fb7dd3820c57f0a9d509684d.cu | // Código Matrix multiplication: C = A * B. Super simplificado.
#include <stdio.h>
#include <cuda_runtime.h>
#include <chrono>
#define BLOCK_SIZE 32
#ifndef VECTOR_SIZE
#define VECTOR_SIZE 128
#endif
// Naive square matrix multiply: P = M * N, all Width x Width, row-major.
// One thread computes one output element; out-of-range threads retire early.
__global__ void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
    // Output coordinates of the element owned by this thread.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= Width || col >= Width)
        return;
    // Dot product of row `row` of M with column `col` of N.
    float acc = 0;
    for (int k = 0; k < Width; ++k)
        acc += M[row * Width + k] * N[k * Width + col];
    P[row * Width + col] = acc;
}
// Host driver: multiplies two all-ones VECTOR_SIZE x VECTOR_SIZE matrices on
// the GPU, times the kernel with CUDA events, and checks every output element
// against the expected value (= VECTOR_SIZE) with a relative-error test.
// Fix: the timing events are now destroyed (they leaked in the original).
int main(int argc, char **argv)
{
    const int size = VECTOR_SIZE;
    // const int nIter = 100000;
    float *h_A = (float *)malloc(sizeof(float) * size * size);
    float *h_B = (float *)malloc(sizeof(float) * size * size);
    float *h_C = (float *)malloc(sizeof(float) * size * size);
    // All-ones inputs make the expected product trivially size * 1.0f.
    for (int i = 0; i < size * size; ++i) { h_A[i] = 1.0f; h_B[i] = 1.0f; }
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, sizeof(float) * size * size);
    cudaMalloc((void **) &d_B, sizeof(float) * size * size);
    cudaMalloc((void **) &d_C, sizeof(float) * size * size);
    cudaMemcpy(d_A, h_A, sizeof(float) * size * size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(float) * size * size, cudaMemcpyHostToDevice);
    // One BLOCK_SIZE x BLOCK_SIZE block per output tile; assumes size is a
    // multiple of BLOCK_SIZE (the kernel guards out-of-range threads anyway).
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(size / threads.x, size / threads.y);
    // -------------------------------------------------------------------------------
    // MATRIXMULKERNEL
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);
    // for (int j = 0; j < nIter; j++)
    MatrixMulKernel<<<grid,threads>>>(d_A, d_B, d_C, size);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    // Compute and print the performance (milliseconds for the single launch)
    // float msecPerMatrixMul = msecTotal / nIter;
    float msecPerMatrixMul = msecTotal;
    printf("Time= %f\n", msecPerMatrixMul);
    // Copy result from device to host
    cudaMemcpy(h_C, d_C, sizeof(float) * size * size, cudaMemcpyDeviceToHost);
    // test relative error by the formula
    // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(size * size); i++)
    {
        double abs_err = fabs(h_C[i] - (size * 1.0f));
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/size ;
        if (rel_err > eps)
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], size*1.0f, eps);
    }
    // Release timing events and all host/device memory.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_A); free(h_B); free(h_C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return(0);
}
|
b2cf86cbf58e4669b8d76ad87bf534657c3e4827.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "WarpSelectImpl_hip.cuh"
#include "../DeviceDefs.cuh"
namespace faiss { namespace gpu {
// Explicit instantiation of the warp-based top-k selection kernel for float
// with k = 2048, emitted by the WARP_SELECT_IMPL macro from
// WarpSelectImpl_hip.cuh.  NOTE(review): the remaining macro arguments
// (false, 8) are presumably the sort direction and per-thread queue length —
// confirm against the macro definition.
// Guarded so this (large) instantiation is only compiled when the build-wide
// selection limit permits k >= 2048.
#if GPU_MAX_SELECTION_K >= 2048
WARP_SELECT_IMPL(float, false, 2048, 8);
#endif
} } // namespace
| b2cf86cbf58e4669b8d76ad87bf534657c3e4827.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "WarpSelectImpl.cuh"
#include "../DeviceDefs.cuh"
namespace faiss { namespace gpu {
// Explicit instantiation of the warp-based top-k selection kernel for float
// with k = 2048, emitted by the WARP_SELECT_IMPL macro from
// WarpSelectImpl.cuh.  NOTE(review): the remaining macro arguments
// (false, 8) are presumably the sort direction and per-thread queue length —
// confirm against the macro definition.
// Guarded so this (large) instantiation is only compiled when the build-wide
// selection limit permits k >= 2048.
#if GPU_MAX_SELECTION_K >= 2048
WARP_SELECT_IMPL(float, false, 2048, 8);
#endif
} } // namespace
|
2210faf183127fbc5d1aff59a05e13284f885708.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
#define GAPX (22)
#define GAPY (6)
#define EXTENT (5)
// Fill kernel: each thread writes `value` into the single element of `input`
// at its flat global thread index (1D grid, 1D block).
// NOTE(review): there is no bounds check, and the only visible caller
// (initialize_array) rounds the grid up with FORMA_CEIL — for sizes that are
// not a multiple of the block width the last block writes past `size`
// elements unless the buffer is over-allocated; confirm at the call sites.
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
// Launches __kernel_init__ to set every element of d_input to `value`,
// using full-width blocks and a grid rounded up to cover `size` elements.
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
  const int threadsPerBlock = FORMA_MAX_BLOCKDIM_0;
  const int numBlocks = FORMA_CEIL(size, threadsPerBlock);
  hipLaunchKernelGGL(( __kernel_init__), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_input, value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
/*
 * Forma-generated time-tiled 3D stencil sweep for the interior tile position
 * (no X/Y halo offset).  The volume is L planes (z, outer loop) of M rows (y)
 * by N columns (x).  Each z-iteration applies the same 7-point stencil four
 * times in a pipeline: __tilevar_2__ -> __tilevar_3__ -> __tilevar_4__ ->
 * __tilevar_5__ -> __var_1__.  For each stage the current plane lives in the
 * shared-memory tile while the plane above / below is carried in the
 * per-thread registers t{2..5} / b{2..5}.  Border rows/columns of the
 * intermediate stages are exported to __copy_arr_{0,1,2}__ for consumption by
 * the companion gap kernels.  Requires dynamic shared memory of
 * 4 * FORMA_BLOCKDIM_X * FORMA_BLOCKDIM_Y floats
 * (see __blockSizeToSMemSize___kernel___forma_kernel__0__).
 * The result for plane z-3 is committed to __var_1__ on each iteration
 * (pipeline depth 4).  The guards use bitwise &,| on comparison results —
 * branchless generated code, intentional.
 */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
// Carve four X*Y float tiles out of the dynamically-sized shared buffer.
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Tile origin: blocks are spaced FORMA_BLOCKDIM+GAP apart in x and y.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// t* = plane above (z+1), b* = plane below (z-1) for each pipeline stage.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
// Prime the pipeline: plane 0 into the stage-2 tile, plane 1 into t2.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Advance the stage-2 window one plane: tile -> b2, t2 -> tile, load new t2.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1 of 4: 7-point stencil on the stage-2 tile, result into the t3 pipe.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Export a 2-wide border of stage-1 results for the neighboring gap kernels.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3;//__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
__syncthreads ();
// Stage 2 of 4: stencil on the stage-3 tile, result into the t4 pipe.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4;
t4 = __temp_a80__;
//printf ("var1[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-1, __iter_4__, __iter_5__, __temp_a80__, t3);
}
}
// Export the stage-2 border (note the compute band shrinks by 1 each stage).
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4;//__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
__syncthreads ();
// Stage 3 of 4: stencil on the stage-4 tile, result into the t5 pipe.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Export the stage-3 border.
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5;// __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
__syncthreads ();
// Stage 4 of 4: final stencil; commit the plane that left the pipeline (z-3).
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
// Dynamic shared-memory footprint (bytes) of __kernel___forma_kernel__0__
// for a given block shape: four float tiles of blockDim.x * blockDim.y
// elements each.
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  const int tileElems = (int)(blockDim.x) * (int)(blockDim.y);
  return (int)(sizeof(float) * (4 * tileElems));
}
/* X+GAP, Y, Z */
/*
 * Forma-generated companion kernel covering the X-gap tile position: the tile
 * origin is shifted right by FORMA_BLOCKDIM_X (plus an EXTENT-wide halo on the
 * left of the thread window).  Same 4-stage time-tiled pipeline as
 * __kernel___forma_kernel__0__, with one addition per intermediate stage:
 * columns outside this block's own compute band reload their b/current/t
 * planes from __copy_arr_{0,1,2}__ (written by the interior kernel for
 * z-2, z-1 and z respectively), so the gap region sees correct halos.
 * Requires the same dynamic shared memory: 4 * X * Y floats.  Guards use
 * bitwise &,| on comparisons — branchless generated code, intentional.
 */
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
// Four X*Y float tiles carved out of the dynamic shared buffer.
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Tile origin shifted right by one tile width to land on the X gap.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)FORMA_BLOCKDIM_X;
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// t* = plane above (z+1), b* = plane below (z-1) for each pipeline stage.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
// Prime the pipeline: plane 0 into the stage-2 tile, plane 1 into t2.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Advance the stage-2 window one plane: tile -> b2, t2 -> tile, load new t2.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1 of 4: stencil on the stage-2 tile, result into the t3 pipe.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Export stage-1 border rows for the Y-gap kernels.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2)) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3;//__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
// Reload stage-1 halo columns (outside this block's compute band) from the
// interior kernel's exports: z-2 -> b3, z-1 -> tile, z -> t3.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
b3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 2 of 4: stencil on the stage-3 tile, result into the t4 pipe.
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4;
t4 = __temp_a80__;
//printf ("var1[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-1, __iter_4__, __iter_5__, __temp_a80__, t3);
}
}
// Export stage-2 border rows.
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2)) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4;//__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
// Reload stage-2 halo columns from __copy_arr_1__ (z-2, z-1, z).
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
b4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 3 of 4: stencil on the stage-4 tile, result into the t5 pipe.
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Export stage-3 border rows.
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2)) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5;//__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
// Reload stage-3 halo columns from __copy_arr_2__ (z-2, z-1, z).
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
b5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 4 of 4: final stencil; commit the plane that left the pipeline (z-3).
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("__iter_0__ = %d, var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_0__, __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
/* X, Y+GAP, Z */
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3;//__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
b3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t4;
t4 = __temp_a80__;
//printf ("var1[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-1, __iter_4__, __iter_5__, __temp_a80__, t3);
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4;//__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-2),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
b4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5;//__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-3),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
b5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
/* X+GAP, Y+GAP, Z */
/*
 * Corner-region pass of the 7-point 3D Jacobi stencil.  This kernel handles
 * the tile shifted by FORMA_BLOCKDIM in both X and Y — the gap region between
 * neighbouring tiles of kernel 0 — streaming plane by plane along Z.
 *
 * Pipeline: __tilevar_2__..__tilevar_5__ each hold one Z-plane of four chained
 * stencil applications in shared memory; the t*/b* registers hold the planes
 * above (z+1) and below (z-1) the one in shared memory, so every loop
 * iteration advances Z by one.  Halo values produced by the other region
 * kernels (0/1/2) are imported through __copy_arr_0__/1__/2__ in global
 * memory; this corner kernel only consumes halos, it exports none.
 *
 * Launch contract: same grid/block shape as kernel 0, with the dynamic
 * shared-memory size from __blockSizeToSMemSize___kernel___forma_kernel__0__.
 */
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
// Carve four FORMA_BLOCKDIM_Y x FORMA_BLOCKDIM_X float tiles out of the
// dynamic shared-memory arena, one per pipeline stage.
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Tile origin, offset by one full block in X and Y into the gap regions.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
// t* = plane above / b* = plane below the shared-memory plane, per stage.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Global (y, x) coordinates of this thread, including the EXTENT halo ring.
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
// Prime the pipeline: plane z=0 into shared memory, plane z=1 into t2.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Stream over Z; each iteration applies the four chained stencil stages.
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Rotate the raw-input planes: b2 <- current, shared <- t2, t2 <- next.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1: 7-point stencil on the raw-input tile, result staged into t3.
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Import stage-1 halo planes produced by the other region kernels.
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) | __iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
b3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 2: stencil on the stage-1 tile, result staged into t4.
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t4;
t4 = __temp_a80__;
}
}
// Import stage-2 halo planes.
if( __iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__-2),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
b4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 3: stencil on the stage-2 tile, result staged into t5.
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Import stage-3 halo planes.
if(__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__-3),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
b5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 4: final stencil application; the pipeline is 3 planes deep, so the
// result written this iteration belongs to plane __iter_2__-3.
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
/*Device code End */
/* Host Code Begin */
/*
 * Host driver for the 7-point 3D Jacobi stencil over an L x M x N volume.
 * Allocates device buffers, copies the input, launches the four region
 * kernels (interior, X-gap, Y-gap, corner) that cooperate via the
 * __copy_arr_* halo-exchange buffers, copies the result into __var_0__,
 * and frees all device memory.
 *
 * h_input / __var_0__ may be host or device pointers; the copy kind is
 * detected at runtime with hipPointerGetAttributes.
 * NOTE(review): most HIP API return codes here are unchecked apart from the
 * Check_CUDA_Error calls — assumed acceptable for this generated code.
 */
extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
// Device copy of the input volume.
float * input;
hipMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
// Detect whether h_input already lives in device memory.
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
// Clear the sticky error hipPointerGetAttributes leaves for plain host pointers.
hipGetLastError();
// NOTE(review): when h_input is device memory the copy is skipped entirely,
// yet the kernels read `input`, not h_input — confirm this path is intended.
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
// Device output buffer.
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
// Halo-exchange buffers: one per intermediate stencil stage.
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
// Device shared-memory limit; queried but not otherwise used below.
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
// Fixed 32x16 block; grid covers the X/Y extent in (block+GAP)-sized strides,
// the gaps being filled by kernels 1-3.
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
// Dynamic shared memory: four float tiles per block (see helper).
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
// unrollConfig mirrors the block config; all four kernels share it.
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
// The four kernels run sequentially on the default stream, so each sees the
// halo data its predecessors wrote to the __copy_arr_* buffers.
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
// Copy the result out; hipMemcpy is blocking, so it also synchronizes with
// the kernels above.
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
| 2210faf183127fbc5d1aff59a05e13284f885708.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
#define GAPX (22)
#define GAPY (6)
#define EXTENT (5)
// Device-side fill: each thread writes `value` into its own slot of `input`.
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
// Flat 1-D global thread index.
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
// NOTE(review): no bounds check — initialize_array rounds the grid up, so
// threads in the last block write past the logical element count; confirm
// the device allocations are padded to a block-size multiple.
input[loc] = value;
}
// Fill a device array of `size` elements with `value`, one thread per
// element, rounded up to whole blocks of FORMA_MAX_BLOCKDIM_0 threads.
// NOTE(review): __kernel_init__ takes no element count, so the round-up
// threads in the last block write past `size` — confirm allocations are
// padded accordingly.
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
  const int threadsPerBlock = FORMA_MAX_BLOCKDIM_0;
  const int numBlocks = FORMA_CEIL(size, threadsPerBlock);
  __kernel_init__<<<dim3(numBlocks), dim3(threadsPerBlock)>>>(d_input, value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
/*
 * Interior pass of the 7-point 3D Jacobi stencil: covers the base tile of
 * each thread block (kernels 1-3 cover the X/Y gap regions), streaming plane
 * by plane along Z.
 *
 * Pipeline: __tilevar_2__..__tilevar_5__ each hold one Z-plane of four chained
 * stencil applications in shared memory; the t*/b* registers hold the planes
 * above (z+1) and below (z-1) the one in shared memory.  This kernel exports
 * the boundary rings of each intermediate stage to __copy_arr_0__/1__/2__ so
 * the gap-region kernels can consume them as halos.
 *
 * Launch contract: dynamic shared memory sized by
 * __blockSizeToSMemSize___kernel___forma_kernel__0__ for the same block shape.
 */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
// Carve four FORMA_BLOCKDIM_Y x FORMA_BLOCKDIM_X float tiles out of the
// dynamic shared-memory arena, one per pipeline stage.
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Tile origin: blocks are spaced (blockDim + GAP) apart in X and Y.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// t* = plane above / b* = plane below the shared-memory plane, per stage.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
// Global (y, x) coordinates of this thread within the tile.
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
// Prime the pipeline: plane z=0 into shared memory, plane z=1 into t2.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Rest of the computation
// Stream over Z; each iteration applies the four chained stencil stages.
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Rotate the raw-input planes: b2 <- current, shared <- t2, t2 <- next.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1: 7-point stencil on the raw-input tile, result staged into t3.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Export the stage-1 boundary ring so the gap-region kernels can import it.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3;//__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
__syncthreads ();
// Stage 2: stencil on the stage-1 tile, result staged into t4.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4;
t4 = __temp_a80__;
//printf ("var1[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-1, __iter_4__, __iter_5__, __temp_a80__, t3);
}
}
// Export the stage-2 boundary ring.
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4;//__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
__syncthreads ();
// Stage 3: stencil on the stage-2 tile, result staged into t5.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Export the stage-3 boundary ring.
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5;// __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
__syncthreads ();
// Stage 4: final stencil application; the pipeline is 3 planes deep, so the
// result written this iteration belongs to plane __iter_2__-3.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
// Bytes of dynamic shared memory needed by the forma kernels for a given
// block shape: four float tiles of blockDim.y x blockDim.x elements each
// (__tilevar_2__ .. __tilevar_5__).
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  const int tileElems = (int)(blockDim.y) * (int)(blockDim.x);
  return (int)(4 * sizeof(float) * tileElems);
}
/* X+GAP, Y, Z */
/* Z-streamed 4-stage stencil pipeline for the x-gap strip between the tiles
 * of kernel 0 (note the extra +FORMA_BLOCKDIM_X in the block's x origin; see
 * the "X+GAP, Y, Z" marker above).  Each pipeline stage keeps one z-plane in
 * shared memory, plus the planes directly above/below that stage's current
 * z in registers (t<k>/b<k>).  Plane values computed near tile edges are
 * exchanged with neighbouring blocks through the global buffers
 * __copy_arr_0__/__copy_arr_1__/__copy_arr_2__.
 * NOTE(review): relies on __FORMA_SHARED_MEM__ (dynamic shared memory base),
 * GAPX/GAPY, EXTENT and FORMA_MAX/FORMA_MIN being defined earlier in this
 * generated file; the launch must supply at least
 * 4*FORMA_BLOCKDIM_X*FORMA_BLOCKDIM_Y floats of dynamic shared memory. */
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
// Carve four FORMA_BLOCKDIM_Y x FORMA_BLOCKDIM_X float planes (one per
// pipeline stage) out of the dynamic shared-memory region.
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Block origin in the (x, y) plane; x starts past kernel 0's tile so this
// kernel covers the GAPX-wide strip between tiles.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)FORMA_BLOCKDIM_X;
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// t<k>/b<k>: register copies of the planes at z+1 / z-1 for pipeline stage k.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
// Prime the pipeline: plane z=0 into shared memory, plane z=1 into t2.
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Rotate the input planes: b2 <- plane z-1, shared tile <- plane z, t2 <- plane z+1.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1: 7-point stencil on the stage-0 plane; rotate the result into the
// t3/b3/__tilevar_3__ pipeline storage.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Publish stage-1 values computed near the tile's y-edges so neighbouring
// blocks can read them from __copy_arr_0__.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2)) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3;//__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
// Import stage-1 halo columns produced by other blocks; note the z-lag of
// 2/1/0 planes matching this stage's pipeline depth.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
b3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 2: same stencil applied to the stage-1 plane; shrinks the valid
// region by one cell on each side.
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4;
t4 = __temp_a80__;
//printf ("var1[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-1, __iter_4__, __iter_5__, __temp_a80__, t3);
}
}
// Publish / import stage-2 halos via __copy_arr_1__ (same pattern as stage 1).
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2)) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4;//__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
b4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 3: stencil on the stage-2 plane, rotating into the t5/b5/__tilevar_5__
// pipeline storage.
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Publish / import stage-3 halos via __copy_arr_2__.
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2)) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5;//__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
b5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 4: final stencil application; the finished plane corresponds to
// z = __iter_2__-3 (pipeline depth) and is written to the output array.
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("__iter_0__ = %d, var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_0__, __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
/* X, Y+GAP, Z */
/* Z-streamed 4-stage stencil pipeline for the y-gap strip between the tiles
 * of kernel 0 (note the extra +FORMA_BLOCKDIM_Y in the block's y origin; see
 * the "X, Y+GAP, Z" marker above).  Mirror image of kernel 1 with the roles
 * of x and y swapped: halo ROWS (x-edges) are published, halo COLUMNS
 * (y-edges) are imported from __copy_arr_0__/__copy_arr_1__/__copy_arr_2__.
 * NOTE(review): depends on __FORMA_SHARED_MEM__, GAPX/GAPY, EXTENT and
 * FORMA_MAX/FORMA_MIN defined earlier in this generated file; requires
 * 4*FORMA_BLOCKDIM_X*FORMA_BLOCKDIM_Y floats of dynamic shared memory. */
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
// Four shared-memory planes, one per pipeline stage.
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Block origin: y starts past kernel 0's tile so this kernel covers the
// GAPY-high strip between tiles.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
// t<k>/b<k>: register copies of the planes at z+1 / z-1 for pipeline stage k.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Prime the pipeline: plane z=0 into shared memory, plane z=1 into t2.
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Rotate input planes: b2 <- z-1, shared tile <- z, t2 <- z+1.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1: 7-point stencil on the stage-0 plane; rotate into t3/b3/__tilevar_3__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Publish stage-1 values near the tile's x-edges to __copy_arr_0__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2)) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3;//__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
}
}
}
// Import stage-1 halo rows computed by other blocks (z-lag 2/1/0).
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
b3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 2: stencil on the stage-1 plane; valid region shrinks by one cell per side.
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t4;
t4 = __temp_a80__;
//printf ("var1[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-1, __iter_4__, __iter_5__, __temp_a80__, t3);
}
}
// Publish / import stage-2 halos via __copy_arr_1__ (same pattern as stage 1).
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2)) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4;//__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-2),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
b4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 3: stencil on the stage-2 plane; rotate into t5/b5/__tilevar_5__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Publish / import stage-3 halos via __copy_arr_2__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2)) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5;//__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
}
}
}
if(__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-3),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
b5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 4: final stencil application; store finished plane z = __iter_2__-3.
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
/* X+GAP, Y+GAP, Z */
/* Z-streamed 4-stage stencil pipeline for the corner regions where the x-gap
 * and y-gap strips overlap (block origin shifted by FORMA_BLOCKDIM_X in x
 * AND FORMA_BLOCKDIM_Y in y; see the "X+GAP, Y+GAP, Z" marker above).
 * Unlike kernels 1 and 2 it only CONSUMES halo values from
 * __copy_arr_0__/__copy_arr_1__/__copy_arr_2__ (produced by the other
 * kernels); its edge test covers both x- and y-sides of the tile.
 * NOTE(review): depends on __FORMA_SHARED_MEM__, GAPX/GAPY, EXTENT and
 * FORMA_MAX/FORMA_MIN defined earlier in this generated file; requires
 * 4*FORMA_BLOCKDIM_X*FORMA_BLOCKDIM_Y floats of dynamic shared memory. */
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
// Four shared-memory planes, one per pipeline stage.
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Block origin: offset past kernel 0's tile in BOTH x and y (corner region).
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
// t<k>/b<k>: register copies of the planes at z+1 / z-1 for pipeline stage k.
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Prime the pipeline: plane z=0 into shared memory, plane z=1 into t2.
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
}
__syncthreads ();
for (int __iter_2__ = 1; __iter_2__ <= L-1; __iter_2__++) {
// Rotate input planes: b2 <- z-1, shared tile <- z, t2 <- z+1.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1))) {
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
b2 = __tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
}
__syncthreads ();
// Stage 1: 7-point stencil on the stage-0 plane; rotate into t3/b3/__tilevar_3__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
float __temp_a3__ = (__tilevar_2__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a7__ = (__tilevar_2__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t2);
float __temp_a28__ = (__temp_a23__ + 0.166f * b2);
float __temp_a32__ = (__tilevar_2__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t3;
t3 = __temp_a33__;
//printf ("var0[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__, __iter_4__, __iter_5__, __temp_a33__, t2);
}
}
// Import stage-1 halos produced by the other kernels (either x- or y-side
// outside this stage's interior region; z-lag 2/1/0).
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
if (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) | __iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) {
b3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t3 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 2: stencil on the stage-1 plane; valid region shrinks by one cell per side.
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_a50__ = (__tilevar_3__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a54__ = (__tilevar_3__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t3);
float __temp_a75__ = (__temp_a70__ + 0.166f * b3);
float __temp_a79__ = (__tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b4 = __tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t4;
t4 = __temp_a80__;
}
}
// Import stage-2 halos via __copy_arr_1__ (same pattern as stage 1).
if( __iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__-2),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) {
b4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t4 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 3: stencil on the stage-2 plane; rotate into t5/b5/__tilevar_5__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) {
if( __iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
float __temp_a94__ = (__tilevar_4__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a95__ = (__tilevar_4__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t4);
float __temp_a104__ = (__temp_a102__ + 0.166f * b4);
float __temp_a105__ = (__tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b5 = __tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = t5;
t5 = __temp_a106__;
//printf ("var2[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-2, __iter_4__, __iter_5__, __temp_a106__, t4);
}
}
// Import stage-3 halos via __copy_arr_2__.
if(__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2)) ){
if (__iter_4__ < (FORMA_MAX((__iter_1__-3),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) {
b5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))];
__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))];
t5 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
}
}
__syncthreads ();
// Stage 4: final stencil application; store finished plane z = __iter_2__-3.
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) {
if(__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_a120__ = (__tilevar_5__[__iter_5__+1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a121__ = (__tilevar_5__[__iter_5__-1+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+1+(EXTENT-__iter_1__))]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-1+(EXTENT-__iter_1__))]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t5);
float __temp_a130__ = (__temp_a128__ + 0.166f * b5);
float __temp_a131__ = (__tilevar_5__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
//printf ("var3[%d][%d][%d] = %.6f (%.6f) \n", __iter_2__-3, __iter_4__, __iter_5__, __temp_a132__, t5);
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
__kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
__kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
13418b6714ca019aebbd7ddc7ee9a17af535bdb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
#include <hip/hip_complex.h>
__global__ void floor_double(int n,int idx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = floor(dy[i]);
}
}
| 13418b6714ca019aebbd7ddc7ee9a17af535bdb5.cu | extern "C"
#include <math.h>
#include <cuComplex.h>
__global__ void floor_double(int n,int idx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = floor(dy[i]);
}
}
|
632b91cddcb85ddb093f9979fe70274306317ba4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "linear_algebra.h"
#include "dvec.h"
using namespace std;
using namespace la;
int main(int argc, char const *argv[]){
const int rows_a=3, cols_a=3, range=10;
Matrix A(rows_a, Vector(cols_a));
Matrix B(rows_a, Vector(cols_a));
Matrix C(rows_a, Vector(cols_a));
fill_matrix(A, range);
fill_matrix(B, range);
print_matrix(A);
print_matrix(B);
// Matrix A_red(rows_a, Vector(cols_a));
// const int k_col = 4;
// pca(A, A_red, k_col);
// print_matrix(A_red);
// compress(argv[1]);
dvec<float> d_A(rows_a*cols_a);
dvec<float> d_B(rows_a*cols_a);
dvec<float> d_C(rows_a*cols_a);
d_A.set(&A[0][0]);
d_B.set(&B[0][0]);
matrixMul(d_A.data(), d_B.data(), d_C.data(), rows_a);
hipDeviceSynchronize();
d_C.get(&C[0][0]);
print_matrix(C);
return 0;
} | 632b91cddcb85ddb093f9979fe70274306317ba4.cu | #include <iostream>
#include "linear_algebra.h"
#include "dvec.h"
using namespace std;
using namespace la;
int main(int argc, char const *argv[]){
const int rows_a=3, cols_a=3, range=10;
Matrix A(rows_a, Vector(cols_a));
Matrix B(rows_a, Vector(cols_a));
Matrix C(rows_a, Vector(cols_a));
fill_matrix(A, range);
fill_matrix(B, range);
print_matrix(A);
print_matrix(B);
// Matrix A_red(rows_a, Vector(cols_a));
// const int k_col = 4;
// pca(A, A_red, k_col);
// print_matrix(A_red);
// compress(argv[1]);
dvec<float> d_A(rows_a*cols_a);
dvec<float> d_B(rows_a*cols_a);
dvec<float> d_C(rows_a*cols_a);
d_A.set(&A[0][0]);
d_B.set(&B[0][0]);
matrixMul(d_A.data(), d_B.data(), d_C.data(), rows_a);
cudaDeviceSynchronize();
d_C.get(&C[0][0]);
print_matrix(C);
return 0;
} |
4136a01912c38aab4abd19326c09ac11e4b82a2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
__global__ void print_from_gpu(void) {
printf("Hello World! from thread [%d,%d] \
From device\n", threadIdx.x, blockIdx.x);
}
int main(void) {
printf("Hello World from host!\n");
print_from_gpu << <1, 1 >> > ();
hipDeviceSynchronize();
return 0;
}
| 4136a01912c38aab4abd19326c09ac11e4b82a2d.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
__global__ void print_from_gpu(void) {
printf("Hello World! from thread [%d,%d] \
From device\n", threadIdx.x, blockIdx.x);
}
int main(void) {
printf("Hello World from host!\n");
print_from_gpu << <1, 1 >> > ();
cudaDeviceSynchronize();
return 0;
}
|
254e2c2bc3619fd1e92d76bc26e3e093395c2ebd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse_v2.h>
#include <raft/cudart_utils.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/device/buffer.hpp>
#include <gtest/gtest.h>
#include <raft/sparse/cusparse_wrappers.h>
#include <test_utils.h>
#include <sparse/convert/dense.cuh>
namespace raft {
namespace sparse {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct CSRToDenseInputs {
value_idx nrows;
value_idx ncols;
std::vector<value_idx> indptr_h;
std::vector<value_idx> indices_h;
std::vector<value_t> data_h;
std::vector<value_t> out_ref_h;
};
template <typename value_idx, typename value_t>
::std::ostream &operator<<(::std::ostream &os,
const CSRToDenseInputs<value_idx, value_t> &dims) {
return os;
}
template <typename value_idx, typename value_t>
class CSRToDenseTest
: public ::testing::TestWithParam<CSRToDenseInputs<value_idx, value_t>> {
protected:
void make_data() {
std::vector<value_idx> indptr_h = params.indptr_h;
std::vector<value_idx> indices_h = params.indices_h;
std::vector<value_t> data_h = params.data_h;
allocate(indptr, indptr_h.size());
allocate(indices, indices_h.size());
allocate(data, data_h.size());
update_device(indptr, indptr_h.data(), indptr_h.size(), stream);
update_device(indices, indices_h.data(), indices_h.size(), stream);
update_device(data, data_h.data(), data_h.size(), stream);
std::vector<value_t> out_ref_h = params.out_ref_h;
allocate(out_ref, out_ref_h.size());
update_device(out_ref, out_ref_h.data(), out_ref_h.size(), stream);
allocate(out, out_ref_h.size());
}
void SetUp() override {
params = ::testing::TestWithParam<
CSRToDenseInputs<value_idx, value_t>>::GetParam();
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
CUDA_CHECK(hipStreamCreate(&stream));
CUSPARSE_CHECK(hipsparseCreate(&handle));
make_data();
convert::csr_to_dense(handle, params.nrows, params.ncols, indptr, indices,
data, params.nrows, out, stream, true);
CUDA_CHECK(hipStreamSynchronize(stream));
CUSPARSE_CHECK(hipsparseDestroy(handle));
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(indptr));
CUDA_CHECK(hipFree(indices));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_ref));
}
void compare() {
ASSERT_TRUE(
devArrMatch(out, out_ref, params.out_ref_h.size(), Compare<value_t>()));
}
protected:
hipStream_t stream;
hipsparseHandle_t handle;
// input data
value_idx *indptr, *indices;
value_t *data;
// output data
value_t *out;
// expected output data
value_t *out_ref;
CSRToDenseInputs<value_idx, value_t> params;
};
const std::vector<CSRToDenseInputs<int, float>> inputs_i32_f = {
{4,
4,
{0, 2, 4, 6, 8},
{0, 1, 2, 3, 0, 1, 2, 3}, // indices
{1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f},
{1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 5.0f, 50.0f, 28.0f, 0.0f, 0.0f,
0.0f, 0.0f, 16.0f, 2.0f}},
};
typedef CSRToDenseTest<int, float> CSRToDenseTestF;
TEST_P(CSRToDenseTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(CSRToDenseTest, CSRToDenseTestF,
::testing::ValuesIn(inputs_i32_f));
}; // end namespace sparse
}; // end namespace raft
| 254e2c2bc3619fd1e92d76bc26e3e093395c2ebd.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse_v2.h>
#include <raft/cudart_utils.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/device/buffer.hpp>
#include <gtest/gtest.h>
#include <raft/sparse/cusparse_wrappers.h>
#include <test_utils.h>
#include <sparse/convert/dense.cuh>
namespace raft {
namespace sparse {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct CSRToDenseInputs {
value_idx nrows;
value_idx ncols;
std::vector<value_idx> indptr_h;
std::vector<value_idx> indices_h;
std::vector<value_t> data_h;
std::vector<value_t> out_ref_h;
};
template <typename value_idx, typename value_t>
::std::ostream &operator<<(::std::ostream &os,
const CSRToDenseInputs<value_idx, value_t> &dims) {
return os;
}
template <typename value_idx, typename value_t>
class CSRToDenseTest
: public ::testing::TestWithParam<CSRToDenseInputs<value_idx, value_t>> {
protected:
void make_data() {
std::vector<value_idx> indptr_h = params.indptr_h;
std::vector<value_idx> indices_h = params.indices_h;
std::vector<value_t> data_h = params.data_h;
allocate(indptr, indptr_h.size());
allocate(indices, indices_h.size());
allocate(data, data_h.size());
update_device(indptr, indptr_h.data(), indptr_h.size(), stream);
update_device(indices, indices_h.data(), indices_h.size(), stream);
update_device(data, data_h.data(), data_h.size(), stream);
std::vector<value_t> out_ref_h = params.out_ref_h;
allocate(out_ref, out_ref_h.size());
update_device(out_ref, out_ref_h.data(), out_ref_h.size(), stream);
allocate(out, out_ref_h.size());
}
void SetUp() override {
params = ::testing::TestWithParam<
CSRToDenseInputs<value_idx, value_t>>::GetParam();
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
CUDA_CHECK(cudaStreamCreate(&stream));
CUSPARSE_CHECK(cusparseCreate(&handle));
make_data();
convert::csr_to_dense(handle, params.nrows, params.ncols, indptr, indices,
data, params.nrows, out, stream, true);
CUDA_CHECK(cudaStreamSynchronize(stream));
CUSPARSE_CHECK(cusparseDestroy(handle));
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(indptr));
CUDA_CHECK(cudaFree(indices));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_ref));
}
void compare() {
ASSERT_TRUE(
devArrMatch(out, out_ref, params.out_ref_h.size(), Compare<value_t>()));
}
protected:
cudaStream_t stream;
cusparseHandle_t handle;
// input data
value_idx *indptr, *indices;
value_t *data;
// output data
value_t *out;
// expected output data
value_t *out_ref;
CSRToDenseInputs<value_idx, value_t> params;
};
const std::vector<CSRToDenseInputs<int, float>> inputs_i32_f = {
{4,
4,
{0, 2, 4, 6, 8},
{0, 1, 2, 3, 0, 1, 2, 3}, // indices
{1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f},
{1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 5.0f, 50.0f, 28.0f, 0.0f, 0.0f,
0.0f, 0.0f, 16.0f, 2.0f}},
};
typedef CSRToDenseTest<int, float> CSRToDenseTestF;
TEST_P(CSRToDenseTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(CSRToDenseTest, CSRToDenseTestF,
::testing::ValuesIn(inputs_i32_f));
}; // end namespace sparse
}; // end namespace raft
|
a63c552d6a188d4c14824e755cded7221787eaa9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
/***************** Adapted by Charles Shang *********************/
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename T>
__device__ T bilinear_interp_cuda(
const T *data,
const T x,
const T y,
const int width,
const int height)
{
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
T dist_x = static_cast<T>(x - x1);
T dist_y = static_cast<T>(y - y1);
T value11 = data[y1 * width + x1];
T value12 = data[y2 * width + x1];
T value21 = data[y1 * width + x2];
T value22 = data[y2 * width + x2];
T value = (1 - dist_x) * (1 - dist_y) * value11 +
(1 - dist_x) * dist_y * value12 +
dist_x * (1 - dist_y) * value21 +
dist_x * dist_y * value22;
return value;
}
template <typename T>
__global__ void DeformablePSROIPoolForwardKernelCuda(
const int count,
const T *bottom_data,
const T spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const T *bottom_rois, const T *bottom_trans,
const int no_trans,
const T trans_std,
const int sample_per_part,
const int output_dim,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class,
T *top_data,
T *top_count)
{
CUDA_KERNEL_LOOP(index, count)
{
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
int class_id = ctop / channels_each_class;
T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
T sum = 0;
int count = 0;
int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (int ih = 0; ih < sample_per_part; ih++)
{
for (int iw = 0; iw < sample_per_part; iw++)
{
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
{
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_size + gh) * group_size + gw;
T val = bilinear_interp_cuda(offset_bottom_data + c * height * width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<T>(0) : sum / count;
top_count[index] = count;
}
}
template <typename T>
__global__ void DeformablePSROIPoolBackwardAccKernelCuda(
const int count,
const T *top_diff,
const T *top_count,
const int num_rois,
const T spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
T *bottom_data_diff, T *bottom_trans_diff,
const T *bottom_data,
const T *bottom_rois,
const T *bottom_trans,
const int no_trans,
const T trans_std,
const int sample_per_part,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class)
{
CUDA_KERNEL_LOOP(index, count)
{
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
int class_id = ctop / channels_each_class;
T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0)
{
continue;
}
T diff_val = top_diff[index] / top_count[index];
const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
for (int ih = 0; ih < sample_per_part; ih++)
{
for (int iw = 0; iw < sample_per_part; iw++)
{
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
{
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_size + gh) * group_size + gw;
// backward on feature
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
T dist_x = w - x0, dist_y = h - y0;
T q00 = (1 - dist_x) * (1 - dist_y);
T q01 = (1 - dist_x) * dist_y;
T q10 = dist_x * (1 - dist_y);
T q11 = dist_x * dist_y;
int bottom_index_base = c * height * width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
if (no_trans)
{
continue;
}
T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
diff_x *= roi_width;
T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
diff_y *= roi_height;
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);
}
}
}
}
std::tuple<at::Tensor, at::Tensor>
dcn_v2_psroi_pooling_cuda_forward(const at::Tensor &input,
const at::Tensor &bbox,
const at::Tensor &trans,
const int no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std)
{
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(bbox.type().is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(trans.type().is_cuda(), "trans must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_trans = no_trans ? 2 : trans.size(1);
const int num_bbox = bbox.size(0);
AT_ASSERTM(channels == output_dim, "input channels and output channels must equal");
auto pooled_height = pooled_size;
auto pooled_width = pooled_size;
auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
long out_size = num_bbox * output_dim * pooled_height * pooled_width;
auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (out.numel() == 0)
{
THCudaCheck(hipGetLastError());
return std::make_tuple(out, top_count);
}
dim3 grid(::min(THCCeilDiv(out_size, 512L), 4096L));
dim3 block(512);
AT_DISPATCH_FLOATING_TYPES(input.type(), "dcn_v2_psroi_pooling_cuda_forward", [&] {
hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernelCuda<scalar_t>), dim3(grid), dim3(block), 0, stream,
out_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height, width,
pooled_height,
pooled_width,
bbox.contiguous().data<scalar_t>(),
trans.contiguous().data<scalar_t>(),
no_trans,
trans_std,
sample_per_part,
output_dim,
group_size,
part_size,
num_classes,
channels_each_class,
out.data<scalar_t>(),
top_count.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return std::make_tuple(out, top_count);
}
std::tuple<at::Tensor, at::Tensor>
dcn_v2_psroi_pooling_cuda_backward(const at::Tensor &out_grad,
const at::Tensor &input,
const at::Tensor &bbox,
const at::Tensor &trans,
const at::Tensor &top_count,
const int no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std)
{
AT_ASSERTM(out_grad.type().is_cuda(), "out_grad must be a CUDA tensor");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(bbox.type().is_cuda(), "bbox must be a CUDA tensor");
AT_ASSERTM(trans.type().is_cuda(), "trans must be a CUDA tensor");
AT_ASSERTM(top_count.type().is_cuda(), "top_count must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_trans = no_trans ? 2 : trans.size(1);
const int num_bbox = bbox.size(0);
AT_ASSERTM(channels == output_dim, "input channels and output channels must equal");
auto pooled_height = pooled_size;
auto pooled_width = pooled_size;
long out_size = num_bbox * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options());
auto trans_grad = at::zeros_like(trans);
if (input_grad.numel() == 0)
{
THCudaCheck(hipGetLastError());
return std::make_tuple(input_grad, trans_grad);
}
dim3 grid(::min(THCCeilDiv(out_size, 512L), 4096L));
dim3 block(512);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(out_grad.type(), "dcn_v2_psroi_pooling_cuda_backward", [&] {
hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernelCuda<scalar_t>), dim3(grid), dim3(block), 0, stream,
out_size,
out_grad.contiguous().data<scalar_t>(),
top_count.contiguous().data<scalar_t>(),
num_bbox,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
output_dim,
input_grad.contiguous().data<scalar_t>(),
trans_grad.contiguous().data<scalar_t>(),
input.contiguous().data<scalar_t>(),
bbox.contiguous().data<scalar_t>(),
trans.contiguous().data<scalar_t>(),
no_trans,
trans_std,
sample_per_part,
group_size,
part_size,
num_classes,
channels_each_class);
});
THCudaCheck(hipGetLastError());
return std::make_tuple(input_grad, trans_grad);
} | a63c552d6a188d4c14824e755cded7221787eaa9.cu | /*!
* Copyright (c) 2017 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
/***************** Adapted by Charles Shang *********************/
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename T>
__device__ T bilinear_interp_cuda(
const T *data,
const T x,
const T y,
const int width,
const int height)
{
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
T dist_x = static_cast<T>(x - x1);
T dist_y = static_cast<T>(y - y1);
T value11 = data[y1 * width + x1];
T value12 = data[y2 * width + x1];
T value21 = data[y1 * width + x2];
T value22 = data[y2 * width + x2];
T value = (1 - dist_x) * (1 - dist_y) * value11 +
(1 - dist_x) * dist_y * value12 +
dist_x * (1 - dist_y) * value21 +
dist_x * dist_y * value22;
return value;
}
// Forward pass of deformable position-sensitive ROI pooling (DCNv2).
//
// One thread per output element, laid out as (n, ctop, ph, pw). Each thread
// averages up to sample_per_part * sample_per_part bilinear samples from its
// pooling bin, whose position is shifted by the learned per-part offsets in
// bottom_trans (scaled by trans_std) unless no_trans is set.
//
//   count        - total number of output elements (grid-stride bound)
//   bottom_data  - input feature map, NCHW
//   bottom_rois  - ROIs as rows of (batch_index, x1, y1, x2, y2)
//   bottom_trans - per-class, per-part (dx, dy) offsets; unused when no_trans
//   top_data     - output: mean of the valid samples (0 if none fell inside)
//   top_count    - output: number of valid samples, consumed by the backward
//
// Fix vs. original: the local sample counter was named `count`, shadowing the
// kernel parameter `count` used as the CUDA_KERNEL_LOOP bound; renamed to
// `sample_count` (behavior unchanged, the loop condition resolves in the
// enclosing scope either way).
template <typename T>
__global__ void DeformablePSROIPoolForwardKernelCuda(
    const int count,
    const T *bottom_data,
    const T spatial_scale,
    const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const T *bottom_rois, const T *bottom_trans,
    const int no_trans,
    const T trans_std,
    const int sample_per_part,
    const int output_dim,
    const int group_size,
    const int part_size,
    const int num_classes,
    const int channels_each_class,
    T *top_data,
    T *top_count)
{
  CUDA_KERNEL_LOOP(index, count)
  {
    // The output is in order (n, ctop, ph, pw)
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling; the -0.5 shift centers pixels
    const T *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
    T roi_height = max(roi_end_h - roi_start_h, 0.1);
    // Bin and sub-sample sizes at the feature-map resolution
    T bin_size_h = roi_height / static_cast<T>(pooled_height);
    T bin_size_w = roi_width / static_cast<T>(pooled_width);
    T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
    T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
    // Part cell (index into the offset table) and class of this channel
    int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
    int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
    T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
    // Bin origin, shifted by the learned offset (normalized by the ROI size)
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    T sum = 0;
    int sample_count = 0;
    // Position-sensitive group cell selecting the input channel
    int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
    int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
    gw = min(max(gw, 0), group_size - 1);
    gh = min(max(gh, 0), group_size - 1);
    const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
    for (int ih = 0; ih < sample_per_part; ih++)
    {
      for (int iw = 0; iw < sample_per_part; iw++)
      {
        T w = wstart + iw * sub_bin_size_w;
        T h = hstart + ih * sub_bin_size_h;
        // Skip samples falling outside the feature map; clamp the rest
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
        {
          continue;
        }
        w = min(max(w, 0.), width - 1.);
        h = min(max(h, 0.), height - 1.);
        int c = (ctop * group_size + gh) * group_size + gw;
        T val = bilinear_interp_cuda(offset_bottom_data + c * height * width, w, h, width, height);
        sum += val;
        sample_count++;
      }
    }
    // Average over the valid samples; record how many there were for backward
    top_data[index] = sample_count == 0 ? static_cast<T>(0) : sum / sample_count;
    top_count[index] = sample_count;
  }
}
// Backward pass of deformable position-sensitive ROI pooling (DCNv2).
//
// One thread per output-gradient element (n, ctop, ph, pw). Each thread
// re-derives the exact sampling positions used by the forward kernel, then
// scatter-adds the incoming gradient into:
//   * bottom_data_diff  - gradient w.r.t. the input features, via the four
//                         bilinear-interpolation weights, and
//   * bottom_trans_diff - gradient w.r.t. the learned (dx, dy) offsets
//                         (skipped entirely when no_trans is set).
// atomicAdd is required because many output elements can touch the same
// input location / offset cell.
template <typename T>
__global__ void DeformablePSROIPoolBackwardAccKernelCuda(
    const int count,
    const T *top_diff,
    const T *top_count,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const int output_dim,
    T *bottom_data_diff, T *bottom_trans_diff,
    const T *bottom_data,
    const T *bottom_rois,
    const T *bottom_trans,
    const int no_trans,
    const T trans_std,
    const int sample_per_part,
    const int group_size,
    const int part_size,
    const int num_classes,
    const int channels_each_class)
{
  CUDA_KERNEL_LOOP(index, count)
  {
    // The output is in order (n, ctop, ph, pw)
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling -- must mirror the forward pass
    const T *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
    T roi_height = max(roi_end_h - roi_start_h, 0.1);
    // Compute w and h at bottom
    T bin_size_h = roi_height / static_cast<T>(pooled_height);
    T bin_size_w = roi_width / static_cast<T>(pooled_width);
    T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
    T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
    // Part cell (index into the offset table) and class of this channel
    int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
    int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
    T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    // No valid samples contributed to this output -> nothing to propagate
    if (top_count[index] <= 0)
    {
      continue;
    }
    // The forward averaged over top_count samples, so each gets 1/count share
    T diff_val = top_diff[index] / top_count[index];
    const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
    T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
    int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
    int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
    gw = min(max(gw, 0), group_size - 1);
    gh = min(max(gh, 0), group_size - 1);
    for (int ih = 0; ih < sample_per_part; ih++)
    {
      for (int iw = 0; iw < sample_per_part; iw++)
      {
        T w = wstart + iw * sub_bin_size_w;
        T h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation: skip out-of-range samples, clamp the rest
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
        {
          continue;
        }
        w = min(max(w, 0.), width - 1.);
        h = min(max(h, 0.), height - 1.);
        int c = (ctop * group_size + gh) * group_size + gw;
        // backward on feature: distribute diff_val over the 4 corners using
        // the same bilinear weights the forward pass used
        int x0 = floor(w);
        int x1 = ceil(w);
        int y0 = floor(h);
        int y1 = ceil(h);
        T dist_x = w - x0, dist_y = h - y0;
        T q00 = (1 - dist_x) * (1 - dist_y);
        T q01 = (1 - dist_x) * dist_y;
        T q10 = dist_x * (1 - dist_y);
        T q11 = dist_x * dist_y;
        int bottom_index_base = c * height * width;
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
        if (no_trans)
        {
          continue;
        }
        // backward on the offsets: spatial derivative of the bilinear
        // interpolant, rescaled to ROI-normalized offset units
        T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
        T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
        T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
        T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
        T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
        diff_x *= roi_width;
        T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
        diff_y *= roi_height;
        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);
        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);
      }
    }
  }
}
// Host-side launcher for the deformable PS-ROI pooling forward pass.
//
// input: (B, C, H, W) feature map; bbox: (num_bbox, 5) ROIs stored as
// (batch_index, x1, y1, x2, y2); trans: learned offsets (ignored when
// no_trans != 0). Returns {pooled output, per-element valid-sample count};
// the count tensor must be handed to dcn_v2_psroi_pooling_cuda_backward.
//
// NOTE(review): written against the pre-1.5 PyTorch extension API
// (tensor.type(), data<scalar_t>(), THC helpers); these are deprecated or
// removed in newer PyTorch releases -- confirm against the targeted version.
std::tuple<at::Tensor, at::Tensor>
dcn_v2_psroi_pooling_cuda_forward(const at::Tensor &input,
                                  const at::Tensor &bbox,
                                  const at::Tensor &trans,
                                  const int no_trans,
                                  const float spatial_scale,
                                  const int output_dim,
                                  const int group_size,
                                  const int pooled_size,
                                  const int part_size,
                                  const int sample_per_part,
                                  const float trans_std)
{
  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(bbox.type().is_cuda(), "rois must be a CUDA tensor");
  AT_ASSERTM(trans.type().is_cuda(), "trans must be a CUDA tensor");
  const int batch = input.size(0); // unused here; kept for parity with the backward launcher
  const int channels = input.size(1);
  const int height = input.size(2);
  const int width = input.size(3);
  const int channels_trans = no_trans ? 2 : trans.size(1);
  const int num_bbox = bbox.size(0);
  AT_ASSERTM(channels == output_dim, "input channels and output channels must equal");
  auto pooled_height = pooled_size;
  auto pooled_width = pooled_size;
  auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
  // NOTE(review): out_size is long but the kernel takes an int count --
  // silently truncates for outputs with more than 2^31-1 elements.
  long out_size = num_bbox * output_dim * pooled_height * pooled_width;
  auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
  // Offsets are stored as (dx, dy) pairs per class, hence channels_trans / 2.
  const int num_classes = no_trans ? 1 : channels_trans / 2;
  const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // Nothing to launch for empty outputs (e.g. zero ROIs).
  if (out.numel() == 0)
  {
    THCudaCheck(cudaGetLastError());
    return std::make_tuple(out, top_count);
  }
  // One thread per output element: 512-thread blocks, capped at 4096 blocks
  // (the kernel's grid-stride loop covers the remainder).
  dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L));
  dim3 block(512);
  AT_DISPATCH_FLOATING_TYPES(input.type(), "dcn_v2_psroi_pooling_cuda_forward", [&] {
    DeformablePSROIPoolForwardKernelCuda<scalar_t><<<grid, block, 0, stream>>>(
        out_size,
        input.contiguous().data<scalar_t>(),
        spatial_scale,
        channels,
        height, width,
        pooled_height,
        pooled_width,
        bbox.contiguous().data<scalar_t>(),
        trans.contiguous().data<scalar_t>(),
        no_trans,
        trans_std,
        sample_per_part,
        output_dim,
        group_size,
        part_size,
        num_classes,
        channels_each_class,
        out.data<scalar_t>(),
        top_count.data<scalar_t>());
  });
  THCudaCheck(cudaGetLastError());
  return std::make_tuple(out, top_count);
}
// Host-side launcher for the deformable PS-ROI pooling backward pass.
//
// out_grad is the gradient w.r.t. the forward output; top_count is the
// per-element valid-sample count returned by the forward launcher. Returns
// {gradient w.r.t. input features, gradient w.r.t. the trans offsets}.
// Gradient tensors are zero-initialized because the kernel accumulates into
// them with atomicAdd.
//
// NOTE(review): uses the same pre-1.5 PyTorch APIs as the forward launcher.
std::tuple<at::Tensor, at::Tensor>
dcn_v2_psroi_pooling_cuda_backward(const at::Tensor &out_grad,
                                   const at::Tensor &input,
                                   const at::Tensor &bbox,
                                   const at::Tensor &trans,
                                   const at::Tensor &top_count,
                                   const int no_trans,
                                   const float spatial_scale,
                                   const int output_dim,
                                   const int group_size,
                                   const int pooled_size,
                                   const int part_size,
                                   const int sample_per_part,
                                   const float trans_std)
{
  AT_ASSERTM(out_grad.type().is_cuda(), "out_grad must be a CUDA tensor");
  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(bbox.type().is_cuda(), "bbox must be a CUDA tensor");
  AT_ASSERTM(trans.type().is_cuda(), "trans must be a CUDA tensor");
  AT_ASSERTM(top_count.type().is_cuda(), "top_count must be a CUDA tensor");
  const int batch = input.size(0);
  const int channels = input.size(1);
  const int height = input.size(2);
  const int width = input.size(3);
  const int channels_trans = no_trans ? 2 : trans.size(1);
  const int num_bbox = bbox.size(0);
  AT_ASSERTM(channels == output_dim, "input channels and output channels must equal");
  auto pooled_height = pooled_size;
  auto pooled_width = pooled_size;
  long out_size = num_bbox * output_dim * pooled_height * pooled_width;
  // Offsets are stored as (dx, dy) pairs per class, hence channels_trans / 2.
  const int num_classes = no_trans ? 1 : channels_trans / 2;
  const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
  // Zero-filled: the backward kernel scatter-accumulates via atomicAdd.
  auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options());
  auto trans_grad = at::zeros_like(trans);
  // Nothing to launch when the input gradient is empty.
  if (input_grad.numel() == 0)
  {
    THCudaCheck(cudaGetLastError());
    return std::make_tuple(input_grad, trans_grad);
  }
  // Same launch geometry as the forward pass: one thread per output element.
  dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L));
  dim3 block(512);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  AT_DISPATCH_FLOATING_TYPES(out_grad.type(), "dcn_v2_psroi_pooling_cuda_backward", [&] {
    DeformablePSROIPoolBackwardAccKernelCuda<scalar_t><<<grid, block, 0, stream>>>(
        out_size,
        out_grad.contiguous().data<scalar_t>(),
        top_count.contiguous().data<scalar_t>(),
        num_bbox,
        spatial_scale,
        channels,
        height,
        width,
        pooled_height,
        pooled_width,
        output_dim,
        input_grad.contiguous().data<scalar_t>(),
        trans_grad.contiguous().data<scalar_t>(),
        input.contiguous().data<scalar_t>(),
        bbox.contiguous().data<scalar_t>(),
        trans.contiguous().data<scalar_t>(),
        no_trans,
        trans_std,
        sample_per_part,
        group_size,
        part_size,
        num_classes,
        channels_each_class);
  });
  THCudaCheck(cudaGetLastError());
  return std::make_tuple(input_grad, trans_grad);
}
2ee7f21919d299950591405ab72b864ac8eb164e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @author Martin Gurtner
* @author Viktor-Adam Koropecky
*/
#include <iostream>
#include <unistd.h>
#include <cmath>
#include <pthread.h>
#include "imgproc_thread.h"
#include "sockpp/udp_socket.h"
#include "BeadsFinder.h"
#include "BackPropagator.h"
using namespace std;
using namespace std::chrono;
// Image-processing thread body.
//
// Outer loop: one iteration per application "session" -- waits for the
// INITIALIZING state, builds the GPU pipeline (holographic back-propagation
// plus bead detection for the green and red channels), then runs the frame
// loop until the app leaves the RUNNING state. Returns only when the
// application enters the EXITING state.
void imgproc_thread(AppData& appData){
    if(appData.params.debug) printf("INFO: imgproc_thread: started\n");
    // Optionally promote this thread to real-time FIFO scheduling so frame
    // processing is not preempted by ordinary threads.
    if(appData.params.rtprio) {
        struct sched_param schparam;
        schparam.sched_priority = 50;
        if(appData.params.debug) printf("INFO: imgproc_thread: setting rt priority to %d\n", schparam.sched_priority);
        int s = pthread_setschedparam(pthread_self(), SCHED_FIFO, &schparam);
        if (s != 0) fprintf(stderr, "WARNING: setting the priority of image processing thread failed.\n");
    }
    // UDP socket used to stream images and bead coordinates to subscribers.
    sockpp::udp_socket udp_sock;
    if (!udp_sock) {
        cerr << "ERROR: creating the UDP v4 socket: " << udp_sock.last_error_str() << endl;
    }
    // Enlarge the kernel send buffer to two frames' worth of bytes so a burst
    // of image datagrams does not overflow it.
    int sendbuff = 2*sizeof(uint8_t)*appData.get_area();
    socklen_t optlen = sizeof(sendbuff);
    if(!udp_sock.set_option(SOL_SOCKET, SO_SNDBUF, &sendbuff, optlen)) {
        cerr << "ERROR: failed to increase the send buffer size for the UDP communication " << udp_sock.last_error_str() << endl;
    }
    // Wire format for coordinates: uint32 bead count followed by (x, y)
    // uint16 pairs, at most MAX_NUMBER_BEADS of them.
    char coords_buffer[2*sizeof(uint16_t)*MAX_NUMBER_BEADS + sizeof(uint32_t)];
    while(!appData.appStateIs(AppData::AppState::EXITING)) {
        if(appData.params.debug) printf("INFO: imgproc_thread: waiting for entering the INITIALIZING state\n");
        // Wait till the app enters the INITIALIZING state. If this fails (which could happen only in case of entering the EXITING state), break the loop.
        if(!appData.waitTillState(AppData::AppState::INITIALIZING)) break;
        // At this point, the app is in the AppData::AppState::INITIALIZING state, thus we initialize all needed stuff
        // Initialize the BackPropagator for the green image
        BackPropagator backprop_G(appData.params.img_width, appData.params.img_height, LAMBDA_GREEN, (float)appData.params.backprop_z_G/1000000.0f);
        // Initialize the BackPropagator for the red image
        BackPropagator backprop_R(appData.params.img_width, appData.params.img_height, LAMBDA_RED, (float)appData.params.backprop_z_R/1000000.0f);
        // Initialize the BeadFinders
        BeadsFinder beadsFinder_G(appData.params.img_width, appData.params.img_height, (uint8_t)appData.params.improc_thrs_G, (float)appData.params.improc_gaussFiltSigma_G);
        BeadsFinder beadsFinder_R(appData.params.img_width, appData.params.img_height, (uint8_t)appData.params.improc_thrs_R, (float)appData.params.improc_gaussFiltSigma_R);
        // Allocate the memory for the images
        appData.img[ImageType::RAW_G].create(appData.params.img_width, appData.params.img_height);
        appData.img[ImageType::RAW_R].create(appData.params.img_width, appData.params.img_height);
        appData.img[ImageType::BACKPROP_G].create(appData.params.img_width, appData.params.img_height);
        appData.img[ImageType::BACKPROP_R].create(appData.params.img_width, appData.params.img_height);
        // Initialize the counters for measuring the cycle duration and jitter
        double iteration_count = 0;
        double avg_cycle_duration_us = 0;
        double avg_jitter_us = 0;
        double cycle_period_us = 1e6/((double)appData.params.cam_FPS);
        // Set the flag indicating that the camera was initialized
        appData.imgproc_is_initialized = true;
        if(appData.params.debug) printf("INFO: imgproc_thread: waiting till other App components are initialized\n");
        // Wait till all the components of the App are initialized. If this fails, break the loop.
        if(!appData.waitTillAppIsInitialized()) break;
        // At this point, the app is in the AppData::AppState::RUNNING state as the App enters RUNNING state automatically when all components are initialized.
        if(appData.params.debug) printf("INFO: imgproc_thread: entering the running stage\n");
        while(appData.appStateIs(AppData::AppState::RUNNING)) {
            auto t_cycle_start = steady_clock::now();
            // wait till a new image is ready
            // NOTE(review): condition_variable::wait without a predicate can
            // wake spuriously -- confirm the camera thread's notify protocol
            // tolerates an occasional extra iteration here.
            std::unique_lock<std::mutex> lk(appData.cam_mtx);
            appData.cam_cv.wait(lk);
            // unlock the mutex so that the camera thread can proceed to capture a new image
            lk.unlock();
            // If the app entered the EXITING state, break the loop and finish the thread
            if(appData.appStateIs(AppData::AppState::EXITING)) break;
            // Make copies of red and green channel
            auto t_cp_start = steady_clock::now();
            appData.camIG.copyTo(appData.img[ImageType::RAW_G]);
            appData.camIR.copyTo(appData.img[ImageType::RAW_R]);
            auto t_cp_end = steady_clock::now();
            // process the image
            // backprop
            auto t_backprop_start = steady_clock::now();
            backprop_G.backprop(appData.img[ImageType::RAW_G], appData.img[ImageType::BACKPROP_G]);
            backprop_R.backprop(appData.img[ImageType::RAW_R], appData.img[ImageType::BACKPROP_R]);
            // Block until both back-propagations finish so the timing below is meaningful.
            hipDeviceSynchronize();
            auto t_backprop_end = steady_clock::now();
            // find the beads (if enabled)
            auto t_beadsfinder_start = steady_clock::now();
            if(appData.params.beadsearch_G) {
                beadsFinder_G.findBeads(appData.img[ImageType::BACKPROP_G]);
                { // Limit the scope of the mutex
                    std::lock_guard<std::mutex> mtx_bp(appData.mtx_bp_G);
                    beadsFinder_G.copyPositionsTo(appData.bead_positions_G);
                    appData.beadTracker_G.update(appData.bead_positions_G);
                }
            }
            if(appData.params.beadsearch_R) {
                beadsFinder_R.findBeads(appData.img[ImageType::BACKPROP_R]);
                { // Limit the scope of the mutex
                    std::lock_guard<std::mutex> mtx_bp(appData.mtx_bp_R);
                    beadsFinder_R.copyPositionsTo(appData.bead_positions_R);
                    appData.beadTracker_R.update(appData.bead_positions_R);
                }
            }
            auto t_beadsfinder_end = steady_clock::now();
            // Send the images to the subscribers
            for(auto const& subs: appData.img_subs) {
                ImageType imgType = subs.first;
                bool img_sync = false;
                for(auto const& sub_addr: subs.second) {
                    // Each frame is split into 128 UDP datagrams of area/128 bytes.
                    // NOTE(review): the i*1024*8 offset matches area/128 only when
                    // get_area() == 1024*1024 -- confirm for other resolutions.
                    for (size_t i=0; i<128; ++i) {
                        // synchronize the image (copy it from the device memory to the host memory) only if it hasn't been already synchronized
                        ssize_t sent_bytes = udp_sock.send_to(appData.img[imgType].hostPtr(!img_sync) + i*1024*8, sizeof(uint8_t)*appData.get_area()/128, sub_addr);
                        img_sync = true;
                    }
                    if(appData.params.debug) cout << "INFO: sending image via UDP to " << sub_addr << endl;
                }
            }
            // Send the coordinates to the subscribers
            if (!appData.coords_subs.empty()) {
                uint32_t *beadCountP = (uint32_t*)coords_buffer;
                const vector<Position>& bp = appData.beadTracker_G.getBeadPositions();
                // Store the number of tracked objects
                *beadCountP = (uint32_t)bp.size();
                // Copy the tracked positions to the coords_buffer
                memcpy(coords_buffer+sizeof(uint32_t), bp.data(), 2*(*beadCountP)*sizeof(uint16_t));
                for(auto const& sub_addr: appData.coords_subs) {
                    ssize_t sent_bytes = udp_sock.send_to(coords_buffer, sizeof(uint32_t) + 2*(*beadCountP)*sizeof(uint16_t), sub_addr);
                    if(appData.params.debug) cout << "INFO: sending coordinates via UDP to " << sub_addr << " - " << sent_bytes << " bytes sent" << endl;
                }
            }
            // Update the running averages of cycle duration and jitter
            // (incremental mean: m += (x - m) / (k + 1)).
            auto t_cycle_end = steady_clock::now();
            auto cycle_elapsed_seconds = t_cycle_end - t_cycle_start;
            avg_cycle_duration_us += 1/(iteration_count+1)*(duration_cast<microseconds>(cycle_elapsed_seconds).count() - avg_cycle_duration_us);
            avg_jitter_us += 1/(iteration_count+1)*( abs(cycle_period_us - duration_cast<microseconds>(cycle_elapsed_seconds).count()) - avg_jitter_us);
            iteration_count++;
            if(appData.params.verbose) {
                printf("TRACE: Backprop: %6.3f ms", duration_cast<microseconds>(t_backprop_end - t_backprop_start).count()/1000.0);
                printf("| BF.findBeads: %6.3f ms", duration_cast<microseconds>(t_beadsfinder_end - t_beadsfinder_start).count()/1000.0);
                printf("| cp: %6.3f ms", duration_cast<microseconds>(t_cp_end - t_cp_start).count()/1000.0);
                printf("| whole cycle: %6.3f ms", duration_cast<microseconds>(cycle_elapsed_seconds).count()/1000.0);
                printf("| #points: (%d, %d)", (int)appData.bead_positions_G.size(), (int)appData.bead_positions_R.size());
                printf("\n");
            }
        }
        printf("Average cycle duration: %6.3f ms| Average jitter: %6.3f ms\n", avg_cycle_duration_us/1000.0, avg_jitter_us/1000.0);
        appData.imgproc_is_initialized = false;
    }
    if(appData.params.debug) printf("INFO: imgproc_thread: ended\n");
}
| 2ee7f21919d299950591405ab72b864ac8eb164e.cu | /**
* @author Martin Gurtner
* @author Viktor-Adam Koropecky
*/
#include <iostream>
#include <unistd.h>
#include <cmath>
#include <pthread.h>
#include "imgproc_thread.h"
#include "sockpp/udp_socket.h"
#include "BeadsFinder.h"
#include "BackPropagator.h"
using namespace std;
using namespace std::chrono;
// Image-processing thread body.
//
// Outer loop: one iteration per application "session" -- waits for the
// INITIALIZING state, builds the GPU pipeline (holographic back-propagation
// plus bead detection for the green and red channels), then runs the frame
// loop until the app leaves the RUNNING state. Returns only when the
// application enters the EXITING state.
void imgproc_thread(AppData& appData){
    if(appData.params.debug) printf("INFO: imgproc_thread: started\n");
    // Optionally promote this thread to real-time FIFO scheduling so frame
    // processing is not preempted by ordinary threads.
    if(appData.params.rtprio) {
        struct sched_param schparam;
        schparam.sched_priority = 50;
        if(appData.params.debug) printf("INFO: imgproc_thread: setting rt priority to %d\n", schparam.sched_priority);
        int s = pthread_setschedparam(pthread_self(), SCHED_FIFO, &schparam);
        if (s != 0) fprintf(stderr, "WARNING: setting the priority of image processing thread failed.\n");
    }
    // UDP socket used to stream images and bead coordinates to subscribers.
    sockpp::udp_socket udp_sock;
    if (!udp_sock) {
        cerr << "ERROR: creating the UDP v4 socket: " << udp_sock.last_error_str() << endl;
    }
    // Enlarge the kernel send buffer to two frames' worth of bytes so a burst
    // of image datagrams does not overflow it.
    int sendbuff = 2*sizeof(uint8_t)*appData.get_area();
    socklen_t optlen = sizeof(sendbuff);
    if(!udp_sock.set_option(SOL_SOCKET, SO_SNDBUF, &sendbuff, optlen)) {
        cerr << "ERROR: failed to increase the send buffer size for the UDP communication " << udp_sock.last_error_str() << endl;
    }
    // Wire format for coordinates: uint32 bead count followed by (x, y)
    // uint16 pairs, at most MAX_NUMBER_BEADS of them.
    char coords_buffer[2*sizeof(uint16_t)*MAX_NUMBER_BEADS + sizeof(uint32_t)];
    while(!appData.appStateIs(AppData::AppState::EXITING)) {
        if(appData.params.debug) printf("INFO: imgproc_thread: waiting for entering the INITIALIZING state\n");
        // Wait till the app enters the INITIALIZING state. If this fails (which could happen only in case of entering the EXITING state), break the loop.
        if(!appData.waitTillState(AppData::AppState::INITIALIZING)) break;
        // At this point, the app is in the AppData::AppState::INITIALIZING state, thus we initialize all needed stuff
        // Initialize the BackPropagator for the green image
        BackPropagator backprop_G(appData.params.img_width, appData.params.img_height, LAMBDA_GREEN, (float)appData.params.backprop_z_G/1000000.0f);
        // Initialize the BackPropagator for the red image
        BackPropagator backprop_R(appData.params.img_width, appData.params.img_height, LAMBDA_RED, (float)appData.params.backprop_z_R/1000000.0f);
        // Initialize the BeadFinders
        BeadsFinder beadsFinder_G(appData.params.img_width, appData.params.img_height, (uint8_t)appData.params.improc_thrs_G, (float)appData.params.improc_gaussFiltSigma_G);
        BeadsFinder beadsFinder_R(appData.params.img_width, appData.params.img_height, (uint8_t)appData.params.improc_thrs_R, (float)appData.params.improc_gaussFiltSigma_R);
        // Allocate the memory for the images
        appData.img[ImageType::RAW_G].create(appData.params.img_width, appData.params.img_height);
        appData.img[ImageType::RAW_R].create(appData.params.img_width, appData.params.img_height);
        appData.img[ImageType::BACKPROP_G].create(appData.params.img_width, appData.params.img_height);
        appData.img[ImageType::BACKPROP_R].create(appData.params.img_width, appData.params.img_height);
        // Initialize the counters for measuring the cycle duration and jitter
        double iteration_count = 0;
        double avg_cycle_duration_us = 0;
        double avg_jitter_us = 0;
        double cycle_period_us = 1e6/((double)appData.params.cam_FPS);
        // Set the flag indicating that the camera was initialized
        appData.imgproc_is_initialized = true;
        if(appData.params.debug) printf("INFO: imgproc_thread: waiting till other App components are initialized\n");
        // Wait till all the components of the App are initialized. If this fails, break the loop.
        if(!appData.waitTillAppIsInitialized()) break;
        // At this point, the app is in the AppData::AppState::RUNNING state as the App enters RUNNING state automatically when all components are initialized.
        if(appData.params.debug) printf("INFO: imgproc_thread: entering the running stage\n");
        while(appData.appStateIs(AppData::AppState::RUNNING)) {
            auto t_cycle_start = steady_clock::now();
            // wait till a new image is ready
            // NOTE(review): condition_variable::wait without a predicate can
            // wake spuriously -- confirm the camera thread's notify protocol
            // tolerates an occasional extra iteration here.
            std::unique_lock<std::mutex> lk(appData.cam_mtx);
            appData.cam_cv.wait(lk);
            // unlock the mutex so that the camera thread can proceed to capture a new image
            lk.unlock();
            // If the app entered the EXITING state, break the loop and finish the thread
            if(appData.appStateIs(AppData::AppState::EXITING)) break;
            // Make copies of red and green channel
            auto t_cp_start = steady_clock::now();
            appData.camIG.copyTo(appData.img[ImageType::RAW_G]);
            appData.camIR.copyTo(appData.img[ImageType::RAW_R]);
            auto t_cp_end = steady_clock::now();
            // process the image
            // backprop
            auto t_backprop_start = steady_clock::now();
            backprop_G.backprop(appData.img[ImageType::RAW_G], appData.img[ImageType::BACKPROP_G]);
            backprop_R.backprop(appData.img[ImageType::RAW_R], appData.img[ImageType::BACKPROP_R]);
            // Block until both back-propagations finish so the timing below is meaningful.
            cudaDeviceSynchronize();
            auto t_backprop_end = steady_clock::now();
            // find the beads (if enabled)
            auto t_beadsfinder_start = steady_clock::now();
            if(appData.params.beadsearch_G) {
                beadsFinder_G.findBeads(appData.img[ImageType::BACKPROP_G]);
                { // Limit the scope of the mutex
                    std::lock_guard<std::mutex> mtx_bp(appData.mtx_bp_G);
                    beadsFinder_G.copyPositionsTo(appData.bead_positions_G);
                    appData.beadTracker_G.update(appData.bead_positions_G);
                }
            }
            if(appData.params.beadsearch_R) {
                beadsFinder_R.findBeads(appData.img[ImageType::BACKPROP_R]);
                { // Limit the scope of the mutex
                    std::lock_guard<std::mutex> mtx_bp(appData.mtx_bp_R);
                    beadsFinder_R.copyPositionsTo(appData.bead_positions_R);
                    appData.beadTracker_R.update(appData.bead_positions_R);
                }
            }
            auto t_beadsfinder_end = steady_clock::now();
            // Send the images to the subscribers
            for(auto const& subs: appData.img_subs) {
                ImageType imgType = subs.first;
                bool img_sync = false;
                for(auto const& sub_addr: subs.second) {
                    // Each frame is split into 128 UDP datagrams of area/128 bytes.
                    // NOTE(review): the i*1024*8 offset matches area/128 only when
                    // get_area() == 1024*1024 -- confirm for other resolutions.
                    for (size_t i=0; i<128; ++i) {
                        // synchronize the image (copy it from the device memory to the host memory) only if it hasn't been already synchronized
                        ssize_t sent_bytes = udp_sock.send_to(appData.img[imgType].hostPtr(!img_sync) + i*1024*8, sizeof(uint8_t)*appData.get_area()/128, sub_addr);
                        img_sync = true;
                    }
                    if(appData.params.debug) cout << "INFO: sending image via UDP to " << sub_addr << endl;
                }
            }
            // Send the coordinates to the subscribers
            if (!appData.coords_subs.empty()) {
                uint32_t *beadCountP = (uint32_t*)coords_buffer;
                const vector<Position>& bp = appData.beadTracker_G.getBeadPositions();
                // Store the number of tracked objects
                *beadCountP = (uint32_t)bp.size();
                // Copy the tracked positions to the coords_buffer
                memcpy(coords_buffer+sizeof(uint32_t), bp.data(), 2*(*beadCountP)*sizeof(uint16_t));
                for(auto const& sub_addr: appData.coords_subs) {
                    ssize_t sent_bytes = udp_sock.send_to(coords_buffer, sizeof(uint32_t) + 2*(*beadCountP)*sizeof(uint16_t), sub_addr);
                    if(appData.params.debug) cout << "INFO: sending coordinates via UDP to " << sub_addr << " - " << sent_bytes << " bytes sent" << endl;
                }
            }
            // Update the running averages of cycle duration and jitter
            // (incremental mean: m += (x - m) / (k + 1)).
            auto t_cycle_end = steady_clock::now();
            auto cycle_elapsed_seconds = t_cycle_end - t_cycle_start;
            avg_cycle_duration_us += 1/(iteration_count+1)*(duration_cast<microseconds>(cycle_elapsed_seconds).count() - avg_cycle_duration_us);
            avg_jitter_us += 1/(iteration_count+1)*( abs(cycle_period_us - duration_cast<microseconds>(cycle_elapsed_seconds).count()) - avg_jitter_us);
            iteration_count++;
            if(appData.params.verbose) {
                printf("TRACE: Backprop: %6.3f ms", duration_cast<microseconds>(t_backprop_end - t_backprop_start).count()/1000.0);
                printf("| BF.findBeads: %6.3f ms", duration_cast<microseconds>(t_beadsfinder_end - t_beadsfinder_start).count()/1000.0);
                printf("| cp: %6.3f ms", duration_cast<microseconds>(t_cp_end - t_cp_start).count()/1000.0);
                printf("| whole cycle: %6.3f ms", duration_cast<microseconds>(cycle_elapsed_seconds).count()/1000.0);
                printf("| #points: (%d, %d)", (int)appData.bead_positions_G.size(), (int)appData.bead_positions_R.size());
                printf("\n");
            }
        }
        printf("Average cycle duration: %6.3f ms| Average jitter: %6.3f ms\n", avg_cycle_duration_us/1000.0, avg_jitter_us/1000.0);
        appData.imgproc_is_initialized = false;
    }
    if(appData.params.debug) printf("INFO: imgproc_thread: ended\n");
}
|
52f17752ba93dbbd58f3ffb7adf27373fb96dce6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.hpp"
// TODO CUDA kernel implementing axpy
// y = y + alpha*x
// Element-wise axpy: y[i] += a * x[i] for the first n elements.
// One thread per element; threads whose global index falls past n exit early.
template <typename T>
__global__
void axpy_kernel(T* y, T* x, T a, int n){
    auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n) {
        return;
    }
    y[gid] += a * x[gid];
}
// Benchmark driver: times H2D/D2H copies and a daxpy (y += alpha*x) kernel
// on n = 2^pow doubles (pow from argv[1], default 16), reports bandwidth,
// and verifies the result on the host (1.5 * 2 + 3 == 6 per element).
int main(int argc, char** argv) {
    size_t pow = read_arg(argc, argv, 1, 16);
    // Fix: shift a size_t literal so pow >= 31 does not overflow a 32-bit int.
    size_t n = size_t(1) << pow;
    auto size_in_bytes = n * sizeof(double);
    // NOTE(review): return value of hipInit is ignored; failures surface later.
    hipInit(0);
    std::cout << "memcopy and daxpy test of size " << n << "\n";
    // Device buffers plus host source/result buffers (helpers from util.hpp).
    double* x_device = malloc_device<double>(n);
    double* y_device = malloc_device<double>(n);
    double* x_host = malloc_host<double>(n, 1.5);
    double* y_host = malloc_host<double>(n, 3.0);
    double* y = malloc_host<double>(n, 0.0);
    // copy to device (timed)
    auto start = get_time();
    copy_to_device<double>(x_host, x_device, n);
    copy_to_device<double>(y_host, y_device, n);
    auto time_H2D = get_time() - start;
    // Launch geometry: ceil(n / block_dim) blocks of block_dim threads.
    auto block_dim = 64;
    auto grid_dim = (n + (block_dim - 1)) / block_dim;
    // synchronize the host and device so that the timings are accurate
    hipDeviceSynchronize();
    start = get_time();
    // launch the kernel with alpha = 2.0
    hipLaunchKernelGGL(( axpy_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, y_device, x_device, 2.0, n);
    hipDeviceSynchronize();
    auto time_axpy = get_time() - start;
    // check for error in last kernel call
    cuda_check_last_kernel("axpy kernel");
    // copy result back to host (timed)
    start = get_time();
    copy_to_host<double>(y_device, y, n);
    auto time_D2H = get_time() - start;
    std::cout << "-------\ntimings\n-------\n";
    std::cout << "H2D : " << time_H2D << " s\n";
    std::cout << "D2H : " << time_D2H << " s\n";
    std::cout << "axpy : " << time_axpy << " s\n";
    std::cout << std::endl;
    std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n";
    std::cout << std::endl;
    std::cout << "-------\nbandwidth\n-------\n";
    // H2D moved two buffers, D2H one; report MB/s.
    auto H2D_BW = size_in_bytes/1e6*2 / time_H2D;
    auto D2H_BW = size_in_bytes/1e6 / time_D2H;
    std::cout << "H2D BW : " << H2D_BW << " MB/s\n";
    std::cout << "D2H BW : " << D2H_BW << " MB/s\n";
    // Verify: every element must equal 3.0 + 2.0 * 1.5 == 6.0 exactly-ish.
    auto errors = 0;
    // Fix: use size_t for the index so the comparison with size_t n is not
    // signed/unsigned mixed (and still terminates for n > INT_MAX).
    for(size_t i=0; i<n; ++i) {
        if(::fabs(6.-y[i])>1e-15) {
            ++errors;
        }
    }
    std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n";
    hipFree(x_device);
    hipFree(y_device);
    // NOTE(review): assumes malloc_host-allocated memory is released with
    // plain free -- confirm against util.hpp.
    free(x_host);
    free(y_host);
    free(y);
    return 0;
}
| 52f17752ba93dbbd58f3ffb7adf27373fb96dce6.cu | #include <iostream>
#include <cuda.h>
#include "util.hpp"
// TODO CUDA kernel implementing axpy
// y = y + alpha*x
// Element-wise axpy: y[i] += a * x[i] for the first n elements.
// One thread per element; threads whose global index falls past n exit early.
template <typename T>
__global__
void axpy_kernel(T* y, T* x, T a, int n){
    auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n) {
        return;
    }
    y[gid] += a * x[gid];
}
// Benchmark driver: times H2D/D2H copies and a daxpy (y += alpha*x) kernel
// on n = 2^pow doubles (pow from argv[1], default 16), reports bandwidth,
// and verifies the result on the host (1.5 * 2 + 3 == 6 per element).
int main(int argc, char** argv) {
    size_t pow = read_arg(argc, argv, 1, 16);
    // Fix: shift a size_t literal so pow >= 31 does not overflow a 32-bit int.
    size_t n = size_t(1) << pow;
    auto size_in_bytes = n * sizeof(double);
    // NOTE(review): return value of cuInit is ignored; failures surface later.
    cuInit(0);
    std::cout << "memcopy and daxpy test of size " << n << "\n";
    // Device buffers plus host source/result buffers (helpers from util.hpp).
    double* x_device = malloc_device<double>(n);
    double* y_device = malloc_device<double>(n);
    double* x_host = malloc_host<double>(n, 1.5);
    double* y_host = malloc_host<double>(n, 3.0);
    double* y = malloc_host<double>(n, 0.0);
    // copy to device (timed)
    auto start = get_time();
    copy_to_device<double>(x_host, x_device, n);
    copy_to_device<double>(y_host, y_device, n);
    auto time_H2D = get_time() - start;
    // Launch geometry: ceil(n / block_dim) blocks of block_dim threads.
    auto block_dim = 64;
    auto grid_dim = (n + (block_dim - 1)) / block_dim;
    // synchronize the host and device so that the timings are accurate
    cudaDeviceSynchronize();
    start = get_time();
    // launch the kernel with alpha = 2.0
    axpy_kernel<<<grid_dim, block_dim>>>(y_device, x_device, 2.0, n);
    cudaDeviceSynchronize();
    auto time_axpy = get_time() - start;
    // check for error in last kernel call
    cuda_check_last_kernel("axpy kernel");
    // copy result back to host (timed)
    start = get_time();
    copy_to_host<double>(y_device, y, n);
    auto time_D2H = get_time() - start;
    std::cout << "-------\ntimings\n-------\n";
    std::cout << "H2D : " << time_H2D << " s\n";
    std::cout << "D2H : " << time_D2H << " s\n";
    std::cout << "axpy : " << time_axpy << " s\n";
    std::cout << std::endl;
    std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n";
    std::cout << std::endl;
    std::cout << "-------\nbandwidth\n-------\n";
    // H2D moved two buffers, D2H one; report MB/s.
    auto H2D_BW = size_in_bytes/1e6*2 / time_H2D;
    auto D2H_BW = size_in_bytes/1e6 / time_D2H;
    std::cout << "H2D BW : " << H2D_BW << " MB/s\n";
    std::cout << "D2H BW : " << D2H_BW << " MB/s\n";
    // Verify: every element must equal 3.0 + 2.0 * 1.5 == 6.0 exactly-ish.
    auto errors = 0;
    // Fix: use size_t for the index so the comparison with size_t n is not
    // signed/unsigned mixed (and still terminates for n > INT_MAX).
    for(size_t i=0; i<n; ++i) {
        if(std::fabs(6.-y[i])>1e-15) {
            ++errors;
        }
    }
    std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n";
    cudaFree(x_device);
    cudaFree(y_device);
    // NOTE(review): assumes malloc_host-allocated memory is released with
    // plain free -- confirm against util.hpp.
    free(x_host);
    free(y_host);
    free(y);
    return 0;
}
|
79c0220c9aafa3c2cd5352ca28b19bad85539c5f.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Tensor layouts for this kernel configuration: interleaved-32 source and
// filter, interleaved-4 destination.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tile shapes at threadblock / warp / instruction level.
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
// Epilogue: bias add + linear combination + ReLU + saturating clamp.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation of the wrapper for this one configuration; the
// wrapper body lives in the included .cuinl above.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 79c0220c9aafa3c2cd5352ca28b19bad85539c5f.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Tensor layouts for this kernel configuration: interleaved-32 source and
// filter, interleaved-4 destination.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tile shapes at threadblock / warp / instruction level.
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
// Epilogue: bias add + linear combination + ReLU + saturating clamp.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation of the wrapper for this one configuration; the
// wrapper body lives in the included .cuinl above.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
00bb69579632f857fd16bee3c64433f67d099930.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string.h>
#include <sys/time.h>
#include <math.h>
// CUDA runtime
//#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
using namespace std;
//**************************************************************************
// Wall-clock time in seconds since the epoch, with microsecond resolution
// (gettimeofday). Used for host-side interval timing.
double cpuSecond()
{
    timeval now;
    gettimeofday(&now, nullptr);
    return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec) * 1e-6;
}
//**************************************************************************
// 5-point stencil on the squares of A, zero-padded outside [0, N):
//   B[i] = (A[i-2]^2 + 2*A[i-1]^2 + A[i]^2 - 3*A[i+1]^2 + 5*A[i+2]^2) / 24
// One thread per element; the i < N guard covers a partial last block.
__global__ void transformacion_no_shared(float *A, float *B, int N)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    float Aim2, Aim1, Ai, Aip1, Aip2;
    if (i < N)
    {
        Aim2 = (i - 2 < 0) ? 0.0f : A[i - 2];
        Aim1 = (i - 1 < 0) ? 0.0f : A[i - 1];
        // Fix: the old guards used `> N`, so for i == N-1 (and N-2) the
        // kernel read A[N] / A[N+1] past the end of the array. `>=` treats
        // out-of-range taps as zero, matching the left boundary.
        Aip1 = (i + 1 >= N) ? 0.0f : A[i + 1];
        Aip2 = (i + 2 >= N) ? 0.0f : A[i + 2];
        Ai = A[i];
        B[i] = (pow(Aim2, 2) + 2.0 * pow(Aim1, 2) + pow(Ai, 2) - 3.0 * pow(Aip1, 2) + 5.0 * pow(Aip2, 2)) / 24.0;
    }
}
//**************************************************************************
// Vector maximum kernel
// Per-block maximum: V_out[blockIdx.x] = max over this block's slice of
// V_in. Launch with dynamic shared memory of blockDim.x * sizeof(float)
// (as the host code does) and a power-of-two blockDim.x — the halving
// loop assumes it. Out-of-range lanes contribute 0.0f, which matches the
// CPU reference that also seeds its max with 0.
__global__ void reduceMax(float *V_in, float *V_out, const int N)
{
    // Fix: the scratch buffer holds floats but was declared
    // `extern __shared__ int sdata[]`, truncating every stored value and
    // comparing as integers. It must be float.
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = ((i < N) ? V_in[i] : 0.0f);
    __syncthreads();
    // Tree reduction in shared memory. The barrier sits outside the
    // divergent branch so every thread in the block reaches it.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
            if (sdata[tid] < sdata[tid + s])
                sdata[tid] = sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        V_out[blockIdx.x] = sdata[0];
}
/**************************************************************************
**************************************************************************/
int main(int argc, char *argv[])
{
int blockSize, N;
if (argc != 3)
{
cerr << "Error en los argumentos: blockSize numValores" << endl;
return (-1);
}
else
{
blockSize = atoi(argv[1]);
//numBlocks = atoi(argv[2]);
N = atoi(argv[2]);
}
//N = blockSize * numBlocks;
//Get GPU information
int devID;
hipDeviceProp_t props;
hipError_t err;
err = hipGetDevice(&devID);
if (err != hipSuccess)
{
cout << "ERRORRR" << endl;
}
hipGetDeviceProperties(&props, devID);
printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
cout << "Tamao bloque: " << blockSize << endl;
cout << "N valores: " << N << endl;
//Variables
int size = N * sizeof(float);
float *A = new float[N];
float *B = new float[N];
float *A_CPU = new float[N];
float *B_CPU = new float[N];
int blocks_per_grid = ceil(float(N) / blockSize);
float *B_out_red = new float[blocks_per_grid];
float *A_device = NULL;
float *B_device = NULL;
float *B_d_red_in = NULL;
float *B_d_red_out = NULL;
//Initialize vector A (GPU) y A (CPU)
for (int i = 0; i < N; i++)
{
A[i] = (float)(1 - (i % 100) * 0.001);
A_CPU[i] = (float)(1 - (i % 100) * 0.001);
}
//Reserve memory
err = hipMalloc((void **)&A_device, size);
if (err != hipSuccess)
{
cout << "ERROR RESERVA [A Device]" << endl;
}
err = hipMalloc((void **)&B_device, size);
if (err != hipSuccess)
{
cout << "ERROR RESERVA [B Device]" << endl;
}
err = hipMalloc((void **)&B_d_red_in, size);
if (err != hipSuccess)
{
cout << "ERROR RESERVA [A Device Reduction INPUT]" << endl;
}
err = hipMalloc((void **)&B_d_red_out, blocks_per_grid * sizeof(float));
if (err != hipSuccess)
{
cout << "ERROR RESERVA [A Device Reduction OUTPUT]" << endl;
}
/* ---------------------------------------------------------------------- */
/* ------------------------------ CPU phase ----------------------------- */
double t1 = cpuSecond();
float Ai, Aim1, Aim2, Aip1, Aip2;
float max = 0.0;
for (int i = 0; i < N; i++)
{
Aim2 = (i - 2 < 0) ? 0.0 : A_CPU[i - 2];
Aim1 = (i - 1 < 0) ? 0.0 : A_CPU[i - 1];
Aip1 = (i + 1 > N) ? 0.0 : A_CPU[i + 1];
Aip2 = (i + 2 > N) ? 0.0 : A_CPU[i + 2];
Ai = A_CPU[i];
B_CPU[i] = (pow(Aim2, 2) + 2.0 * pow(Aim1, 2) + pow(Ai, 2) - 3.0 * pow(Aip1, 2) + 5.0 * pow(Aip2, 2)) / 24.0;
}
double Tcpu_max = cpuSecond() - t1;
for (int i = 0; i < N; i++)
{
max = (B_CPU[i] > max) ? B_CPU[i] : max;
}
cout << "Tiempo gastado CPU = " << Tcpu_max << endl;
cout << "Mximo: " << max << endl;
/* ---------------------------------------------------------------------- */
/* ------------------ GPU phase >>[No shared memory]<< ------------------ */
t1 = cpuSecond();
//Host A to Device
err = hipMemcpy(A_device, A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
cout << "ERROR COPIA [A Device]" << endl;
}
int threadsPerBlock = blockSize;
int blocksPerGrid = ceil((float)N / (float)threadsPerBlock);
hipLaunchKernelGGL(( transformacion_no_shared), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A_device, B_device, N);
//Device to Host
hipMemcpy(B, B_device, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//Time GPU
double Tgpu = cpuSecond() - t1;
cout << "Tiempo gastado GPU = " << Tgpu << endl
<< endl;
/* ------------------------------------------------------------------- */
// GPU REDUCTION PHASE
t1 = cpuSecond();
//Host to device
err = hipMemcpy(B_d_red_in, B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
cout << "ERROR COPIA A GPU REDUCTION" << endl;
}
int shared_mem_size = threadsPerBlock * sizeof(float);
hipLaunchKernelGGL(( reduceMax), dim3(blocksPerGrid), dim3(threadsPerBlock), shared_mem_size, 0, B_d_red_in, B_d_red_out, N);
hipDeviceSynchronize();
//Device to Host
hipMemcpy(B_out_red, B_d_red_out, blocks_per_grid * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
max = 0.0;
for (int i = 0; i < blocks_per_grid; i++)
max = (B_out_red[i] > max) ? B_out_red[i] : max;
//Time GPU Reduction
double Tgpu_reduction = cpuSecond() - t1;
cout << "Tiempo gastado GPU REDUCTION = " << Tgpu_reduction << endl;
cout << "Mximo: " << max << endl
<< endl;
cout << "Ganancia [TGPU]= " << Tcpu_max / Tgpu << endl;
cout << "Ganancia [TGPU reduction]= " << Tcpu_max / Tgpu_reduction << endl;
}
| 00bb69579632f857fd16bee3c64433f67d099930.cu | #include <iostream>
#include <fstream>
#include <string.h>
#include <sys/time.h>
#include <math.h>
// CUDA runtime
//#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//#include <helper_cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using namespace std;
//**************************************************************************
// Wall-clock time in seconds since the epoch, with microsecond resolution
// (gettimeofday). Used for host-side interval timing.
double cpuSecond()
{
    timeval now;
    gettimeofday(&now, nullptr);
    return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec) * 1e-6;
}
//**************************************************************************
// 5-point stencil on the squares of A, zero-padded outside [0, N):
//   B[i] = (A[i-2]^2 + 2*A[i-1]^2 + A[i]^2 - 3*A[i+1]^2 + 5*A[i+2]^2) / 24
// One thread per element; the i < N guard covers a partial last block.
__global__ void transformacion_no_shared(float *A, float *B, int N)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    float Aim2, Aim1, Ai, Aip1, Aip2;
    if (i < N)
    {
        Aim2 = (i - 2 < 0) ? 0.0f : A[i - 2];
        Aim1 = (i - 1 < 0) ? 0.0f : A[i - 1];
        // Fix: the old guards used `> N`, so for i == N-1 (and N-2) the
        // kernel read A[N] / A[N+1] past the end of the array. `>=` treats
        // out-of-range taps as zero, matching the left boundary.
        Aip1 = (i + 1 >= N) ? 0.0f : A[i + 1];
        Aip2 = (i + 2 >= N) ? 0.0f : A[i + 2];
        Ai = A[i];
        B[i] = (pow(Aim2, 2) + 2.0 * pow(Aim1, 2) + pow(Ai, 2) - 3.0 * pow(Aip1, 2) + 5.0 * pow(Aip2, 2)) / 24.0;
    }
}
//**************************************************************************
// Vector maximum kernel
// Per-block maximum: V_out[blockIdx.x] = max over this block's slice of
// V_in. Launch with dynamic shared memory of blockDim.x * sizeof(float)
// (as the host code does) and a power-of-two blockDim.x — the halving
// loop assumes it. Out-of-range lanes contribute 0.0f, which matches the
// CPU reference that also seeds its max with 0.
__global__ void reduceMax(float *V_in, float *V_out, const int N)
{
    // Fix: the scratch buffer holds floats but was declared
    // `extern __shared__ int sdata[]`, truncating every stored value and
    // comparing as integers. It must be float.
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = ((i < N) ? V_in[i] : 0.0f);
    __syncthreads();
    // Tree reduction in shared memory. The barrier sits outside the
    // divergent branch so every thread in the block reaches it.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
            if (sdata[tid] < sdata[tid + s])
                sdata[tid] = sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        V_out[blockIdx.x] = sdata[0];
}
/**************************************************************************
**************************************************************************/
// Driver: computes the stencil + max on the CPU, then on the GPU (plain
// kernel followed by a block-wise max reduction), and reports timings and
// speedups. Usage: prog <blockSize> <numValores>.
int main(int argc, char *argv[])
{
    int blockSize, N;
    if (argc != 3)
    {
        cerr << "Error en los argumentos: blockSize numValores" << endl;
        return (-1);
    }
    else
    {
        blockSize = atoi(argv[1]);
        //numBlocks = atoi(argv[2]);
        N = atoi(argv[2]);
    }
    //N = blockSize * numBlocks;
    //Get GPU information
    int devID;
    cudaDeviceProp props;
    cudaError_t err;
    err = cudaGetDevice(&devID);
    if (err != cudaSuccess)
    {
        cout << "ERRORRR" << endl;
    }
    cudaGetDeviceProperties(&props, devID);
    printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
    cout << "Tamaño bloque: " << blockSize << endl;
    cout << "Nº valores: " << N << endl;
    //Variables
    // size_t (was int) so N * sizeof(float) cannot overflow for large N.
    size_t size = N * sizeof(float);
    float *A = new float[N];
    float *B = new float[N];
    float *A_CPU = new float[N];
    float *B_CPU = new float[N];
    int blocks_per_grid = ceil(float(N) / blockSize);
    float *B_out_red = new float[blocks_per_grid];
    float *A_device = NULL;
    float *B_device = NULL;
    float *B_d_red_in = NULL;
    float *B_d_red_out = NULL;
    //Initialize vector A (GPU) y A (CPU)
    for (int i = 0; i < N; i++)
    {
        A[i] = (float)(1 - (i % 100) * 0.001);
        A_CPU[i] = (float)(1 - (i % 100) * 0.001);
    }
    //Reserve memory
    err = cudaMalloc((void **)&A_device, size);
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [A Device]" << endl;
    }
    err = cudaMalloc((void **)&B_device, size);
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [B Device]" << endl;
    }
    err = cudaMalloc((void **)&B_d_red_in, size);
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [A Device Reduction INPUT]" << endl;
    }
    err = cudaMalloc((void **)&B_d_red_out, blocks_per_grid * sizeof(float));
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [A Device Reduction OUTPUT]" << endl;
    }
    /* ---------------------------------------------------------------------- */
    /* ------------------------------ CPU phase ----------------------------- */
    double t1 = cpuSecond();
    float Ai, Aim1, Aim2, Aip1, Aip2;
    float max = 0.0;
    for (int i = 0; i < N; i++)
    {
        Aim2 = (i - 2 < 0) ? 0.0 : A_CPU[i - 2];
        Aim1 = (i - 1 < 0) ? 0.0 : A_CPU[i - 1];
        // Fix: `i + 1 > N` / `i + 2 > N` let the last iterations read
        // A_CPU[N] and A_CPU[N+1] past the end of the array; `>=` treats
        // out-of-range taps as zero, like the left boundary.
        Aip1 = (i + 1 >= N) ? 0.0 : A_CPU[i + 1];
        Aip2 = (i + 2 >= N) ? 0.0 : A_CPU[i + 2];
        Ai = A_CPU[i];
        B_CPU[i] = (pow(Aim2, 2) + 2.0 * pow(Aim1, 2) + pow(Ai, 2) - 3.0 * pow(Aip1, 2) + 5.0 * pow(Aip2, 2)) / 24.0;
    }
    double Tcpu_max = cpuSecond() - t1;
    for (int i = 0; i < N; i++)
    {
        max = (B_CPU[i] > max) ? B_CPU[i] : max;
    }
    cout << "Tiempo gastado CPU = " << Tcpu_max << endl;
    cout << "Máximo: " << max << endl;
    /* ---------------------------------------------------------------------- */
    /* ------------------ GPU phase >>[No shared memory]<< ------------------ */
    t1 = cpuSecond();
    //Host A to Device
    err = cudaMemcpy(A_device, A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        cout << "ERROR COPIA [A Device]" << endl;
    }
    int threadsPerBlock = blockSize;
    int blocksPerGrid = ceil((float)N / (float)threadsPerBlock);
    transformacion_no_shared<<<blocksPerGrid, threadsPerBlock>>>(A_device, B_device, N);
    //Device to Host (blocking copy; the extra sync is belt-and-braces)
    cudaMemcpy(B, B_device, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    //Time GPU
    double Tgpu = cpuSecond() - t1;
    cout << "Tiempo gastado GPU = " << Tgpu << endl
         << endl;
    /* ------------------------------------------------------------------- */
    // GPU REDUCTION PHASE
    t1 = cpuSecond();
    //Host to device
    err = cudaMemcpy(B_d_red_in, B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        cout << "ERROR COPIA A GPU REDUCTION" << endl;
    }
    // Dynamic shared memory: one float per thread (reduceMax requires it).
    int shared_mem_size = threadsPerBlock * sizeof(float);
    reduceMax<<<blocksPerGrid, threadsPerBlock, shared_mem_size>>>(B_d_red_in, B_d_red_out, N);
    cudaDeviceSynchronize();
    //Device to Host: one partial maximum per block, finished on the host
    cudaMemcpy(B_out_red, B_d_red_out, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    max = 0.0;
    for (int i = 0; i < blocks_per_grid; i++)
        max = (B_out_red[i] > max) ? B_out_red[i] : max;
    //Time GPU Reduction
    double Tgpu_reduction = cpuSecond() - t1;
    cout << "Tiempo gastado GPU REDUCTION = " << Tgpu_reduction << endl;
    cout << "Máximo: " << max << endl
         << endl;
    cout << "Ganancia [TGPU]= " << Tcpu_max / Tgpu << endl;
    cout << "Ganancia [TGPU reduction]= " << Tcpu_max / Tgpu_reduction << endl;
    // Release device and host buffers (previously leaked).
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(B_d_red_in);
    cudaFree(B_d_red_out);
    delete[] A;
    delete[] B;
    delete[] A_CPU;
    delete[] B_CPU;
    delete[] B_out_red;
    return 0;
}
|
fd99deb1f4863a9f0c5bd7b1d89c5e52693d8bbc.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <stdexcept>

#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
// Builds a handle bound to the current device: queries its properties
// (throws std::runtime_error on failure), allocates + zeroes the device
// workspace via set_workspace_size, and forces the operation-table
// Singleton to be constructed up front.
Handle::Handle(hipStream_t stream, size_t workspace_size)
: provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
hipError_t error = hipGetDevice(&device_idx);
if (error != hipSuccess) {
throw std::runtime_error("hipGetDevice() failed");
}
error = hipGetDeviceProperties(&device_, device_idx);
if (error != hipSuccess) {
throw std::runtime_error("hipGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
// Eagerly initialize the global operation table so first-use cost is paid
// here rather than inside the first gemm() call.
Singleton::get();
}
/// Destructor
/// Destructor: releases the device workspace, if any. (The original body
/// tested `workspace_` twice in nested ifs; one check suffices.)
Handle::~Handle() {
    if (workspace_) {
        hipFree(workspace_);
        workspace_ = nullptr;
        workspace_size_ = 0;
    }
}
/// Move constructor
// Transfers ownership of the device workspace from `handle`; the source is
// left with a null workspace so its destructor will not free it.
Handle::Handle(Handle&& handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
// NOTE(review): provider_ and last_operation_ are not copied here, while
// the move-assignment operator does copy provider_ — confirm this
// asymmetry is intentional.
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
/// Move assignment: adopts the source handle's device workspace and
/// settings, leaving the source with a null workspace.
Handle& Handle::operator=(Handle&& handle) {
    if (this != &handle) {
        // Fix: release any workspace this handle already owns before
        // adopting the source's — the previous code overwrote the pointer
        // and leaked the allocation.
        if (workspace_) {
            hipFree(workspace_);
        }
        provider_ = handle.provider_;
        device_ = handle.device_;
        workspace_size_ = handle.workspace_size_;
        workspace_ = handle.workspace_;
        stream_ = handle.stream_;
        scalar_pointer_mode_ = handle.scalar_pointer_mode_;
        handle.workspace_ = nullptr;
        handle.workspace_size_ = 0;
    }
    return *this;
}
// Compute capability encoded as major*10 + minor (e.g. SM75 -> 75),
// matching the keys used by the operation table below.
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(hipStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
hipStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void* Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to
/// get_device_workspace()
// Frees and reallocates only when the size actually changes; in every case
// the (possibly reused) workspace is zero-filled before returning. Throws
// std::runtime_error if allocation or the memset fails.
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
hipFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
hipError_t error =
hipMalloc((void**)&workspace_, workspace_size_);
if (error != hipSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
hipError_t error = hipMemset(workspace_, 0, workspace_size_);
if (error != hipSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const* Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
/// Returns the maximum alignment (in elements) required across the A, B
/// and C operands of a kernel. Uses std::max — the unqualified `::max`
/// left by the hipify conversion is not a standard host-side function.
static int maximum_alignment_requirement(GemmDescription const& desc) {
    return std::max(std::max(desc.A.alignment, desc.B.alignment),
                    desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies,
/// starting from a given upper limit.
/// Returns the largest alignment (in units of elements) the problem
/// satisfies, starting from max_alignment_in_bytes and halving until the
/// pointers, the problem extents / leading dimensions / batch strides, and
/// the element widths all divide evenly. Returns 0 if nothing fits.
static int gemm_problem_alignment(
        int M, int N, int K, NumericTypeID element_A, void const* ptr_A,
        int lda, int64_t batch_stride_A, NumericTypeID element_B,
        void const* ptr_B, int ldb, int64_t batch_stride_B,
        NumericTypeID element_C, void const* ptr_C, int ldc,
        int64_t batch_stride_C, void const* ptr_D, int ldd,
        int64_t batch_stride_D, int max_alignment_in_bytes = 16) {
    void const* pointers[] = {ptr_A, ptr_B, ptr_C, ptr_D};
    int64_t extents[] = {M,
                         N,
                         K,
                         lda,
                         ldb,
                         ldc,
                         ldd,
                         batch_stride_A,
                         batch_stride_B,
                         batch_stride_C,
                         batch_stride_D};
    NumericTypeID elements[] = {element_A, element_B, element_C};
    for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
        bool satisfied = true;
        // Can pointers satisfy this? (null pointers trivially pass)
        for (void const* ptr : pointers) {
            std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
            if (int_ptr % max_alignment_in_bytes) {
                satisfied = false;
                break;
            }
        }
        if (!satisfied) {
            continue;
        }
        // Compute the maximum alignment based on element data types.
        // std::max replaces the unqualified `::max` left by hipify, which
        // is not a standard host-side function.
        int max_element_alignment = 0;
        for (NumericTypeID type_id : elements) {
            int element_alignment =
                    max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
            max_element_alignment =
                    std::max(max_element_alignment, element_alignment);
        }
        // Can the problem size and leading dimensions satisfy this?
        for (int64_t extent : extents) {
            if (extent % max_element_alignment) {
                satisfied = false;
                break;
            }
        }
        if (!satisfied) {
            continue;
        }
        // Yes
        return max_element_alignment;
    }
    // No alignment satisfies this problem
    return 0;
}
/// Find the best kernel in descending order of preference.
// Picks the best kernel for the (compute capability, alignment) preference
// key: walks the per-functional-key map in descending compute-capability
// order and returns the first kernel whose CC range contains ours and whose
// alignment requirement the problem meets; nullptr if none qualifies.
static Operation const* find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
// upper_bound yields the first entry strictly greater than the key, so
// every candidate lies before it.
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const* operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const* op : cc_it->second) {
GemmDescription const& desc =
static_cast<GemmDescription const&>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
// Single (non-batched) GEMM entry point: looks up candidate kernels by the
// functional key, narrows by achievable alignment and compute capability,
// then configures, initializes and runs the chosen operation on stream_.
// Returns kErrorNotSupported when no kernel or workspace fits.
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const* alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform
transform_A, /// Complex transformation applied to A matrix -
/// ignored for real-valued matrices
void const* ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform
transform_B, /// Complex transformation applied to B matrix -
/// ignored for real-valued matrices
void const* ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const* beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const* ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void* ptr_D, /// Pointer to D matrix
int ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(provider_, GemmKind::kGemm, element_compute,
element_scalar, element_A, layout_A, transform_A,
element_B, layout_B, transform_B, element_C);
auto operators_it =
Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it ==
Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
// Non-batched GEMM: all batch strides are passed as 0.
int alignment = gemm_problem_alignment(
M, N, K, element_A, ptr_A, lda, 0, element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0, ptr_D, ldd, 0, kMaximumAlignmentSize);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const* operation =
find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{{M, N, K}, lda, ldb, ldc, ldd, 1};
// Query host work space size
uint64_t host_workspace_size_needed =
operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Fixed-size stack buffer; the check above guarantees it is big enough.
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed =
operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(&configuration, host_workspace,
workspace_, stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A, ptr_B, ptr_C, ptr_D, alpha, beta, scalar_pointer_mode_};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K
// parallel.
//
// Universal GEMM entry point: same kernel-selection flow as gemm(), but
// supports batched-strided, batched-array, and split-K serial/parallel
// modes via `mode`, `batch_count` and the per-operand batch strides.
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal
/// GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const* alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform
transform_A, /// Complex transformation applied to A matrix -
/// ignored for real-valued matrices
void const* ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform
transform_B, /// Complex transformation applied to B matrix -
/// ignored for real-valued matrices
void const* ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const* beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const* ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void* ptr_D, /// Pointer to D matrix
int ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(provider_, GemmKind::kUniversal, element_compute,
element_scalar, element_A, layout_A, transform_A,
element_B, layout_B, transform_B, element_C);
auto operators_it =
Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it ==
Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const* ptr_A_check = ptr_A;
void const* ptr_B_check = ptr_B;
void const* ptr_C_check = ptr_C;
void* ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the
// host, as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
// NOTE(review): the batch strides are passed as 0 here rather than the
// actual batch_stride_* values, so they are excluded from the alignment
// search — confirm this is intentional.
int alignment = gemm_problem_alignment(
M, N, K, element_A, ptr_A_check, lda, 0, element_B, ptr_B_check,
ldb, 0, element_C, ptr_C_check, ldc, 0, ptr_D_check, ldd, 0,
kMaximumAlignmentSize);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const* operation =
find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{mode, {M, N, K}, batch_count, lda,
ldb, ldc, ldd};
// Query host work space size
uint64_t host_workspace_size_needed =
operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Fixed-size stack buffer; the check above guarantees it is big enough.
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed =
operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(&configuration, host_workspace,
workspace_, stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmUniversalArguments arguments{ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
/// Executes a batched planar-complex GEMM: D <= alpha * A*B + beta * C, where
/// each complex operand is stored as separate real and imaginary planes.
/// Selects the best available kernel for the problem's alignment and the
/// device's compute capability, then initializes and runs it on `stream_`.
Status Handle::gemm_planar_complex(
        int M, /// GEMM M dimension
        int N, /// GEMM N dimension
        int K, /// GEMM K dimension
        NumericTypeID element_compute, /// Data type of internal accumulation
        NumericTypeID element_scalar, /// Data type of alpha/beta scalars
        void const* alpha, /// Pointer to alpha scalar
        NumericTypeID element_A, /// Data type of A matrix elements
        LayoutTypeID layout_A, /// Layout of A matrix
        ComplexTransform
                transform_A, /// Complex transformation applied to A matrix
        void const* ptr_A_real, /// Pointer to real part of A matrix
        void const* ptr_A_imag, /// Pointer to imaginary part of A matrix
        int lda_real, /// Leading dimension of real part of A matrix
        int lda_imag, /// Leading dimension of imaginary part of A matrix
        NumericTypeID element_B, /// Data type of B matrix elements
        LayoutTypeID layout_B, /// Layout of B matrix
        ComplexTransform
                transform_B, /// Complex transformation applied to B matrix
        void const* ptr_B_real, /// Pointer to real part of B matrix
        void const* ptr_B_imag, /// Pointer to imaginary part of B matrix
        int ldb_real, /// Leading dimension of real part of B matrix
        int ldb_imag, /// Leading dimension of imaginary part of B matrix
        void const* beta, /// Pointer to beta scalar
        NumericTypeID element_C, /// Data type of C and D matrix
        void const* ptr_C_real, /// Pointer to real part of C matrix
        void const* ptr_C_imag, /// Pointer to imaginary part of C matrix
        int ldc_real, /// Leading dimension of real part of C matrix
        int ldc_imag, /// Leading dimension of imaginary part of C matrix
        void* ptr_D_real, /// Pointer to real part of D matrix
        void* ptr_D_imag, /// Pointer to imaginary part of D matrix
        int ldd_real, /// Leading dimension of real part of D matrix
        int ldd_imag, /// Leading dimension of imaginary part of D matrix
        int batch_count, /// Number of batched GEMMs to execute
        int64_t batch_stride_A_real, int64_t batch_stride_A_imag,
        int64_t batch_stride_B_real, int64_t batch_stride_B_imag,
        int64_t batch_stride_C_real, int64_t batch_stride_C_imag,
        int64_t batch_stride_D_real, int64_t batch_stride_D_imag) {
    //
    // Find the set of candidate operations matching the functional signature
    //
    GemmFunctionalKey key(provider_, GemmKind::kPlanarComplex, element_compute,
                          element_scalar, element_A, layout_A, transform_A,
                          element_B, layout_B, transform_B, element_C);
    auto operators_it =
            Singleton::get().operation_table.gemm_operations.find(key);
    if (operators_it ==
        Singleton::get().operation_table.gemm_operations.end()) {
        return cutlass::Status::kErrorNotSupported;
    }
    if (operators_it->second.empty()) {
        return cutlass::Status::kErrorNotSupported;
    }
    //
    // Compute the largest alignment restriction the kernel can satisfy.
    //
    // Maximum alignment expectation among all kernels (in units of bytes)
    int const kMaximumAlignmentSize = 16;
    // Both the real and imaginary planes must satisfy the alignment, so take
    // the greater of the two per-plane alignment requirements.
    // NOTE: use std::max explicitly; the unqualified/global '::max' is not a
    // portable host-side function.
    int alignment = std::max(
            gemm_problem_alignment(M, N, K, element_A, ptr_A_real, lda_real,
                                   batch_stride_A_real, element_B, ptr_B_real,
                                   ldb_real, batch_stride_B_real, element_C,
                                   ptr_C_real, ldc_real, batch_stride_C_real,
                                   ptr_D_real, ldd_real, batch_stride_D_real,
                                   kMaximumAlignmentSize),
            gemm_problem_alignment(M, N, K, element_A, ptr_A_imag, lda_imag,
                                   batch_stride_A_imag, element_B, ptr_B_imag,
                                   ldb_imag, batch_stride_B_imag, element_C,
                                   ptr_C_imag, ldc_imag, batch_stride_C_imag,
                                   ptr_D_imag, ldd_imag, batch_stride_D_imag,
                                   kMaximumAlignmentSize));
    //
    // Find the best kernel in descending order of preference.
    //
    GemmPreferenceKey preference_key(compute_capability(), alignment);
    Operation const* operation =
            find_gemm_operation(operators_it, preference_key);
    if (!operation) {
        return cutlass::Status::kErrorNotSupported;
    }
    last_operation_ = operation;
    //
    // Configure operation
    //
    GemmPlanarComplexConfiguration configuration{GemmUniversalMode::kBatched,
                                                 {M, N, K},
                                                 batch_count,
                                                 lda_real,
                                                 lda_imag,
                                                 ldb_real,
                                                 ldb_imag,
                                                 ldc_real,
                                                 ldc_imag,
                                                 ldd_real,
                                                 ldd_imag};
    // Query host work space size
    uint64_t host_workspace_size_needed =
            operation->get_host_workspace_size(&configuration);
    if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
        return cutlass::Status::kErrorNotSupported;
    }
    char host_workspace[kHostWorkspaceSize];
    // Query device workspace size
    uint64_t device_workspace_size_needed =
            operation->get_device_workspace_size(&configuration);
    if (uint64_t(workspace_size_) < device_workspace_size_needed) {
        return cutlass::Status::kErrorNotSupported;
    }
    // Initialize host and device workspaces
    Status status = operation->initialize(&configuration, host_workspace,
                                          workspace_, stream_);
    if (status != cutlass::Status::kSuccess) {
        return status;
    }
    // Run the operator
    GemmPlanarComplexArguments arguments{ptr_A_real,
                                         ptr_A_imag,
                                         ptr_B_real,
                                         ptr_B_imag,
                                         ptr_C_real,
                                         ptr_C_imag,
                                         ptr_D_real,
                                         ptr_D_imag,
                                         alpha,
                                         beta,
                                         scalar_pointer_mode_,
                                         batch_stride_A_real,
                                         batch_stride_A_imag,
                                         batch_stride_B_real,
                                         batch_stride_B_imag,
                                         batch_stride_C_real,
                                         batch_stride_C_imag,
                                         batch_stride_D_real,
                                         batch_stride_D_imag};
    return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
/// Executes a batched planar-complex GEMM whose operand pointers (and
/// per-batch problem sizes) are loaded from arrays resident in global memory.
/// Pointer alignment cannot be checked from the host (each batch index has
/// its own device-side pointer), so alignment is derived from the expected
/// problem dimensions and leading dimensions only.
Status Handle::gemm_planar_complex_array(
        int expected_M, /// Expected GEMM M dimension (used for sizing CUDA
                        /// grid)
        int expected_N, /// Expected GEMM N dimension (used for sizing CUDA
                        /// grid)
        int expected_K, /// Expected GEMM K dimension
        int batch_count, /// Number of independent GEMM computations to execute
        int const* M, /// Array containing the GEMM M dimension for each batch
                      /// index
        int const* N, /// Array containing the GEMM N dimension for each batch
                      /// index
        int const* K, /// Array containing the GEMM K dimension for each batch
                      /// index
        NumericTypeID element_compute, /// Data type of internal accumulation
        NumericTypeID element_scalar, /// Data type of alpha/beta scalars
        void const* alpha, /// Pointer to alpha scalar
        NumericTypeID element_A, /// Data type of A matrix elements
        LayoutTypeID layout_A, /// Layout of A matrix
        ComplexTransform
                transform_A, /// Complex transformation applied to A matrix
        void const* const* ptr_A_real, /// Pointer to array containing pointers
                                       /// to real part of A matrices
        void const* const* ptr_A_imag, /// Pointer to array containing pointers
                                       /// to imaginary part of A matrices
        int lda_real, /// Leading dimension of real part of A matrix
        int lda_imag, /// Leading dimension of imaginary part of A matrix
        NumericTypeID element_B, /// Data type of B matrix elements
        LayoutTypeID layout_B, /// Layout of B matrix
        ComplexTransform
                transform_B, /// Complex transformation applied to B matrix
        void const* const* ptr_B_real, /// Pointer to array containing pointers
                                       /// to real part of B matrices
        void const* const* ptr_B_imag, /// Pointer to array containing pointers
                                       /// to imaginary part of B matrices
        int ldb_real, /// Leading dimension of real part of B matrix
        int ldb_imag, /// Leading dimension of imaginary part of B matrix
        void const* beta, /// Pointer to beta scalar
        NumericTypeID element_C, /// Data type of C and D matrix
        void const* const* ptr_C_real, /// Pointer to array containing pointers
                                       /// to real part of C matrices
        void const* const* ptr_C_imag, /// Pointer to array containing pointers
                                       /// to imaginary part of C matrices
        int ldc_real, /// Leading dimension of real part of C matrix
        int ldc_imag, /// Leading dimension of imaginary part of C matrix
        void* const* ptr_D_real, /// Pointer to array containing pointers to
                                 /// real part of D matrices
        void* const* ptr_D_imag, /// Pointer to array containing pointers to
                                 /// imaginary part of D matrices
        int ldd_real, /// Leading dimension of real part of D matrix
        int ldd_imag /// Leading dimension of imaginary part of D matrix
        ) {
    //
    // Find the set of candidate operations matching the functional signature
    //
    GemmFunctionalKey key(provider_, GemmKind::kPlanarComplexArray,
                          element_compute, element_scalar, element_A, layout_A,
                          transform_A, element_B, layout_B, transform_B,
                          element_C);
    auto operators_it =
            Singleton::get().operation_table.gemm_operations.find(key);
    if (operators_it ==
        Singleton::get().operation_table.gemm_operations.end()) {
        return cutlass::Status::kErrorNotSupported;
    }
    if (operators_it->second.empty()) {
        return cutlass::Status::kErrorNotSupported;
    }
    //
    // Compute the largest alignment restriction the kernel can satisfy.
    //
    // Maximum alignment expectation among all kernels (in units of bytes)
    int const kMaximumAlignmentSize = 16;
    // Pointers are in device memory and batch strides do not apply; pass
    // nullptr/0 so only the expected extents and leading dimensions constrain
    // alignment. Both planes must satisfy the requirement, so take the max.
    // NOTE: use std::max explicitly; the unqualified/global '::max' is not a
    // portable host-side function.
    int alignment = std::max(
            gemm_problem_alignment(expected_M, expected_N, expected_K,
                                   element_A, nullptr, lda_real, 0, element_B,
                                   nullptr, ldb_real, 0, element_C, nullptr,
                                   ldc_real, 0, nullptr, ldd_real, 0,
                                   kMaximumAlignmentSize),
            gemm_problem_alignment(expected_M, expected_N, expected_K,
                                   element_A, nullptr, lda_imag, 0, element_B,
                                   nullptr, ldb_imag, 0, element_C, nullptr,
                                   ldc_imag, 0, nullptr, ldd_imag, 0,
                                   kMaximumAlignmentSize));
    //
    // Find the best kernel in descending order of preference.
    //
    GemmPreferenceKey preference_key(compute_capability(), alignment);
    Operation const* operation =
            find_gemm_operation(operators_it, preference_key);
    if (!operation) {
        return cutlass::Status::kErrorNotSupported;
    }
    last_operation_ = operation;
    //
    // Configure operation
    //
    GemmPlanarComplexArrayConfiguration configuration{
            {expected_M, expected_N, expected_K},
            batch_count,
            lda_real,
            lda_imag,
            ldb_real,
            ldb_imag,
            ldc_real,
            ldc_imag,
            ldd_real,
            ldd_imag};
    // Query host work space size
    uint64_t host_workspace_size_needed =
            operation->get_host_workspace_size(&configuration);
    if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
        return cutlass::Status::kErrorNotSupported;
    }
    char host_workspace[kHostWorkspaceSize];
    // Query device workspace size
    uint64_t device_workspace_size_needed =
            operation->get_device_workspace_size(&configuration);
    if (uint64_t(workspace_size_) < device_workspace_size_needed) {
        return cutlass::Status::kErrorNotSupported;
    }
    // Initialize host and device workspaces
    Status status = operation->initialize(&configuration, host_workspace,
                                          workspace_, stream_);
    if (status != cutlass::Status::kSuccess) {
        return status;
    }
    // Run the operator
    GemmPlanarComplexArrayArguments arguments{
            M,          N,          K,          ptr_A_real, ptr_A_imag,
            ptr_B_real, ptr_B_imag, ptr_C_real, ptr_C_imag, ptr_D_real,
            ptr_D_imag, alpha,      beta,       scalar_pointer_mode_};
    return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| fd99deb1f4863a9f0c5bd7b1d89c5e52693d8bbc.cu | /***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(cudaStream_t stream, size_t workspace_size)
: provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
cudaError_t error = cudaGetDevice(&device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
error = cudaGetDeviceProperties(&device_, device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
if (workspace_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = 0;
}
}
/// Move constructor
Handle::Handle(Handle&& handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle& Handle::operator=(Handle&& handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(cudaStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
cudaStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void* Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to
/// get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
cudaError_t error =
cudaMalloc((void**)&workspace_, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
cudaError_t error = cudaMemset(workspace_, 0, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const* Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const& desc) {
return std::max(std::max(desc.A.alignment, desc.B.alignment),
desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies,
/// starting from a given upper limit.
static int gemm_problem_alignment(
int M, int N, int K, NumericTypeID element_A, void const* ptr_A,
int lda, int64_t batch_stride_A, NumericTypeID element_B,
void const* ptr_B, int ldb, int64_t batch_stride_B,
NumericTypeID element_C, void const* ptr_C, int ldc,
int64_t batch_stride_C, void const* ptr_D, int ldd,
int64_t batch_stride_D, int max_alignment_in_bytes = 16) {
void const* pointers[] = {ptr_A, ptr_B, ptr_C, ptr_D};
int64_t extents[] = {M,
N,
K,
lda,
ldb,
ldc,
ldd,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D};
NumericTypeID elements[] = {element_A, element_B, element_C};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const* ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment =
max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment =
std::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
/// Find the best kernel in descending order of preference.
static Operation const* find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const* operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const* op : cc_it->second) {
GemmDescription const& desc =
static_cast<GemmDescription const&>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const* alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform
transform_A, /// Complex transformation applied to A matrix -
/// ignored for real-valued matrices
void const* ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform
transform_B, /// Complex transformation applied to B matrix -
/// ignored for real-valued matrices
void const* ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const* beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const* ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void* ptr_D, /// Pointer to D matrix
int ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(provider_, GemmKind::kGemm, element_compute,
element_scalar, element_A, layout_A, transform_A,
element_B, layout_B, transform_B, element_C);
auto operators_it =
Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it ==
Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K, element_A, ptr_A, lda, 0, element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0, ptr_D, ldd, 0, kMaximumAlignmentSize);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const* operation =
find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{{M, N, K}, lda, ldb, ldc, ldd, 1};
// Query host work space size
uint64_t host_workspace_size_needed =
operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed =
operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(&configuration, host_workspace,
workspace_, stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A, ptr_B, ptr_C, ptr_D, alpha, beta, scalar_pointer_mode_};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K
// parallel.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal
/// GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const* alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform
transform_A, /// Complex transformation applied to A matrix -
/// ignored for real-valued matrices
void const* ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform
transform_B, /// Complex transformation applied to B matrix -
/// ignored for real-valued matrices
void const* ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const* beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const* ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void* ptr_D, /// Pointer to D matrix
int ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(provider_, GemmKind::kUniversal, element_compute,
element_scalar, element_A, layout_A, transform_A,
element_B, layout_B, transform_B, element_C);
auto operators_it =
Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it ==
Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const* ptr_A_check = ptr_A;
void const* ptr_B_check = ptr_B;
void const* ptr_C_check = ptr_C;
void* ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the
// host, as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K, element_A, ptr_A_check, lda, 0, element_B, ptr_B_check,
ldb, 0, element_C, ptr_C_check, ldc, 0, ptr_D_check, ldd, 0,
kMaximumAlignmentSize);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const* operation =
find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{mode, {M, N, K}, batch_count, lda,
ldb, ldc, ldd};
// Query host work space size
uint64_t host_workspace_size_needed =
operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed =
operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(&configuration, host_workspace,
workspace_, stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmUniversalArguments arguments{ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const* alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform
transform_A, /// Complex transformation applied to A matrix
void const* ptr_A_real, /// Pointer to real part of A matrix
void const* ptr_A_imag, /// Pointer to imaginary part of A matrix
int lda_real, /// Leading dimension of real part of A matrix
int lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform
transform_B, /// Complex transformation applied to B matrix
void const* ptr_B_real, /// Pointer to real part of B matrix
void const* ptr_B_imag, /// Pointer to imaginary part of B matrix
int ldb_real, /// Leading dimension of real part of B matrix
int ldb_imag, /// Leading dimension of imaginary part of B matrix
void const* beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const* ptr_C_real, /// Pointer to real part of C matrix
void const* ptr_C_imag, /// Pointer to imaginary part of C matrix
int ldc_real, /// Leading dimension of real part of C matrix
int ldc_imag, /// Leading dimension of imaginary part of C matrix
void* ptr_D_real, /// Pointer to real part of D matrix
void* ptr_D_imag, /// Pointer to imaginary part of D matrix
int ldd_real, /// Leading dimension of real part of D matrix
int ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real, int64_t batch_stride_A_imag,
int64_t batch_stride_B_real, int64_t batch_stride_B_imag,
int64_t batch_stride_C_real, int64_t batch_stride_C_imag,
int64_t batch_stride_D_real, int64_t batch_stride_D_imag) {
//
// Find the operation
//
GemmFunctionalKey key(provider_, GemmKind::kPlanarComplex, element_compute,
element_scalar, element_A, layout_A, transform_A,
element_B, layout_B, transform_B, element_C);
auto operators_it =
Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it ==
Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(M, N, K, element_A, ptr_A_real, lda_real,
batch_stride_A_real, element_B, ptr_B_real,
ldb_real, batch_stride_B_real, element_C,
ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real,
kMaximumAlignmentSize),
gemm_problem_alignment(M, N, K, element_A, ptr_A_imag, lda_imag,
batch_stride_A_imag, element_B, ptr_B_imag,
ldb_imag, batch_stride_B_imag, element_C,
ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag,
kMaximumAlignmentSize));
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const* operation =
find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag};
// Query host work space size
uint64_t host_workspace_size_needed =
operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed =
operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(&configuration, host_workspace,
workspace_, stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory.
/// Each batch index may have its own M/N/K problem size and its own real/imaginary
/// A/B/C/D pointers, all read from device-resident arrays at kernel execution time;
/// expected_M/N/K are only used on the host to size the CUDA grid and pick a kernel.
/// Returns kErrorNotSupported when no matching/fitting kernel exists, otherwise the
/// status of the underlying operation.
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA
/// grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA
/// grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const* M, /// Array containing the GEMM M dimension for each batch
/// index
int const* N, /// Array containing the GEMM N dimension for each batch
/// index
int const* K, /// Array containing the GEMM K dimension for each batch
/// index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const* alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform
transform_A, /// Complex transformation applied to A matrix
void const* const* ptr_A_real, /// Pointer to array containing pointers
/// to real part of A matrices
void const* const* ptr_A_imag, /// Pointer to array containing pointers
/// to imaginary part of A matrices
int lda_real, /// Leading dimension of real part of A matrix
int lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform
transform_B, /// Complex transformation applied to B matrix
void const* const* ptr_B_real, /// Pointer to array containing pointers
/// to real part of B matrices
void const* const* ptr_B_imag, /// Pointer to array containing pointers
/// to imaginary part of B matrices
int ldb_real, /// Leading dimension of real part of B matrix
int ldb_imag, /// Leading dimension of imaginary part of B matrix
void const* beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const* const* ptr_C_real, /// Pointer to array containing pointers
/// to real part of C matrices
void const* const* ptr_C_imag, /// Pointer to array containing pointers
/// to imaginary part of C matrices
int ldc_real, /// Leading dimension of real part of C matrix
int ldc_imag, /// Leading dimension of imaginary part of C matrix
void* const* ptr_D_real, /// Pointer to array containing pointers to
/// real part of D matrices
void* const* ptr_D_imag, /// Pointer to array containing pointers to
/// imaginary part of D matrices
int ldd_real, /// Leading dimension of real part of D matrix
int ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(provider_, GemmKind::kPlanarComplexArray,
element_compute, element_scalar, element_A, layout_A,
transform_A, element_B, layout_B, transform_B,
element_C);
auto operators_it =
Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it ==
Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
// Per-batch pointers are only known on the device, so alignment is derived
// from the leading dimensions alone (all pointer arguments are nullptr here).
int alignment = std::max(
gemm_problem_alignment(expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0, element_B,
nullptr, ldb_real, 0, element_C, nullptr,
ldc_real, 0, nullptr, ldd_real, 0,
kMaximumAlignmentSize),
gemm_problem_alignment(expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0, element_B,
nullptr, ldb_imag, 0, element_C, nullptr,
ldc_imag, 0, nullptr, ldd_imag, 0,
kMaximumAlignmentSize));
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const* operation =
find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag};
// Query host work space size (must fit in the fixed stack buffer below)
uint64_t host_workspace_size_needed =
operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size (must fit in the handle's preallocated workspace)
uint64_t device_workspace_size_needed =
operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(&configuration, host_workspace,
workspace_, stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K, ptr_A_real, ptr_A_imag,
ptr_B_real, ptr_B_imag, ptr_C_real, ptr_C_imag, ptr_D_real,
ptr_D_imag, alpha, beta, scalar_pointer_mode_};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
47edb432b3c73404f879dac2e79d5a1e746d4151.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#define BLOCK_DIM 16
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on GPU
//! C = alpha * A * B + beta * C
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param C matrix C as provided to device
//! @param N height of matrix A and matrix C
//! @param M width of matrix B and matrix C
//! @param K width of matrix A and height of matrix B
//! @param alpha scala value for matrix multiplication
//! @param beta scala value for matrix summation with C
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Naive SGEMM kernel: C = alpha * A * B + beta * C, one thread per C element.
//! A is N x K, B is K x M, C is N x M, all dense row-major.
//! Launch with a 2D grid covering at least M columns x N rows of threads.
__global__ void sgemm_kernel_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: N and M need not be multiples of the block size.
    if (row >= N || col >= M)
        return;

    float sum = 0.f;
    for (int i = 0; i < K; ++i)
        sum += A[row * K + i] * B[i * M + col]; // fixed: row stride of B is M (its width), not K

    C[row * M + col] = alpha * sum + beta * C[row * M + col];
}

//! Host-side launcher for sgemm_kernel_A.
//! Uses ceil-division so the grid covers the whole N x M output even when
//! the sizes are not multiples of BLOCK_DIM (the kernel bounds-checks the tail).
void sgemm_gpu_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    hipLaunchKernelGGL(( sgemm_kernel_A), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, N, M, K, alpha, beta);
}
////////////////////////////////////////////////////////////////////////////////
//! Naive SGEMM kernel (profiled variant): C = alpha * A * B + beta * C,
//! one thread per C element. A is N x K, B is K x M, C is N x M, row-major.
__global__ void sgemm_kernel_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: N and M need not be multiples of the block size.
    if (row >= N || col >= M)
        return;

    float sum = 0.f;
    for (int i = 0; i < K; ++i)
        sum += A[row * K + i] * B[i * M + col]; // fixed: row stride of B is M (its width), not K

    C[row * M + col] = alpha * sum + beta * C[row * M + col];
}

//! Host-side launcher for sgemm_kernel_B.
//! Ceil-division guarantees full coverage of the N x M output for sizes
//! that are not multiples of BLOCK_DIM (kernel bounds-checks the tail).
void sgemm_gpu_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    hipLaunchKernelGGL(( sgemm_kernel_B), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, N, M, K, alpha, beta);
}
// Fill 'data' with pseudo-random values in [0, 255/RAND_MAX]: each entry is
// the low byte of rand() scaled by 1/RAND_MAX (one rand() call per element,
// in order, so the sequence matches any fixed srand() seed).
void random_init(float *data, int size)
{
    for (float *p = data, *end = data + size; p != end; ++p)
        *p = (rand() & 0xFF) / (float)RAND_MAX;
}
// Driver: builds random N x K, K x M, N x M matrices, runs five warm-up SGEMM
// iterations (kernel A) and then five profiled iterations (kernel B).
// Results are never copied back to the host -- this is a profiling demo only.
int main()
{
float *A, *B, *C;
float *d_A, *d_B, *d_C;
int N, M, K;
float alpha = 2.f;
float beta = 1.f;
int n_iter = 5;
N = M = K = 2048;
// allocation of linear memory space (host)
A = (float *)malloc(N * K * sizeof(float));
B = (float *)malloc(K * M * sizeof(float));
C = (float *)malloc(N * M * sizeof(float));
// allocation of gpu linear memory space
hipMalloc((void **)&d_A, N * K * sizeof(float));
hipMalloc((void **)&d_B, K * M * sizeof(float));
hipMalloc((void **)&d_C, N * M * sizeof(float));
// initialize randomized values for memory space
random_init(A, N * K);
random_init(B, K * M);
random_init(C, N * M);
// copy initial value for gpu memory
// NOTE(review): d_B and d_C are both filled from host buffer A below -- looks
// like a copy-paste bug (presumably should read from B and C); confirm intent.
// Harmless for pure timing since all three buffers hold the same kind of data.
hipMemcpy(d_A, A, N * K * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, A, K * M * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, A, N * M * sizeof(float), hipMemcpyHostToDevice);
// do operation: warm-up runs (kernel A), then the profiled runs (kernel B)
for (int i = 0; i < n_iter; i++)
sgemm_gpu_A(d_A, d_B, d_C, N, M, K, alpha, beta);
hipProfilerStart();
for (int i = 0; i < n_iter; i++)
sgemm_gpu_B(d_A, d_B, d_C, N, M, K, alpha, beta);
hipProfilerStop();
hipDeviceSynchronize();
// terminates allocated gpu memory space
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// terminates allocated memory space
free(A);
free(B);
free(C);
return 0;
} | 47edb432b3c73404f879dac2e79d5a1e746d4151.cu | #include <stdio.h>
#include <cuda_profiler_api.h>
#define BLOCK_DIM 16
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on GPU
//! C = alpha * A * B + beta * C
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param C matrix C as provided to device
//! @param N height of matrix A and matrix C
//! @param M width of matrix B and matrix C
//! @param K width of matrix A and height of matrix B
//! @param alpha scala value for matrix multiplication
//! @param beta scala value for matrix summation with C
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Naive SGEMM kernel: C = alpha * A * B + beta * C, one thread per C element.
//! A is N x K, B is K x M, C is N x M, all dense row-major.
//! Launch with a 2D grid covering at least M columns x N rows of threads.
__global__ void sgemm_kernel_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: N and M need not be multiples of the block size.
    if (row >= N || col >= M)
        return;

    float sum = 0.f;
    for (int i = 0; i < K; ++i)
        sum += A[row * K + i] * B[i * M + col]; // fixed: row stride of B is M (its width), not K

    C[row * M + col] = alpha * sum + beta * C[row * M + col];
}

//! Host-side launcher for sgemm_kernel_A.
//! Uses ceil-division so the grid covers the whole N x M output even when
//! the sizes are not multiples of BLOCK_DIM (the kernel bounds-checks the tail).
void sgemm_gpu_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    sgemm_kernel_A<<<dimGrid, dimBlock>>>(A, B, C, N, M, K, alpha, beta);
}
////////////////////////////////////////////////////////////////////////////////
//! Naive SGEMM kernel (profiled variant): C = alpha * A * B + beta * C,
//! one thread per C element. A is N x K, B is K x M, C is N x M, row-major.
__global__ void sgemm_kernel_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: N and M need not be multiples of the block size.
    if (row >= N || col >= M)
        return;

    float sum = 0.f;
    for (int i = 0; i < K; ++i)
        sum += A[row * K + i] * B[i * M + col]; // fixed: row stride of B is M (its width), not K

    C[row * M + col] = alpha * sum + beta * C[row * M + col];
}

//! Host-side launcher for sgemm_kernel_B.
//! Ceil-division guarantees full coverage of the N x M output for sizes
//! that are not multiples of BLOCK_DIM (kernel bounds-checks the tail).
void sgemm_gpu_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    sgemm_kernel_B<<<dimGrid, dimBlock>>>(A, B, C, N, M, K, alpha, beta);
}
// Fill 'data' with pseudo-random values in [0, 255/RAND_MAX].
// Each entry consumes exactly one rand() call (low byte, normalized by
// RAND_MAX), in index order, so output is reproducible under a fixed srand().
void random_init(float *data, int size)
{
    int i = 0;
    while (i < size) {
        data[i] = (rand() & 0xFF) / (float)RAND_MAX;
        ++i;
    }
}
// Driver: builds random N x K, K x M, N x M matrices, runs five warm-up SGEMM
// iterations (kernel A) and then five profiled iterations (kernel B).
// Results are never copied back to the host -- this is a profiling demo only.
int main()
{
    float *A, *B, *C;
    float *d_A, *d_B, *d_C;
    int N, M, K;
    float alpha = 2.f;
    float beta = 1.f;
    int n_iter = 5;
    N = M = K = 2048;

    // allocation of linear memory space (host)
    A = (float *)malloc(N * K * sizeof(float));
    B = (float *)malloc(K * M * sizeof(float));
    C = (float *)malloc(N * M * sizeof(float));

    // allocation of gpu linear memory space
    cudaMalloc((void **)&d_A, N * K * sizeof(float));
    cudaMalloc((void **)&d_B, K * M * sizeof(float));
    cudaMalloc((void **)&d_C, N * M * sizeof(float));

    // initialize randomized values for memory space
    random_init(A, N * K);
    random_init(B, K * M);
    random_init(C, N * M);

    // copy initial values to gpu memory
    // (fixed: d_B and d_C were previously filled from host buffer A, so the
    //  device never saw the contents of B and C)
    cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, K * M * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, N * M * sizeof(float), cudaMemcpyHostToDevice);

    // warm-up iterations (outside the profiled region)
    for (int i = 0; i < n_iter; i++)
        sgemm_gpu_A(d_A, d_B, d_C, N, M, K, alpha, beta);

    // profiled region
    cudaProfilerStart();
    for (int i = 0; i < n_iter; i++)
        sgemm_gpu_B(d_A, d_B, d_C, N, M, K, alpha, beta);
    cudaProfilerStop();

    // wait for all asynchronous kernel launches to finish before cleanup
    cudaDeviceSynchronize();

    // terminates allocated gpu memory space
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // terminates allocated memory space
    free(A);
    free(B);
    free(C);

    return 0;
}
4ecec7aec7aeacd45c577295243b4c7b1020393c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUAPI.h"
#include "CUFLU.h"
#ifdef GRAVITY
#include "CUPOT.h"
#endif
#ifdef GPU
// fluid solver prototypes in different models
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == RTVD )
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres );
#elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
__global__
void CUFLU_FluidSolver_MHM(
const real Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
char DE_Array_Out [][ CUBE(PS2) ],
real Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double Corner_Array [][3],
const real Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real PriVar [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Slope_PPM [][3][NCOMP_TOTAL][ CUBE(N_SLOPE_PPM) ],
real FC_Var [][6][NCOMP_TOTAL][ CUBE(N_FC_VAR) ],
real FC_Flux [][3][NCOMP_TOTAL][ CUBE(N_FC_FLUX) ],
const real dt, const real dh, const real Gamma, const bool StoreFlux,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const double Time, const OptGravityType_t GravityType,
const real MinDens, const real MinPres, const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool JeansMinPres, const real JeansMinPres_Coeff );
#if ( NCOMP_PASSIVE > 0 )
int CUFLU_SetConstMem_FluidSolver_NormIdx( int NormIdx_h[] );
#endif
#elif ( FLU_SCHEME == CTU )
__global__
void CUFLU_FluidSolver_CTU(
const real Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
char DE_Array_Out [][ CUBE(PS2) ],
real Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double Corner_Array [][3],
const real Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real PriVar [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Slope_PPM [][3][NCOMP_TOTAL][ CUBE(N_SLOPE_PPM) ],
real FC_Var [][6][NCOMP_TOTAL][ CUBE(N_FC_VAR) ],
real FC_Flux [][3][NCOMP_TOTAL][ CUBE(N_FC_FLUX) ],
const real dt, const real dh, const real Gamma, const bool StoreFlux,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const double Time, const OptGravityType_t GravityType,
const real MinDens, const real MinPres, const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool JeansMinPres, const real JeansMinPres_Coeff );
#if ( NCOMP_PASSIVE > 0 )
int CUFLU_SetConstMem_FluidSolver_NormIdx( int NormIdx_h[] );
#endif
#endif // FLU_SCHEME
__global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][NCOMP_FLUID][ CUBE(PS1) ],
const real dh, const real Safety, const real Gamma, const real MinPres );
#ifdef GRAVITY
__global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[],
const real g_Pot_Array[][ CUBE(GRA_NXT) ],
const double g_Corner_Array[][3],
const real dh, const real Safety, const bool P5_Gradient,
const OptGravityType_t GravityType, const double ExtAcc_Time );
#endif
#elif ( MODEL == MHD )
#warning : WAIT MHD !!!
#elif ( MODEL == ELBDM )
__global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ],
real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ],
const real dt, const real _dh, const real Eta, const bool StoreFlux,
const real Taylor3_Coeff, const bool XYZ, const real MinDens );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
#ifdef GRAVITY
// Poisson solver prototypes
#if ( POT_SCHEME == SOR )
#ifdef USE_PSOLVER_10TO14
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#else
__global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#endif // #ifdef USE_PSOLVER_10TO14 ... else ...
#elif ( POT_SCHEME == MG )
__global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const real dh_Min, const int Max_Iter, const int NPre_Smooth,
const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff,
const IntScheme_t IntScheme );
#endif // POT_SCHEME
// Gravity solver prototypes in different models
#if ( MODEL == HYDRO )
__global__
void CUPOT_HydroGravitySolver(
real Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
const real Pot_Array_New[][ CUBE(GRA_NXT) ],
const double Corner_Array [][3],
const real Pot_Array_USG[][ CUBE(USG_NXT_G) ],
const real Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
char DE_Array [][ CUBE(PS1) ],
const real dt, const real dh, const bool P5_Gradient,
const OptGravityType_t GravityType,
const double TimeNew, const double TimeOld, const real MinEint );
#elif ( MODEL == MHD )
#warning :: WAIT MHD !!!
#elif ( MODEL == ELBDM )
__global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ],
const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const double g_Corner_Array[][3],
const real EtaDt, const real dh, const real Lambda, const bool ExtPot,
const double Time );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
int CUPOT_SetConstMem_PoissonSolver();
#endif // GRAVITY
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Set_Default_GPU_Parameter
// Description : Set several GPU parameters to the default values if they are not set in the input file
//
// Parameter : GPU_NStream : Number of streams for the asynchronous memory copy in GPU
// Flu_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the fluid solver
// Pot_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Poisson solver
// Che_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Grackle solver
//-------------------------------------------------------------------------------------------------------
void CUAPI_Set_Default_GPU_Parameter( int &GPU_NStream, int &Flu_GPU_NPGroup, int &Pot_GPU_NPGroup, int &Che_GPU_NPGroup )
{
if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ );
// get the device ID
int GetDeviceID = 999;
CUDA_CHECK_ERROR( hipGetDevice( &GetDeviceID ) );
// load the device properties
hipDeviceProp_t DeviceProp;
CUDA_CHECK_ERROR( hipGetDeviceProperties( &DeviceProp, GetDeviceID ) );
// set the default GPU parameters
// (1) GPU_NSTREAM
// --> only applied when the input value is non-positive (i.e. "choose for me");
// the chosen default depends on the compiled GPU architecture
if ( GPU_NStream <= 0 )
{
if ( DeviceProp.deviceOverlap )
{
# if ( MODEL == HYDRO )
# if ( GPU_ARCH == FERMI )
GPU_NStream = 8;
# elif ( GPU_ARCH == KEPLER )
GPU_NStream = 32;
# elif ( GPU_ARCH == MAXWELL )
GPU_NStream = 32;
# elif ( GPU_ARCH == PASCAL )
GPU_NStream = 32;
# elif ( GPU_ARCH == VOLTA )
GPU_NStream = 32;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
# elif ( MODEL == MHD )
# warning :: WAIT MHD !!!
# elif ( MODEL == ELBDM )
# if ( GPU_ARCH == FERMI )
GPU_NStream = 8;
# elif ( GPU_ARCH == KEPLER )
GPU_NStream = 32;
# elif ( GPU_ARCH == MAXWELL )
GPU_NStream = 32;
# elif ( GPU_ARCH == PASCAL )
GPU_NStream = 32;
# elif ( GPU_ARCH == VOLTA )
GPU_NStream = 32;
# else
# error : ERROR : UNKNOWN GPU_ARCH !!
# endif
# else
# error : ERROR : UNKNOWN MODEL !!
# endif // MODEL
} // if ( DeviceProp.deviceOverlap )
else
// device cannot overlap copy and compute --> multiple streams are useless
GPU_NStream = 1;
// NOTE(review): the message below prints GPU_NSTREAM rather than the
// GPU_NStream reference parameter -- presumably they alias the same global
// variable elsewhere in the code base; confirm.
if ( MPI_Rank == 0 )
Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
" --> might be further fine-tuned\n", "GPU_NSTREAM", GPU_NSTREAM );
} // if ( GPU_NStream <= 0 )
// (2) XXX_GPU_NPGROUP
// --> number of patch groups sent to each GPU solver invocation; the default
// scales with the number of streams and the number of multiprocessors
// (2-1) FLU_GPU_NPGROUP
if ( Flu_GPU_NPGroup <= 0 )
{
# if ( MODEL == HYDRO )
# if ( GPU_ARCH == FERMI )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
# elif ( MODEL == MHD )
# warning :: WAIT MHD !!!
# elif ( MODEL == ELBDM )
# if ( GPU_ARCH == FERMI )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
# else
# error : ERROR : UNKNOWN MODEL !!
# endif // MODEL
if ( MPI_Rank == 0 )
Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
" --> might be further fine-tuned\n", "FLU_GPU_NPGROUP", Flu_GPU_NPGroup );
} // if ( Flu_GPU_NPGroup <= 0 )
// (2-2) POT_GPU_NPGROUP
# ifdef GRAVITY
if ( Pot_GPU_NPGroup <= 0 )
{
# if ( GPU_ARCH == FERMI )
Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
if ( MPI_Rank == 0 )
Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
" --> might be further fine-tuned\n", "POT_GPU_NPGROUP", Pot_GPU_NPGroup );
} // if ( Pot_GPU_NPGroup <= 0 )
# endif
// (2-3) CHE_GPU_NPGROUP
# ifdef SUPPORT_GRACKLE
if ( Che_GPU_NPGroup <= 0 )
{
# if ( GPU_ARCH == FERMI )
Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
if ( MPI_Rank == 0 )
Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
" --> might be further fine-tuned\n", "CHE_GPU_NPGROUP", Che_GPU_NPGroup );
} // if ( Che_GPU_NPGroup <= 0 )
# endif
// (3) cache preference
// --> choose shared-memory-heavy or L1-heavy configuration per solver kernel
// (3-1) fluid solver
# if ( MODEL == HYDRO )
# if ( FLU_SCHEME == RTVD )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, hipFuncCachePreferShared ) );
# elif ( FLU_SCHEME == MHM )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) );
# elif ( FLU_SCHEME == MHM_RP )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) );
# elif ( FLU_SCHEME == CTU )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_CTU, hipFuncCachePreferL1 ) );
# endif
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, hipFuncCachePreferShared ) );
# ifdef GRAVITY
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, hipFuncCachePreferShared ) );
# endif
# elif ( MODEL == MHD )
# warning :: WAIT MHD !!!
# elif ( MODEL == ELBDM )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_ELBDMSolver, hipFuncCachePreferShared ) );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
# ifdef GRAVITY
// (3-2) Poisson solver
# if ( POT_SCHEME == SOR )
# ifdef USE_PSOLVER_10TO14
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_10to14cube, hipFuncCachePreferShared ) );
# else
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_16to18cube, hipFuncCachePreferShared ) );
# endif
# elif ( POT_SCHEME == MG )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_MG, hipFuncCachePreferShared ) );
# endif // POT_SCHEME
// (3-3) gravity solver
# if ( MODEL == HYDRO )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_HydroGravitySolver, hipFuncCachePreferShared ) );
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# elif ( MODEL == ELBDM )
CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, hipFuncCachePreferL1 ) );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
# endif // GRAVITY
// (4) set the constant variables
// --> note that the auxiliary arrays for the external acceleration and potential are set by CUAPI_Init_ExternalAccPot()
# if ( NCOMP_PASSIVE > 0 )
if ( OPT__NORMALIZE_PASSIVE )
{
# if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) )
if ( CUFLU_SetConstMem_FluidSolver_NormIdx(PassiveNorm_VarIdx) != 0 )
Aux_Error( ERROR_INFO, "CUFLU_SetConstMem_FluidSolver_NormIdx failed ...\n" );
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# endif // MODEL
}
# endif // #if ( NCOMP_PASSIVE > 0 )
# ifdef GRAVITY
if ( CUPOT_SetConstMem_PoissonSolver() != 0 )
Aux_Error( ERROR_INFO, "CUPOT_SetConstMem_PoissonSolver failed ...\n" );
# endif // #ifdef GRAVITY
if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ );
} // FUNCTION : CUAPI_Set_Default_GPU_Parameter
#endif // #ifdef GPU
| 4ecec7aec7aeacd45c577295243b4c7b1020393c.cu | #include "CUAPI.h"
#include "CUFLU.h"
#ifdef GRAVITY
#include "CUPOT.h"
#endif
#ifdef GPU
// fluid solver prototypes in different models
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == RTVD )
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const real Gamma, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres );
#elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
__global__
void CUFLU_FluidSolver_MHM(
const real Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
char DE_Array_Out [][ CUBE(PS2) ],
real Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double Corner_Array [][3],
const real Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real PriVar [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Slope_PPM [][3][NCOMP_TOTAL][ CUBE(N_SLOPE_PPM) ],
real FC_Var [][6][NCOMP_TOTAL][ CUBE(N_FC_VAR) ],
real FC_Flux [][3][NCOMP_TOTAL][ CUBE(N_FC_FLUX) ],
const real dt, const real dh, const real Gamma, const bool StoreFlux,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const double Time, const OptGravityType_t GravityType,
const real MinDens, const real MinPres, const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool JeansMinPres, const real JeansMinPres_Coeff );
#if ( NCOMP_PASSIVE > 0 )
int CUFLU_SetConstMem_FluidSolver_NormIdx( int NormIdx_h[] );
#endif
#elif ( FLU_SCHEME == CTU )
__global__
void CUFLU_FluidSolver_CTU(
const real Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
char DE_Array_Out [][ CUBE(PS2) ],
real Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double Corner_Array [][3],
const real Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real PriVar [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real Slope_PPM [][3][NCOMP_TOTAL][ CUBE(N_SLOPE_PPM) ],
real FC_Var [][6][NCOMP_TOTAL][ CUBE(N_FC_VAR) ],
real FC_Flux [][3][NCOMP_TOTAL][ CUBE(N_FC_FLUX) ],
const real dt, const real dh, const real Gamma, const bool StoreFlux,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const double Time, const OptGravityType_t GravityType,
const real MinDens, const real MinPres, const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool JeansMinPres, const real JeansMinPres_Coeff );
#if ( NCOMP_PASSIVE > 0 )
int CUFLU_SetConstMem_FluidSolver_NormIdx( int NormIdx_h[] );
#endif
#endif // FLU_SCHEME
__global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][NCOMP_FLUID][ CUBE(PS1) ],
const real dh, const real Safety, const real Gamma, const real MinPres );
#ifdef GRAVITY
__global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[],
const real g_Pot_Array[][ CUBE(GRA_NXT) ],
const double g_Corner_Array[][3],
const real dh, const real Safety, const bool P5_Gradient,
const OptGravityType_t GravityType, const double ExtAcc_Time );
#endif
#elif ( MODEL == MHD )
#warning : WAIT MHD !!!
#elif ( MODEL == ELBDM )
__global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ],
real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ],
const real dt, const real _dh, const real Eta, const bool StoreFlux,
const real Taylor3_Coeff, const bool XYZ, const real MinDens );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
#ifdef GRAVITY
// Poisson solver prototypes
#if ( POT_SCHEME == SOR )
#ifdef USE_PSOLVER_10TO14
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#else
__global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#endif // #ifdef USE_PSOLVER_10TO14 ... else ...
#elif ( POT_SCHEME == MG )
__global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const real dh_Min, const int Max_Iter, const int NPre_Smooth,
const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff,
const IntScheme_t IntScheme );
#endif // POT_SCHEME
// Gravity solver prototypes in different models
#if ( MODEL == HYDRO )
__global__
void CUPOT_HydroGravitySolver(
real Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
const real Pot_Array_New[][ CUBE(GRA_NXT) ],
const double Corner_Array [][3],
const real Pot_Array_USG[][ CUBE(USG_NXT_G) ],
const real Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
char DE_Array [][ CUBE(PS1) ],
const real dt, const real dh, const bool P5_Gradient,
const OptGravityType_t GravityType,
const double TimeNew, const double TimeOld, const real MinEint );
#elif ( MODEL == MHD )
#warning :: WAIT MHD !!!
#elif ( MODEL == ELBDM )
__global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ],
const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const double g_Corner_Array[][3],
const real EtaDt, const real dh, const real Lambda, const bool ExtPot,
const double Time );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
int CUPOT_SetConstMem_PoissonSolver();
#endif // GRAVITY
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Set_Default_GPU_Parameter
// Description : Set several GPU parameters to the default values if they are not set in the input file
//
// Parameter : GPU_NStream : Number of streams for the asynchronous memory copy in GPU
// Flu_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the fluid solver
// Pot_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Poisson solver
// Che_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Grackle solver
//-------------------------------------------------------------------------------------------------------
void CUAPI_Set_Default_GPU_Parameter( int &GPU_NStream, int &Flu_GPU_NPGroup, int &Pot_GPU_NPGroup, int &Che_GPU_NPGroup )
{
   if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ );
// get the device ID
   int GetDeviceID = 999;   // sentinel; overwritten by cudaGetDevice() below
   CUDA_CHECK_ERROR( cudaGetDevice( &GetDeviceID ) );
// load the device properties
   cudaDeviceProp DeviceProp;
   CUDA_CHECK_ERROR( cudaGetDeviceProperties( &DeviceProp, GetDeviceID ) );
// set the default GPU parameters
// (1) GPU_NSTREAM
// deviceOverlap: device can overlap kernel execution with async copies,
// so multiple streams are only worthwhile when it is set
   if ( GPU_NStream <= 0 )
   {
   if ( DeviceProp.deviceOverlap )
   {
# if ( MODEL == HYDRO )
# if ( GPU_ARCH == FERMI )
   GPU_NStream = 8;
# elif ( GPU_ARCH == KEPLER )
   GPU_NStream = 32;
# elif ( GPU_ARCH == MAXWELL )
   GPU_NStream = 32;
# elif ( GPU_ARCH == PASCAL )
   GPU_NStream = 32;
# elif ( GPU_ARCH == VOLTA )
   GPU_NStream = 32;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
# elif ( MODEL == MHD )
# warning :: WAIT MHD !!!
# elif ( MODEL == ELBDM )
# if ( GPU_ARCH == FERMI )
   GPU_NStream = 8;
# elif ( GPU_ARCH == KEPLER )
   GPU_NStream = 32;
# elif ( GPU_ARCH == MAXWELL )
   GPU_NStream = 32;
# elif ( GPU_ARCH == PASCAL )
   GPU_NStream = 32;
# elif ( GPU_ARCH == VOLTA )
   GPU_NStream = 32;
# else
# error : ERROR : UNKNOWN GPU_ARCH !!
# endif
# else
# error : ERROR : UNKNOWN MODEL !!
# endif // MODEL
   } // if ( DeviceProp.deviceOverlap )
   else
   // no copy/compute overlap --> a single stream is sufficient
   GPU_NStream = 1;
   if ( MPI_Rank == 0 )
   // NOTE(review): prints the global GPU_NSTREAM rather than the local
   // reference GPU_NStream -- presumably the caller binds GPU_NSTREAM to
   // this reference parameter so the two alias; verify against the caller
   Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
   " --> might be further fine-tuned\n", "GPU_NSTREAM", GPU_NSTREAM );
   } // if ( GPU_NStream <= 0 )
// (2) XXX_GPU_NPGROUP
// default for all solvers: one patch group per (stream x multiprocessor);
// all architecture branches currently coincide but are kept separate so
// they can be tuned per-architecture independently
// (2-1) FLU_GPU_NPGROUP
   if ( Flu_GPU_NPGroup <= 0 )
   {
# if ( MODEL == HYDRO )
# if ( GPU_ARCH == FERMI )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
# elif ( MODEL == MHD )
# warning :: WAIT MHD !!!
# elif ( MODEL == ELBDM )
# if ( GPU_ARCH == FERMI )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
   Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
# else
# error : ERROR : UNKNOWN MODEL !!
# endif // MODEL
   if ( MPI_Rank == 0 )
   Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
   " --> might be further fine-tuned\n", "FLU_GPU_NPGROUP", Flu_GPU_NPGroup );
   } // if ( Flu_GPU_NPGroup <= 0 )
// (2-2) POT_GPU_NPGROUP
# ifdef GRAVITY
   if ( Pot_GPU_NPGroup <= 0 )
   {
# if ( GPU_ARCH == FERMI )
   Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
   Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
   Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
   Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
   Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
   if ( MPI_Rank == 0 )
   Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
   " --> might be further fine-tuned\n", "POT_GPU_NPGROUP", Pot_GPU_NPGroup );
   } // if ( Pot_GPU_NPGroup <= 0 )
# endif
// (2-3) CHE_GPU_NPGROUP
# ifdef SUPPORT_GRACKLE
   if ( Che_GPU_NPGroup <= 0 )
   {
# if ( GPU_ARCH == FERMI )
   Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == KEPLER )
   Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == MAXWELL )
   Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == PASCAL )
   Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# elif ( GPU_ARCH == VOLTA )
   Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount;
# else
# error : UNKNOWN GPU_ARCH !!
# endif
   if ( MPI_Rank == 0 )
   Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d"
   " --> might be further fine-tuned\n", "CHE_GPU_NPGROUP", Che_GPU_NPGroup );
   } // if ( Che_GPU_NPGroup <= 0 )
# endif
// (3) cache preference
// choose L1-vs-shared split per kernel for the compiled solver configuration
// (3-1) fluid solver
# if ( MODEL == HYDRO )
# if ( FLU_SCHEME == RTVD )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, cudaFuncCachePreferShared ) );
# elif ( FLU_SCHEME == MHM )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) );
# elif ( FLU_SCHEME == MHM_RP )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) );
# elif ( FLU_SCHEME == CTU )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_CTU, cudaFuncCachePreferL1 ) );
# endif
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, cudaFuncCachePreferShared ) );
# ifdef GRAVITY
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, cudaFuncCachePreferShared ) );
# endif
# elif ( MODEL == MHD )
# warning :: WAIT MHD !!!
# elif ( MODEL == ELBDM )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_ELBDMSolver, cudaFuncCachePreferShared ) );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
# ifdef GRAVITY
// (3-2) Poisson solver
# if ( POT_SCHEME == SOR )
# ifdef USE_PSOLVER_10TO14
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_10to14cube, cudaFuncCachePreferShared ) );
# else
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_16to18cube, cudaFuncCachePreferShared ) );
# endif
# elif ( POT_SCHEME == MG )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_MG, cudaFuncCachePreferShared ) );
# endif // POT_SCHEME
// (3-3) gravity solver
# if ( MODEL == HYDRO )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_HydroGravitySolver, cudaFuncCachePreferShared ) );
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# elif ( MODEL == ELBDM )
   CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, cudaFuncCachePreferL1 ) );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
# endif // GRAVITY
// (4) set the constant variables
// --> note that the auxiliary arrays for the external acceleration and potential are set by CUAPI_Init_ExternalAccPot()
# if ( NCOMP_PASSIVE > 0 )
   if ( OPT__NORMALIZE_PASSIVE )
   {
# if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) )
   if ( CUFLU_SetConstMem_FluidSolver_NormIdx(PassiveNorm_VarIdx) != 0 )
   Aux_Error( ERROR_INFO, "CUFLU_SetConstMem_FluidSolver_NormIdx failed ...\n" );
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# endif // MODEL
   }
# endif // #if ( NCOMP_PASSIVE > 0 )
# ifdef GRAVITY
   if ( CUPOT_SetConstMem_PoissonSolver() != 0 )
   Aux_Error( ERROR_INFO, "CUPOT_SetConstMem_PoissonSolver failed ...\n" );
# endif // #ifdef GRAVITY
   if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ );
} // FUNCTION : CUAPI_Set_Default_GPU_Parameter
#endif // #ifdef GPU
|
16f1ee17e6e6208de53fbb83c98a6c5a8b618ff5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Fill a[0..N-1] with its own index values, i.e. a[i] = i.
void init(int *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
    a[idx] = idx;
}
/*
* In the current application, `N` is larger than the grid.
* Refactor this kernel to use a grid-stride loop in order that
* each parallel thread work on more than one element of the array.
*/
/*
 * Double every element of `a`.
 *
 * Uses a grid-stride loop (as requested by the comment above) so that
 * correctness no longer depends on the launch configuration: the original
 * single-index form `if (i < N) a[i] *= 2;` left elements with index >= grid
 * size untouched, and main() launches only 256*32 = 8192 threads for
 * N = 10000, so elements 8192..9999 were never doubled.
 */
__global__
void doubleElements(int *a, int N)
{
  int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
  {
    a[i] *= 2;
  }
}
// Return true iff a[i] == 2*i for every i in [0, N).
bool checkElementsAreDoubled(int *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
  {
    if (a[idx] != idx * 2)
      return false;
  }
  return true;
}
// Driver: fill a managed array, double it on the GPU, verify on the host.
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
// managed (unified) memory: the same pointer is valid on host and device
hipMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
// NOTE(review): 8192 threads < N = 10000 -- unless the kernel uses a
// grid-stride loop (per the comment above it), the tail elements stay
// untouched and the check below reports FALSE.
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
// block until the kernel finishes before reading `a` on the host
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
hipFree(a);
} | 16f1ee17e6e6208de53fbb83c98a6c5a8b618ff5.cu | #include <stdio.h>
// Fill a[0..N-1] with its own index values, i.e. a[i] = i.
void init(int *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
    a[idx] = idx;
}
/*
* In the current application, `N` is larger than the grid.
* Refactor this kernel to use a grid-stride loop in order that
* each parallel thread work on more than one element of the array.
*/
/*
 * Double every element of `a`.
 *
 * Uses a grid-stride loop (as requested by the comment above) so that
 * correctness no longer depends on the launch configuration: the original
 * single-index form `if (i < N) a[i] *= 2;` left elements with index >= grid
 * size untouched, and main() launches only 256*32 = 8192 threads for
 * N = 10000, so elements 8192..9999 were never doubled.
 */
__global__
void doubleElements(int *a, int N)
{
  int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
  {
    a[i] *= 2;
  }
}
// Return true iff a[i] == 2*i for every i in [0, N).
bool checkElementsAreDoubled(int *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
  {
    if (a[idx] != idx * 2)
      return false;
  }
  return true;
}
// Driver: fill a managed array, double it on the GPU, verify on the host.
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
// managed (unified) memory: the same pointer is valid on host and device
cudaMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
// NOTE(review): 8192 threads < N = 10000 -- unless the kernel uses a
// grid-stride loop (per the comment above it), the tail elements stay
// untouched and the check below reports FALSE.
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
// block until the kernel finishes before reading `a` on the host
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
cudaFree(a);
} |
15c8503e74538f1359cca506ee829454e4e978e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#define CEIL(a, b)((a - 1) / b + 1)
const int ROWS = 999;
const int COLUMNS = 1000;
const int BLOCK_SIZE = 1024;
const int MATRIX_BYTES = ROWS * COLUMNS * sizeof(int);
// Largest-all-ones-square DP table, computed anti-diagonal by anti-diagonal:
// d_out[x][y] = side length of the biggest square of 1s whose bottom-right
// corner is (x, y).  The first row/column of d_out are pre-filled by the host
// (see main); one thread handles one cell of the current anti-diagonal.
// The loop bounds are uniform across the block, so the __syncthreads() at the
// bottom of the loop is reached by every thread (it separates successive
// diagonals).  Launched with a single block (see main), so the longest
// anti-diagonal must fit in one block.
__global__ void get_matrix(int * d_in, int * d_out) {
int idx = threadIdx.x;
// m = (shorter dimension - 1), n = (longer dimension - 1)
int m = (ROWS > COLUMNS ? COLUMNS : ROWS) - 1;
int n = (ROWS > COLUMNS ? ROWS : COLUMNS) - 1;
int n_iter = m + n - 1;   // number of anti-diagonal sweeps
int tbc, left, up, corner;
for (int i = 0; i < n_iter; i++) {
int x_index, y_index;
if (i >= n) {
// FOR THIRD PART OF ITERATIONS
// (shrinking diagonals near the bottom-right corner)
if (idx >= i - n + 1) {
if (ROWS <= COLUMNS) {
x_index = i + 1 - idx + m - n;
y_index = idx + 1 + n - m;
}
else {
x_index = idx + 1 + n - m;
y_index = i + 1 - idx + m - n;
}
tbc = * (d_in + ((x_index) * COLUMNS + (y_index)));
if (!tbc) {
// input cell is 0 -> no square ends here
*(d_out + ((x_index) * COLUMNS + (y_index))) = 0;
}
else {
// extend the smallest of the three neighbouring squares by one
left = * (d_out + ((x_index) * COLUMNS + (y_index - 1)));
up = * (d_out + ((x_index - 1) * COLUMNS + (y_index)));
corner = * (d_out + ((x_index - 1) * COLUMNS + (y_index - 1)));
int mini = (left > up ? up : left);
*(d_out + ((x_index) * COLUMNS + (y_index))) = (mini > corner ? corner : mini) + 1;
}
}
}
else if (i >= m - 1 && i <= n - 1) {
// FOR SECOND PART OF ITERATIONS
// (full-length diagonals in the middle band)
if (ROWS <= COLUMNS) {
x_index = m - idx;
y_index = idx + i - m + 2;
}
else {
x_index = idx + i - m + 2;
y_index = m - idx;
}
tbc = * (d_in + ((x_index) * COLUMNS + (y_index)));
if (!tbc){
*(d_out + ((x_index) * COLUMNS + (y_index))) = 0;
}
else {
left = * (d_out + ((x_index) * COLUMNS + (y_index - 1)));
up = * (d_out + ((x_index - 1) * COLUMNS + (y_index)));
corner = * (d_out + ((x_index - 1) * COLUMNS + (y_index - 1)));
int mini = (left > up ? up : left);
*(d_out + ((x_index) * COLUMNS + (y_index))) = (mini > corner ? corner : mini) + 1;
}
}
else {
// FOR FIRST PART OF ITERATIONS
// (growing diagonals near the top-left corner)
if (idx <= i) {
tbc = * (d_in + ((i + 1 - idx) * COLUMNS + (idx + 1)));
if (!tbc){
*(d_out + ((i + 1 - idx) * COLUMNS + (idx + 1))) = 0;
}
else {
left = * (d_out + ((i + 1 - idx) * COLUMNS + (idx)));
up = * (d_out + ((i - idx) * COLUMNS + (idx + 1)));
corner = * (d_out + ((i - idx) * COLUMNS + (idx)));
int mini = (left > up ? up : left);
*(d_out + ((i + 1 - idx) * COLUMNS + (idx + 1))) = (mini > corner ? corner : mini) + 1;
}
}
}
// all cells of diagonal i must be written before diagonal i+1 reads them
__syncthreads();
}
}
int S[ROWS][COLUMNS];
struct combine {
int max;
int * ptr;
};
struct combine ans_cpu;
// CPU reference for the largest-all-ones-square problem.
// Fills the DP table S (S[i][j] = side of the biggest square of 1s whose
// bottom-right corner is (i, j)) and publishes the maximum entry and the
// table's address through the global `ans_cpu`.
void cpu_method(int * h_in_element) {
  // first row and first column are copied straight from the input
  for (int r = 0; r < ROWS; r++)
    S[r][0] = h_in_element[r * COLUMNS];
  for (int c = 0; c < COLUMNS; c++)
    S[0][c] = h_in_element[c];
  // interior cells: a square can only be extended where the input is 1
  for (int r = 1; r < ROWS; r++) {
    for (int c = 1; c < COLUMNS; c++) {
      if (h_in_element[r * COLUMNS + c] != 1) {
        S[r][c] = 0;
        continue;
      }
      int left   = S[r][c - 1];
      int up     = S[r - 1][c];
      int corner = S[r - 1][c - 1];
      int best = left < up ? left : up;
      if (corner < best) best = corner;
      S[r][c] = best + 1;   // min of the three neighbours, plus this cell
    }
  }
  // scan the whole table for its largest entry
  int largest = S[0][0];
  for (int r = 0; r < ROWS; r++) {
    for (int c = 0; c < COLUMNS; c++) {
      if (S[r][c] > largest)
        largest = S[r][c];
    }
  }
  ans_cpu.max = largest;
  ans_cpu.ptr = & S[0][0];
}
// Compare the CPU DP table (`ptr`) against the GPU one (`h_out`) element by
// element, and the two maxima against each other.  Returns 1 on full
// agreement, 0 otherwise.  (`h_in` is accepted for signature compatibility
// but not inspected.)
int test_solution(int * ptr, int * h_out, int * h_in, int gpu_result, int cpu_result) {
  int matches = 1;
  for (int r = 0; r < ROWS && matches; r++) {
    for (int c = 0; c < COLUMNS; c++) {
      if (ptr[r * COLUMNS + c] != h_out[r * COLUMNS + c]) {
        matches = 0;     // first mismatch ends the scan
        break;
      }
    }
  }
  if (gpu_result != cpu_result)
    matches = 0;
  return matches;
}
// Global maximum of d_final[0 .. ROWS*COLUMNS-1]: tree reduction within each
// block, then one atomicMax per block into *d_max.
//
// Fix: the original wrapped the whole body (including the __syncthreads()
// inside the loop) in `if (idx < ROWS * COLUMNS)`, so out-of-range threads in
// the last block never reached the barrier -- a divergent __syncthreads(),
// which is undefined behavior.  The bounds check is now applied per access
// while every thread executes the (uniform) loop and barrier.
__global__ void find_max(int * d_final, int * d_max) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int step = 1;
while (step < blockDim.x) {
if (idx < ROWS * COLUMNS && idx % (step * 2) == 0 && (idx + step) < (ROWS * COLUMNS)) {
if(*(d_final + idx + step) > *(d_final + idx))
*(d_final + idx) = *(d_final + idx + step);
}
__syncthreads();   // reached by ALL threads of the block, every iteration
step *= 2;
}
// one representative per block publishes the block maximum
if (idx < ROWS * COLUMNS && threadIdx.x == 0) {
atomicMax(d_max, *(d_final + idx));
}
}
// Driver: build a random 0/1 matrix, run the wavefront DP + reduction on the
// GPU, recompute on the CPU, and compare results and timings.
int main() {
srand(time(0));
// NOTE(review): ~4 MB each on the stack -- close to typical stack limits;
// consider heap allocation if ROWS*COLUMNS grows.
int h_in[ROWS * COLUMNS], h_out[ROWS * COLUMNS], * h_max;
h_max = (int * ) malloc(sizeof(int));
for (int i = 0; i < ROWS; i++) {
for (int j = 0; j < COLUMNS; j++) {
*(h_in + (i * COLUMNS + j)) = rand() % 2;
}
}
// pre-fill the first row and column of the DP table from the input;
// the kernel computes only the interior
for (int i = 0; i < COLUMNS; i++)
*(h_out + (i)) = * (h_in + (i));
for (int i = 0; i < ROWS; i++)
*(h_out + (i * COLUMNS)) = * (h_in + (i * COLUMNS));
for (int i = 1; i < ROWS; i++) {
for (int j = 1; j < COLUMNS; j++) {
*(h_out + (i * COLUMNS + j)) = -1;
}
}
int * d_in, * d_out, * d_final, * d_max;
hipMalloc( &d_in, MATRIX_BYTES);
hipMalloc( &d_out, MATRIX_BYTES);
hipMalloc( &d_final, MATRIX_BYTES);
hipMalloc( &d_max, sizeof(int));
// BUG FIX: hipMalloc does not zero memory, but find_max accumulates into
// *d_max with atomicMax -- it needs a defined starting value (0 is a safe
// identity since all DP entries are >= 0).
hipMemset(d_max, 0, sizeof(int));
hipMemcpy(d_in, h_in, MATRIX_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out, h_out, MATRIX_BYTES, hipMemcpyHostToDevice);
// one thread per cell of the longest anti-diagonal (single-block launch)
int min = ROWS >= COLUMNS ? COLUMNS : ROWS;
int THREADS;
if (min >= BLOCK_SIZE - 1)
THREADS = BLOCK_SIZE;
else
THREADS = min - 1;
hipEvent_t start, stop;
hipEventCreate( &start);
hipEventCreate( &stop);
hipEventRecord(start);
hipLaunchKernelGGL(( get_matrix), dim3(1), dim3(THREADS), 0, 0, d_in, d_out);
hipMemcpy(h_out, d_out, MATRIX_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(d_final, h_out, MATRIX_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_max), dim3(CEIL((ROWS * COLUMNS), BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_final, d_max);
hipMemcpy(h_max, d_max, sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime( &milliseconds, start, stop);
// CPU reference for correctness and timing comparison
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime = 0;
cpu_startTime = clock();
cpu_method( &h_in[0]);
int * ptr = ans_cpu.ptr;
int cpu_result = ans_cpu.max;
cpu_endTime = clock();
cpu_ElapseTime = ((cpu_endTime - cpu_startTime) / (1.0 * CLOCKS_PER_SEC)) * 1000;
int flag = test_solution(ptr, &h_out[0], &h_in[0], * h_max, cpu_result);
if (flag) {
printf("The computed matrix is correct!\n");
printf("Time taken by GPU : %f ms\n", milliseconds);
printf("Time taken by CPU : %f ms\n", cpu_ElapseTime);
printf("The area of square is %d sq units\n", ( *h_max) * ( *h_max));
}
else {
printf("The computed matrix is incorrect!\n");
}
hipFree(d_in);
hipFree(d_out);
hipFree(d_final);
hipFree(d_max);
free(h_max);
return 0;
}
| 15c8503e74538f1359cca506ee829454e4e978e5.cu | #include<stdio.h>
#include<stdlib.h>
#define CEIL(a, b)((a - 1) / b + 1)
const int ROWS = 999;
const int COLUMNS = 1000;
const int BLOCK_SIZE = 1024;
const int MATRIX_BYTES = ROWS * COLUMNS * sizeof(int);
// Largest-all-ones-square DP table, computed anti-diagonal by anti-diagonal:
// d_out[x][y] = side length of the biggest square of 1s whose bottom-right
// corner is (x, y).  The first row/column of d_out are pre-filled by the host
// (see main); one thread handles one cell of the current anti-diagonal.
// The loop bounds are uniform across the block, so the __syncthreads() at the
// bottom of the loop is reached by every thread (it separates successive
// diagonals).  Launched with a single block (see main), so the longest
// anti-diagonal must fit in one block.
__global__ void get_matrix(int * d_in, int * d_out) {
int idx = threadIdx.x;
// m = (shorter dimension - 1), n = (longer dimension - 1)
int m = (ROWS > COLUMNS ? COLUMNS : ROWS) - 1;
int n = (ROWS > COLUMNS ? ROWS : COLUMNS) - 1;
int n_iter = m + n - 1;   // number of anti-diagonal sweeps
int tbc, left, up, corner;
for (int i = 0; i < n_iter; i++) {
int x_index, y_index;
if (i >= n) {
// FOR THIRD PART OF ITERATIONS
// (shrinking diagonals near the bottom-right corner)
if (idx >= i - n + 1) {
if (ROWS <= COLUMNS) {
x_index = i + 1 - idx + m - n;
y_index = idx + 1 + n - m;
}
else {
x_index = idx + 1 + n - m;
y_index = i + 1 - idx + m - n;
}
tbc = * (d_in + ((x_index) * COLUMNS + (y_index)));
if (!tbc) {
// input cell is 0 -> no square ends here
*(d_out + ((x_index) * COLUMNS + (y_index))) = 0;
}
else {
// extend the smallest of the three neighbouring squares by one
left = * (d_out + ((x_index) * COLUMNS + (y_index - 1)));
up = * (d_out + ((x_index - 1) * COLUMNS + (y_index)));
corner = * (d_out + ((x_index - 1) * COLUMNS + (y_index - 1)));
int mini = (left > up ? up : left);
*(d_out + ((x_index) * COLUMNS + (y_index))) = (mini > corner ? corner : mini) + 1;
}
}
}
else if (i >= m - 1 && i <= n - 1) {
// FOR SECOND PART OF ITERATIONS
// (full-length diagonals in the middle band)
if (ROWS <= COLUMNS) {
x_index = m - idx;
y_index = idx + i - m + 2;
}
else {
x_index = idx + i - m + 2;
y_index = m - idx;
}
tbc = * (d_in + ((x_index) * COLUMNS + (y_index)));
if (!tbc){
*(d_out + ((x_index) * COLUMNS + (y_index))) = 0;
}
else {
left = * (d_out + ((x_index) * COLUMNS + (y_index - 1)));
up = * (d_out + ((x_index - 1) * COLUMNS + (y_index)));
corner = * (d_out + ((x_index - 1) * COLUMNS + (y_index - 1)));
int mini = (left > up ? up : left);
*(d_out + ((x_index) * COLUMNS + (y_index))) = (mini > corner ? corner : mini) + 1;
}
}
else {
// FOR FIRST PART OF ITERATIONS
// (growing diagonals near the top-left corner)
if (idx <= i) {
tbc = * (d_in + ((i + 1 - idx) * COLUMNS + (idx + 1)));
if (!tbc){
*(d_out + ((i + 1 - idx) * COLUMNS + (idx + 1))) = 0;
}
else {
left = * (d_out + ((i + 1 - idx) * COLUMNS + (idx)));
up = * (d_out + ((i - idx) * COLUMNS + (idx + 1)));
corner = * (d_out + ((i - idx) * COLUMNS + (idx)));
int mini = (left > up ? up : left);
*(d_out + ((i + 1 - idx) * COLUMNS + (idx + 1))) = (mini > corner ? corner : mini) + 1;
}
}
}
// all cells of diagonal i must be written before diagonal i+1 reads them
__syncthreads();
}
}
int S[ROWS][COLUMNS];
struct combine {
int max;
int * ptr;
};
struct combine ans_cpu;
// CPU reference for the largest-all-ones-square problem.
// Fills the DP table S (S[i][j] = side of the biggest square of 1s whose
// bottom-right corner is (i, j)) and publishes the maximum entry and the
// table's address through the global `ans_cpu`.
void cpu_method(int * h_in_element) {
  // first row and first column are copied straight from the input
  for (int r = 0; r < ROWS; r++)
    S[r][0] = h_in_element[r * COLUMNS];
  for (int c = 0; c < COLUMNS; c++)
    S[0][c] = h_in_element[c];
  // interior cells: a square can only be extended where the input is 1
  for (int r = 1; r < ROWS; r++) {
    for (int c = 1; c < COLUMNS; c++) {
      if (h_in_element[r * COLUMNS + c] != 1) {
        S[r][c] = 0;
        continue;
      }
      int left   = S[r][c - 1];
      int up     = S[r - 1][c];
      int corner = S[r - 1][c - 1];
      int best = left < up ? left : up;
      if (corner < best) best = corner;
      S[r][c] = best + 1;   // min of the three neighbours, plus this cell
    }
  }
  // scan the whole table for its largest entry
  int largest = S[0][0];
  for (int r = 0; r < ROWS; r++) {
    for (int c = 0; c < COLUMNS; c++) {
      if (S[r][c] > largest)
        largest = S[r][c];
    }
  }
  ans_cpu.max = largest;
  ans_cpu.ptr = & S[0][0];
}
// Compare the CPU DP table (`ptr`) against the GPU one (`h_out`) element by
// element, and the two maxima against each other.  Returns 1 on full
// agreement, 0 otherwise.  (`h_in` is accepted for signature compatibility
// but not inspected.)
int test_solution(int * ptr, int * h_out, int * h_in, int gpu_result, int cpu_result) {
  int matches = 1;
  for (int r = 0; r < ROWS && matches; r++) {
    for (int c = 0; c < COLUMNS; c++) {
      if (ptr[r * COLUMNS + c] != h_out[r * COLUMNS + c]) {
        matches = 0;     // first mismatch ends the scan
        break;
      }
    }
  }
  if (gpu_result != cpu_result)
    matches = 0;
  return matches;
}
// Global maximum of d_final[0 .. ROWS*COLUMNS-1]: tree reduction within each
// block, then one atomicMax per block into *d_max.
//
// Fix: the original wrapped the whole body (including the __syncthreads()
// inside the loop) in `if (idx < ROWS * COLUMNS)`, so out-of-range threads in
// the last block never reached the barrier -- a divergent __syncthreads(),
// which is undefined behavior.  The bounds check is now applied per access
// while every thread executes the (uniform) loop and barrier.
__global__ void find_max(int * d_final, int * d_max) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int step = 1;
while (step < blockDim.x) {
if (idx < ROWS * COLUMNS && idx % (step * 2) == 0 && (idx + step) < (ROWS * COLUMNS)) {
if(*(d_final + idx + step) > *(d_final + idx))
*(d_final + idx) = *(d_final + idx + step);
}
__syncthreads();   // reached by ALL threads of the block, every iteration
step *= 2;
}
// one representative per block publishes the block maximum
if (idx < ROWS * COLUMNS && threadIdx.x == 0) {
atomicMax(d_max, *(d_final + idx));
}
}
// Driver: build a random 0/1 matrix, run the wavefront DP + reduction on the
// GPU, recompute on the CPU, and compare results and timings.
int main() {
srand(time(0));
// NOTE(review): ~4 MB each on the stack -- close to typical stack limits;
// consider heap allocation if ROWS*COLUMNS grows.
int h_in[ROWS * COLUMNS], h_out[ROWS * COLUMNS], * h_max;
h_max = (int * ) malloc(sizeof(int));
for (int i = 0; i < ROWS; i++) {
for (int j = 0; j < COLUMNS; j++) {
*(h_in + (i * COLUMNS + j)) = rand() % 2;
}
}
// pre-fill the first row and column of the DP table from the input;
// the kernel computes only the interior
for (int i = 0; i < COLUMNS; i++)
*(h_out + (i)) = * (h_in + (i));
for (int i = 0; i < ROWS; i++)
*(h_out + (i * COLUMNS)) = * (h_in + (i * COLUMNS));
for (int i = 1; i < ROWS; i++) {
for (int j = 1; j < COLUMNS; j++) {
*(h_out + (i * COLUMNS + j)) = -1;
}
}
int * d_in, * d_out, * d_final, * d_max;
cudaMalloc( &d_in, MATRIX_BYTES);
cudaMalloc( &d_out, MATRIX_BYTES);
cudaMalloc( &d_final, MATRIX_BYTES);
cudaMalloc( &d_max, sizeof(int));
// BUG FIX: cudaMalloc does not zero memory, but find_max accumulates into
// *d_max with atomicMax -- it needs a defined starting value (0 is a safe
// identity since all DP entries are >= 0).
cudaMemset(d_max, 0, sizeof(int));
cudaMemcpy(d_in, h_in, MATRIX_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, h_out, MATRIX_BYTES, cudaMemcpyHostToDevice);
// one thread per cell of the longest anti-diagonal (single-block launch)
int min = ROWS >= COLUMNS ? COLUMNS : ROWS;
int THREADS;
if (min >= BLOCK_SIZE - 1)
THREADS = BLOCK_SIZE;
else
THREADS = min - 1;
cudaEvent_t start, stop;
cudaEventCreate( &start);
cudaEventCreate( &stop);
cudaEventRecord(start);
get_matrix<<<1, THREADS>>>(d_in, d_out);
cudaMemcpy(h_out, d_out, MATRIX_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(d_final, h_out, MATRIX_BYTES, cudaMemcpyHostToDevice);
find_max<<<CEIL((ROWS * COLUMNS), BLOCK_SIZE), BLOCK_SIZE>>>(d_final, d_max);
cudaMemcpy(h_max, d_max, sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime( &milliseconds, start, stop);
// CPU reference for correctness and timing comparison
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime = 0;
cpu_startTime = clock();
cpu_method( &h_in[0]);
int * ptr = ans_cpu.ptr;
int cpu_result = ans_cpu.max;
cpu_endTime = clock();
cpu_ElapseTime = ((cpu_endTime - cpu_startTime) / (1.0 * CLOCKS_PER_SEC)) * 1000;
int flag = test_solution(ptr, &h_out[0], &h_in[0], * h_max, cpu_result);
if (flag) {
printf("The computed matrix is correct!\n");
printf("Time taken by GPU : %f ms\n", milliseconds);
printf("Time taken by CPU : %f ms\n", cpu_ElapseTime);
printf("The area of square is %d sq units\n", ( *h_max) * ( *h_max));
}
else {
printf("The computed matrix is incorrect!\n");
}
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_final);
cudaFree(d_max);
free(h_max);
return 0;
}
|
81124b3d68c45245b635ac3f0227e51a11b26619.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "diffusion3d_cuda.h"
#define CUDA_SAFE_CALL(c) \
do { \
assert(c == hipSuccess); \
} while (0)
namespace diffusion3d {
// 3-D 7-point diffusion stencil, one z-column per thread.
// The current x-y plane is staged in the shared tile sb[]; the z-neighbours
// travel in registers t1/t2/t3 while the thread marches up its column.
// Assumes blockDim == (BLOCK_X, BLOCK_Y) so sb[] indexing is valid, and that
// nz is divisible by gridDim.z (each z-slab is nz/gridDim.z planes) -- TODO
// confirm both against the launcher.
__global__ void diffusion_kernel_shared(F1_DECL, REAL *__restrict f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
const int i = blockDim.x * blockIdx.x + tid_x;
const int j = blockDim.y * blockIdx.y + tid_y;
const int xy = nx * ny;
__shared__ REAL sb[BLOCK_X * BLOCK_Y];   // one x-y tile of f1
const int block_z = nz / gridDim.z;      // planes handled per block
int k = block_z * blockIdx.z;
const int k_end = k + block_z;
int c = i + j * nx + k * xy;             // flat index of (i, j, k)
const int c1 = tid_x + tid_y * blockDim.x;  // this thread's slot in sb
REAL t1, t2, t3;                         // f1 at planes k-1, k, k+1
t3 = f1[c];
t2 = (k == 0) ? t3 : f1[c-xy];           // clamp at the bottom boundary
// neighbour slots inside the shared tile (clamped at the domain edges)
int w = (i == 0) ? c1 : c1 - 1;
int e = (i == nx-1) ? c1 : c1 + 1;
int n = (j == 0) ? c1 : c1 - blockDim.x;
int s = (j == ny-1) ? c1 : c1 + blockDim.x;
// halo flags: neighbour lies in another block -> read it from global memory
int bw = tid_x == 0 && i != 0;
int be = tid_x == blockDim.x-1 && i != nx - 1;
int bn = tid_y == 0 && j != 0;
int bs = tid_y == blockDim.y-1 && j != ny - 1;
#pragma unroll
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
sb[c1] = t2;
t3 = f1[c+xy];
REAL t = cc * t2 + cb * t1 + ct * t3;
// tile fully written before neighbours are read from it
__syncthreads();
t += cw * (bw ? f1[c-1] : sb[w]);
t += ce * (be ? f1[c+1] : sb[e]);
t += cs * (bs ? f1[c+nx] : sb[s]);
t += cn * (bn ? f1[c-nx] : sb[n]);
f2[c] = t;
c += xy;
// everyone done reading sb before the next plane overwrites it
__syncthreads();
}
// last plane of this block's z-slab (clamp t3 at the top boundary)
t1 = t2;
t2 = t3;
sb[c1] = t2;
t3 = (k < nz-1) ? f1[c+xy] : t3;
REAL t = cc * t2 + cb * t1 + ct * t3;
__syncthreads();
t += cw * (bw ? f1[c-1] : sb[w]);
t += ce * (be ? f1[c+1] : sb[e]);
t += cs * (bs ? f1[c+nx] : sb[s]);
t += cn * (bn ? f1[c-nx] : sb[n]);
f2[c] = t;
return;
}
// Run the base-class setup, then ask the runtime to favour shared memory
// over L1 for the stencil kernel (it stages x-y tiles in shared memory).
void Diffusion3DCUDAShared::InitializeBenchmark() {
Diffusion3DCUDA::InitializeBenchmark();
CUDA_SAFE_CALL(hipFuncSetCacheConfig(diffusion_kernel_shared,
hipFuncCachePreferShared));
}
// Copy f1_ to the device, run `count` sweeps of the diffusion kernel
// (ping-ponging f1_d_/f2_d_ between input and output), and copy the result
// back.  Two OpenMP threads run concurrently: thread 0 samples GPU power
// until `flag` is set; thread 1 performs the timed kernel loop.
void Diffusion3DCUDAShared::RunKernel(int count) {
int flag = 0;   // 0 while GPU work runs; set to 1 to stop the power sampler
// the x-y tiles must partition the domain exactly
assert(nx_ % block_x_ == 0);
assert(ny_ % block_y_ == 0);
assert(nx_ / block_x_ > 0);
assert(ny_ / block_y_ > 0);
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
CUDA_SAFE_CALL(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, 1);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_);
#pragma omp parallel num_threads(2) shared(flag)
{
if (omp_get_thread_num() == 0)
{
power = GetPowerGPU(&flag, 0);
}
else
{
// NOTE(review): this barrier is reached by thread 1 only -- presumably
// GetPowerGPU() executes a matching barrier internally; verify.
#pragma omp barrier
CUDA_SAFE_CALL(hipEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
hipLaunchKernelGGL(( diffusion_kernel_shared), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
// ping-pong: this sweep's output becomes the next sweep's input
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CUDA_SAFE_CALL(hipEventRecord(ev2_));
CUDA_SAFE_CALL(hipDeviceSynchronize());
flag = 1;   // all kernels done -> stop the power sampler
}
}
CUDA_SAFE_CALL(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
}
| 81124b3d68c45245b635ac3f0227e51a11b26619.cu | #include "diffusion3d_cuda.h"
#define CUDA_SAFE_CALL(c) \
do { \
assert(c == cudaSuccess); \
} while (0)
namespace diffusion3d {
// 3-D 7-point diffusion stencil, one z-column per thread.
// The current x-y plane is staged in the shared tile sb[]; the z-neighbours
// travel in registers t1/t2/t3 while the thread marches up its column.
// Assumes blockDim == (BLOCK_X, BLOCK_Y) so sb[] indexing is valid, and that
// nz is divisible by gridDim.z (each z-slab is nz/gridDim.z planes) -- TODO
// confirm both against the launcher.
__global__ void diffusion_kernel_shared(F1_DECL, REAL *__restrict f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
const int i = blockDim.x * blockIdx.x + tid_x;
const int j = blockDim.y * blockIdx.y + tid_y;
const int xy = nx * ny;
__shared__ REAL sb[BLOCK_X * BLOCK_Y];   // one x-y tile of f1
const int block_z = nz / gridDim.z;      // planes handled per block
int k = block_z * blockIdx.z;
const int k_end = k + block_z;
int c = i + j * nx + k * xy;             // flat index of (i, j, k)
const int c1 = tid_x + tid_y * blockDim.x;  // this thread's slot in sb
REAL t1, t2, t3;                         // f1 at planes k-1, k, k+1
t3 = f1[c];
t2 = (k == 0) ? t3 : f1[c-xy];           // clamp at the bottom boundary
// neighbour slots inside the shared tile (clamped at the domain edges)
int w = (i == 0) ? c1 : c1 - 1;
int e = (i == nx-1) ? c1 : c1 + 1;
int n = (j == 0) ? c1 : c1 - blockDim.x;
int s = (j == ny-1) ? c1 : c1 + blockDim.x;
// halo flags: neighbour lies in another block -> read it from global memory
int bw = tid_x == 0 && i != 0;
int be = tid_x == blockDim.x-1 && i != nx - 1;
int bn = tid_y == 0 && j != 0;
int bs = tid_y == blockDim.y-1 && j != ny - 1;
#pragma unroll
for (; k < k_end-1; ++k) {
t1 = t2;
t2 = t3;
sb[c1] = t2;
t3 = f1[c+xy];
REAL t = cc * t2 + cb * t1 + ct * t3;
// tile fully written before neighbours are read from it
__syncthreads();
t += cw * (bw ? f1[c-1] : sb[w]);
t += ce * (be ? f1[c+1] : sb[e]);
t += cs * (bs ? f1[c+nx] : sb[s]);
t += cn * (bn ? f1[c-nx] : sb[n]);
f2[c] = t;
c += xy;
// everyone done reading sb before the next plane overwrites it
__syncthreads();
}
// last plane of this block's z-slab (clamp t3 at the top boundary)
t1 = t2;
t2 = t3;
sb[c1] = t2;
t3 = (k < nz-1) ? f1[c+xy] : t3;
REAL t = cc * t2 + cb * t1 + ct * t3;
__syncthreads();
t += cw * (bw ? f1[c-1] : sb[w]);
t += ce * (be ? f1[c+1] : sb[e]);
t += cs * (bs ? f1[c+nx] : sb[s]);
t += cn * (bn ? f1[c-nx] : sb[n]);
f2[c] = t;
return;
}
// Run the base-class setup, then ask the runtime to favour shared memory
// over L1 for the stencil kernel (it stages x-y tiles in shared memory).
void Diffusion3DCUDAShared::InitializeBenchmark() {
Diffusion3DCUDA::InitializeBenchmark();
CUDA_SAFE_CALL(cudaFuncSetCacheConfig(diffusion_kernel_shared,
cudaFuncCachePreferShared));
}
// Copy f1_ to the device, run `count` sweeps of the diffusion kernel
// (ping-ponging f1_d_/f2_d_ between input and output), and copy the result
// back.  Two OpenMP threads run concurrently: thread 0 samples GPU power
// until `flag` is set; thread 1 performs the timed kernel loop.
void Diffusion3DCUDAShared::RunKernel(int count) {
int flag = 0;   // 0 while GPU work runs; set to 1 to stop the power sampler
// the x-y tiles must partition the domain exactly
assert(nx_ % block_x_ == 0);
assert(ny_ % block_y_ == 0);
assert(nx_ / block_x_ > 0);
assert(ny_ / block_y_ > 0);
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
CUDA_SAFE_CALL(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
dim3 block_dim(block_x_, block_y_, 1);
dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_);
#pragma omp parallel num_threads(2) shared(flag)
{
if (omp_get_thread_num() == 0)
{
power = GetPowerGPU(&flag, 0);
}
else
{
// NOTE(review): this barrier is reached by thread 1 only -- presumably
// GetPowerGPU() executes a matching barrier internally; verify.
#pragma omp barrier
CUDA_SAFE_CALL(cudaEventRecord(ev1_));
for (int i = 0; i < count; ++i) {
diffusion_kernel_shared<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
// ping-pong: this sweep's output becomes the next sweep's input
REAL *t = f1_d_;
f1_d_ = f2_d_;
f2_d_ = t;
}
CUDA_SAFE_CALL(cudaEventRecord(ev2_));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
flag = 1;   // all kernels done -> stop the power sampler
}
}
CUDA_SAFE_CALL(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
}
|
35fb496d7e5a9cb027c9f7179e8276bd9fe039f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// 2-D stencil sweep along x with periodic x-boundaries: each thread loads its
// cell (plus left/right halos at the block edges; the leftmost/rightmost
// blocks wrap around the domain) into dynamic shared memory, then applies the
// numSten-point weighted sum.  Assumes the dynamic shared-memory allocation
// covers nxLocal*nyLocal doubles for the tile plus numSten doubles for the
// weights, and that nx is a multiple of BLOCK_X -- TODO confirm both against
// the launcher.
__global__ void kernel2DXp ( double* dataOutput, double* dataInput, const double* weights, const int numSten, const int numStenLeft, const int numStenRight, const int nxLocal, const int nyLocal, const int BLOCK_X, const int nx )
{
// -----------------------------
// Allocate the shared memory
// -----------------------------
extern __shared__ int memory[];
double* arrayLocal = (double*)&memory;
double* weigthsLocal = (double*)&arrayLocal[nxLocal * nyLocal];
// Move the weigths into shared memory
// (every thread writes the same values; redundant but benign)
#pragma unroll
for (int k = 0; k < numSten; k++)
{
weigthsLocal[k] = weights[k];
}
// -----------------------------
// Set the indexing
// -----------------------------
// True matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Local matrix index
// (shifted right by numStenLeft to leave room for the left halo)
int localIdx = threadIdx.x + numStenLeft;
int localIdy = threadIdx.y;
// Local sum variable
double sum = 0.0;
// Set index for summing stencil
int stenSet;
// -----------------------------
// Set interior
// -----------------------------
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
// -----------------------------
// Set x boundaries
// -----------------------------
// If block is in the interior
// (halos come from the neighbouring blocks' global data)
if (blockIdx.x != 0 && blockIdx.x != nx / BLOCK_X - 1)
{
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// If block is on the left boundary
// (left halo wraps around to the right edge of the domain)
if (blockIdx.x == 0)
{
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (nx - numStenLeft + threadIdx.x)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// Set the right boundary blocks
// (right halo wraps around to the left edge of the domain)
if (blockIdx.x == nx / BLOCK_X - 1)
{
arrayLocal[localIdy * nxLocal + threadIdx.x + numStenLeft] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + threadIdx.x];
}
}
// -----------------------------
// Compute the stencil
// -----------------------------
// tile (and weights) fully populated before any thread reads them
__syncthreads();
// window starts at the left halo for this thread's cell
stenSet = localIdy * nxLocal + threadIdx.x;
#pragma unroll
for (int k = 0; k < numSten; k++)
{
sum += weigthsLocal[k] * arrayLocal[stenSet + k];
}
__syncthreads();
// -----------------------------
// Copy back to global
// -----------------------------
dataOutput[globalIdy * nx + globalIdx] = sum;
} | 35fb496d7e5a9cb027c9f7179e8276bd9fe039f9.cu | #include "includes.h"
__global__ void kernel2DXp ( double* dataOutput, double* dataInput, const double* weights, const int numSten, const int numStenLeft, const int numStenRight, const int nxLocal, const int nyLocal, const int BLOCK_X, const int nx )
{
// -----------------------------
// Allocate the shared memory
// -----------------------------
extern __shared__ int memory[];
double* arrayLocal = (double*)&memory;
double* weigthsLocal = (double*)&arrayLocal[nxLocal * nyLocal];
// Move the weigths into shared memory
#pragma unroll
for (int k = 0; k < numSten; k++)
{
weigthsLocal[k] = weights[k];
}
// -----------------------------
// Set the indexing
// -----------------------------
// True matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Local matrix index
int localIdx = threadIdx.x + numStenLeft;
int localIdy = threadIdx.y;
// Local sum variable
double sum = 0.0;
// Set index for summing stencil
int stenSet;
// -----------------------------
// Set interior
// -----------------------------
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
// -----------------------------
// Set x boundaries
// -----------------------------
// If block is in the interior
if (blockIdx.x != 0 && blockIdx.x != nx / BLOCK_X - 1)
{
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// If block is on the left boundary
if (blockIdx.x == 0)
{
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (nx - numStenLeft + threadIdx.x)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// Set the right boundary blocks
if (blockIdx.x == nx / BLOCK_X - 1)
{
arrayLocal[localIdy * nxLocal + threadIdx.x + numStenLeft] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + threadIdx.x];
}
}
// -----------------------------
// Compute the stencil
// -----------------------------
__syncthreads();
stenSet = localIdy * nxLocal + threadIdx.x;
#pragma unroll
for (int k = 0; k < numSten; k++)
{
sum += weigthsLocal[k] * arrayLocal[stenSet + k];
}
__syncthreads();
// -----------------------------
// Copy back to global
// -----------------------------
dataOutput[globalIdy * nx + globalIdx] = sum;
} |
c931842a837802a05993355317205e631589095d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Author: Raffaele Solca' (rasolca@itp.phys.ethz.ch)
//
// This file implements laset_gpu.hpp.
#include "dca/linalg/lapack/multiply_diagonal_gpu.hpp"
#include <cassert>
#include <hip/hip_complex.h>
#include <hip/hip_runtime.h>
#include "dca/linalg/util/complex_operators_cuda.cu.hpp"
#include "dca/linalg/util/error_cuda.hpp"
#include "dca/linalg/util/stream_functions.hpp"
#include "dca/util/integer_division.hpp"
namespace dca {
namespace linalg {
namespace lapack {
namespace kernels {
// dca::linalg::lapack::kernels::
constexpr int multiply_diag_block_size_x = 128;
constexpr int multiply_diag_block_size_y = 32;
template <typename Type>
__global__ void multiplyDiagonalLeft(int m, int n, const Type* d, int inc_d, const Type* a, int lda,
Type* b, int ldb) {
// Work on a tile of size (blockDim.x x multiply_diag_block_size_y).
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < m) {
int js = blockIdx.y * multiply_diag_block_size_y;
int je = min(n, (blockIdx.y + 1) * blockDim.x);
for (int j = js; j < je; ++j)
b[i + j * ldb] = d[i * inc_d] * a[i + j * lda];
}
}
template <typename Type>
__global__ void multiplyDiagonalRight(int m, int n, const Type* a, int lda, const Type* d,
int inc_d, Type* b, int ldb) {
// Work on a tile of size (blockDim.x x multiply_diag_block_size_y).
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < m) {
int js = blockIdx.y * multiply_diag_block_size_y;
int je = min(n, (blockIdx.y + 1) * blockDim.x);
for (int j = js; j < je; ++j)
b[i + j * ldb] = d[j * inc_d] * a[i + j * lda];
}
}
} // kernels
// dca::linalg::lapack::
template <typename Type>
void multiplyDiagonalLeft_gpu(int m, int n, const Type* d, int inc_d, const Type* a, int lda,
Type* b, int ldb, int thread_id, int stream_id) {
assert(lda >= m);
assert(ldb >= m);
if (m > 0 && n > 0) {
checkErrorsCudaDebug();
int bl_x = dca::util::ceilDiv(m, kernels::multiply_diag_block_size_x);
int bl_y = dca::util::ceilDiv(n, kernels::multiply_diag_block_size_y);
dim3 threads(kernels::multiply_diag_block_size_x);
dim3 blocks(bl_x, bl_y);
hipStream_t stream = dca::linalg::util::getStream(thread_id, stream_id);
hipLaunchKernelGGL(( kernels::multiplyDiagonalLeft), dim3(blocks), dim3(threads), 0, stream, m, n, d, inc_d, a, lda, b, ldb);
checkErrorsCudaDebug();
}
}
template void multiplyDiagonalLeft_gpu(int m, int n, const float* d, int inc_d, const float* a,
int lda, float* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalLeft_gpu(int m, int n, const double* d, int inc_d, const double* a,
int lda, double* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalLeft_gpu(int m, int n, const hipComplex* d, int inc_d,
const hipComplex* a, int lda, hipComplex* b, int ldb,
int thread_id, int stream_id);
template void multiplyDiagonalLeft_gpu(int m, int n, const hipDoubleComplex* d, int inc_d,
const hipDoubleComplex* a, int lda, hipDoubleComplex* b,
int ldb, int thread_id, int stream_id);
template <typename Type>
void multiplyDiagonalRight_gpu(int m, int n, const Type* a, int lda, const Type* d, int inc_d,
Type* b, int ldb, int thread_id, int stream_id) {
assert(lda >= m);
assert(ldb >= m);
if (m > 0 && n > 0) {
checkErrorsCudaDebug();
int bl_x = dca::util::ceilDiv(m, kernels::multiply_diag_block_size_x);
int bl_y = dca::util::ceilDiv(n, kernels::multiply_diag_block_size_y);
dim3 threads(kernels::multiply_diag_block_size_x);
dim3 blocks(bl_x, bl_y);
hipStream_t stream = dca::linalg::util::getStream(thread_id, stream_id);
hipLaunchKernelGGL(( kernels::multiplyDiagonalRight), dim3(blocks), dim3(threads), 0, stream, m, n, a, lda, d, inc_d, b, ldb);
checkErrorsCudaDebug();
}
}
template void multiplyDiagonalRight_gpu(int m, int n, const float* a, int lda, const float* d,
int inc_d, float* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalRight_gpu(int m, int n, const double* a, int lda, const double* d,
int inc_d, double* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalRight_gpu(int m, int n, const hipComplex* a, int lda,
const hipComplex* d, int inc_d, hipComplex* b, int ldb,
int thread_id, int stream_id);
template void multiplyDiagonalRight_gpu(int m, int n, const hipDoubleComplex* a, int lda,
const hipDoubleComplex* d, int inc_d, hipDoubleComplex* b,
int ldb, int thread_id, int stream_id);
} // lapack
} // linalg
} // dca
| c931842a837802a05993355317205e631589095d.cu | // Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Author: Raffaele Solca' (rasolca@itp.phys.ethz.ch)
//
// This file implements laset_gpu.hpp.
#include "dca/linalg/lapack/multiply_diagonal_gpu.hpp"
#include <cassert>
#include <cuComplex.h>
#include <cuda_runtime.h>
#include "dca/linalg/util/complex_operators_cuda.cu.hpp"
#include "dca/linalg/util/error_cuda.hpp"
#include "dca/linalg/util/stream_functions.hpp"
#include "dca/util/integer_division.hpp"
namespace dca {
namespace linalg {
namespace lapack {
namespace kernels {
// dca::linalg::lapack::kernels::
constexpr int multiply_diag_block_size_x = 128;
constexpr int multiply_diag_block_size_y = 32;
template <typename Type>
__global__ void multiplyDiagonalLeft(int m, int n, const Type* d, int inc_d, const Type* a, int lda,
Type* b, int ldb) {
// Work on a tile of size (blockDim.x x multiply_diag_block_size_y).
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < m) {
int js = blockIdx.y * multiply_diag_block_size_y;
int je = min(n, (blockIdx.y + 1) * blockDim.x);
for (int j = js; j < je; ++j)
b[i + j * ldb] = d[i * inc_d] * a[i + j * lda];
}
}
template <typename Type>
__global__ void multiplyDiagonalRight(int m, int n, const Type* a, int lda, const Type* d,
int inc_d, Type* b, int ldb) {
// Work on a tile of size (blockDim.x x multiply_diag_block_size_y).
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < m) {
int js = blockIdx.y * multiply_diag_block_size_y;
int je = min(n, (blockIdx.y + 1) * blockDim.x);
for (int j = js; j < je; ++j)
b[i + j * ldb] = d[j * inc_d] * a[i + j * lda];
}
}
} // kernels
// dca::linalg::lapack::
template <typename Type>
void multiplyDiagonalLeft_gpu(int m, int n, const Type* d, int inc_d, const Type* a, int lda,
Type* b, int ldb, int thread_id, int stream_id) {
assert(lda >= m);
assert(ldb >= m);
if (m > 0 && n > 0) {
checkErrorsCudaDebug();
int bl_x = dca::util::ceilDiv(m, kernels::multiply_diag_block_size_x);
int bl_y = dca::util::ceilDiv(n, kernels::multiply_diag_block_size_y);
dim3 threads(kernels::multiply_diag_block_size_x);
dim3 blocks(bl_x, bl_y);
cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id);
kernels::multiplyDiagonalLeft<<<blocks, threads, 0, stream>>>(m, n, d, inc_d, a, lda, b, ldb);
checkErrorsCudaDebug();
}
}
template void multiplyDiagonalLeft_gpu(int m, int n, const float* d, int inc_d, const float* a,
int lda, float* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalLeft_gpu(int m, int n, const double* d, int inc_d, const double* a,
int lda, double* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalLeft_gpu(int m, int n, const cuComplex* d, int inc_d,
const cuComplex* a, int lda, cuComplex* b, int ldb,
int thread_id, int stream_id);
template void multiplyDiagonalLeft_gpu(int m, int n, const cuDoubleComplex* d, int inc_d,
const cuDoubleComplex* a, int lda, cuDoubleComplex* b,
int ldb, int thread_id, int stream_id);
template <typename Type>
void multiplyDiagonalRight_gpu(int m, int n, const Type* a, int lda, const Type* d, int inc_d,
Type* b, int ldb, int thread_id, int stream_id) {
assert(lda >= m);
assert(ldb >= m);
if (m > 0 && n > 0) {
checkErrorsCudaDebug();
int bl_x = dca::util::ceilDiv(m, kernels::multiply_diag_block_size_x);
int bl_y = dca::util::ceilDiv(n, kernels::multiply_diag_block_size_y);
dim3 threads(kernels::multiply_diag_block_size_x);
dim3 blocks(bl_x, bl_y);
cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id);
kernels::multiplyDiagonalRight<<<blocks, threads, 0, stream>>>(m, n, a, lda, d, inc_d, b, ldb);
checkErrorsCudaDebug();
}
}
template void multiplyDiagonalRight_gpu(int m, int n, const float* a, int lda, const float* d,
int inc_d, float* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalRight_gpu(int m, int n, const double* a, int lda, const double* d,
int inc_d, double* b, int ldb, int thread_id, int stream_id);
template void multiplyDiagonalRight_gpu(int m, int n, const cuComplex* a, int lda,
const cuComplex* d, int inc_d, cuComplex* b, int ldb,
int thread_id, int stream_id);
template void multiplyDiagonalRight_gpu(int m, int n, const cuDoubleComplex* a, int lda,
const cuDoubleComplex* d, int inc_d, cuDoubleComplex* b,
int ldb, int thread_id, int stream_id);
} // lapack
} // linalg
} // dca
|
d60ba9a00ecd69dc15dccd117ccc3001863421e5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <random>
#include <timeSeries/jones_transform.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace TimeSeries {
//parameter structure definition
struct JonesTransParam {
int batchSize;
int pValue;
double tolerance;
};
//test fixture class
template
<typename DataT>
class JonesTransTest : public ::testing::TestWithParam<JonesTransParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<JonesTransParam>::GetParam();
nElements = params.batchSize * params.pValue;
//generating random value test input that is stored in row major
std::vector<double> arr1(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_real_distribution<double> realGenerator(0, 1);
std::generate(arr1.begin(), arr1.end(),
[&]() { return realGenerator(dre); });
//>>>>>>>>>>>>>>>>> AR transform golden output generation<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
double *newParams = (double *)malloc(nElements * sizeof(double *));
double *tmp = (double *)malloc(params.pValue * sizeof(double *));
//for every model in the batch
for (int i = 0; i < params.batchSize; ++i) {
//storing the partial autocorrelation of each ar coefficient of a given batch in newParams and the same in another temporary copy
for (int j = 0; j < params.pValue; ++j) {
newParams[i * params.pValue + j] =
((1 - exp(-1 * arr1[i * params.pValue + j])) /
(1 + exp(-1 * arr1[i * params.pValue + j])));
tmp[j] = newParams[i * params.pValue + j];
}
//calculating according to jone's recursive formula: phi(j,k) = phi(j-1,k) - a(j)*phi(j-1,j-k)
for (int j = 1; j < params.pValue; ++j) {
//a is partial autocorrelation for jth coefficient
DataT a = newParams[i * params.pValue + j];
/*the recursive implementation of the transformation with:
- lhs tmp[k] => phi(j,k)
- rhs tmp[k] => phi(j-1,k)
- a => a(j)
- newParam[i*params.pValue + j-k-1] => phi(j-1, j-k)
*/
for (int k = 0; k < j; ++k) {
tmp[k] -= a * newParams[i * params.pValue + (j - k - 1)];
}
//copying it back for the next iteration
for (int iter = 0; iter < j; ++iter) {
newParams[i * params.pValue + iter] = tmp[iter];
}
}
}
//allocating and initializing device memory
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(d_golden_ar_trans, nElements, true);
raft::allocate(d_computed_ar_trans, nElements, true);
raft::allocate(d_params, nElements, true);
raft::update_device(d_params, &arr1[0], (size_t)nElements, stream);
raft::update_device(d_golden_ar_trans, newParams, (size_t)nElements,
stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//calling the ar_trans_param CUDA implementation
MLCommon::TimeSeries::jones_transform(d_params, params.batchSize,
params.pValue, d_computed_ar_trans,
true, false, allocator, stream);
//>>>>>>>>>>>>>>>>> MA transform golden output generation<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
//for every model in the batch
for (int i = 0; i < params.batchSize; ++i) {
//storing the partial autocorrelation of each ma coefficient of a given batch in newParams and the same in another temporary copy
for (int j = 0; j < params.pValue; ++j) {
newParams[i * params.pValue + j] =
((1 - exp(-1 * arr1[i * params.pValue + j])) /
(1 + exp(-1 * arr1[i * params.pValue + j])));
tmp[j] = newParams[i * params.pValue + j];
}
//calculating according to jone's recursive formula: phi(j,k) = phi(j-1,k) - a(j)*phi(j-1,j-k)
for (int j = 1; j < params.pValue; ++j) {
//a is partial autocorrelation for jth coefficient
DataT a = newParams[i * params.pValue + j];
/*the recursive implementation of the transformation with:
- lhs tmp[k] => phi(j,k)
- rhs tmp[k] => phi(j-1,k)
- a => a(j)
- newParam[i*params.pValue + j-k-1] => phi(j-1, j-k)
*/
for (int k = 0; k < j; ++k) {
tmp[k] += a * newParams[i * params.pValue + (j - k - 1)];
}
//copying it back for the next iteration
for (int iter = 0; iter < j; ++iter) {
newParams[i * params.pValue + iter] = tmp[iter];
}
}
}
//allocating and initializing device memory
raft::allocate(d_golden_ma_trans, nElements, true);
raft::allocate(d_computed_ma_trans, nElements, true);
raft::update_device(d_golden_ma_trans, newParams, (size_t)nElements,
stream);
//calling the ma_param_transform CUDA implementation
MLCommon::TimeSeries::jones_transform(d_params, params.batchSize,
params.pValue, d_computed_ma_trans,
false, false, allocator, stream);
//>>>>>>>>>>>>>>>>> AR inverse transform <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
//allocating and initializing device memory
raft::allocate(d_computed_ar_invtrans, nElements, true);
//calling the ar_param_inverse_transform CUDA implementation
MLCommon::TimeSeries::jones_transform(d_computed_ar_trans, params.batchSize,
params.pValue, d_computed_ar_invtrans,
true, true, allocator, stream);
//>>>>>>>>>>>>>>>>> MA inverse transform <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
raft::allocate(d_computed_ma_invtrans, nElements, true);
//calling the ma_param_inverse_transform CUDA implementation
MLCommon::TimeSeries::jones_transform(d_computed_ma_trans, params.batchSize,
params.pValue, d_computed_ma_invtrans,
false, true, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(hipFree(d_computed_ar_trans));
CUDA_CHECK(hipFree(d_computed_ma_trans));
CUDA_CHECK(hipFree(d_computed_ar_invtrans));
CUDA_CHECK(hipFree(d_computed_ma_invtrans));
CUDA_CHECK(hipFree(d_golden_ar_trans));
CUDA_CHECK(hipFree(d_golden_ma_trans));
CUDA_CHECK(hipFree(d_params));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
JonesTransParam params;
DataT *d_golden_ar_trans = nullptr;
DataT *d_golden_ma_trans = nullptr;
DataT *d_computed_ar_trans = nullptr;
DataT *d_computed_ma_trans = nullptr;
DataT *d_computed_ar_invtrans = nullptr;
DataT *d_computed_ma_invtrans = nullptr;
DataT *d_params = nullptr;
hipStream_t stream;
int nElements = -1;
};
//setting test parameter values
const std::vector<JonesTransParam> inputs = {
{500, 4, 0.001}, {500, 3, 0.001}, {500, 2, 0.001},
{500, 1, 0.001}, {5000, 4, 0.001}, {5000, 3, 0.001},
{5000, 2, 0.001}, {5000, 1, 0.001}, {4, 4, 0.001},
{4, 3, 0.001}, {4, 2, 0.001}, {4, 1, 0.001},
{500000, 4, 0.0001}, {500000, 3, 0.0001}, {500000, 2, 0.0001},
{500000, 1, 0.0001}};
//writing the test suite
typedef JonesTransTest<double> JonesTransTestClass;
TEST_P(JonesTransTestClass, Result) {
ASSERT_TRUE(raft::devArrMatch(d_computed_ar_trans, d_golden_ar_trans,
nElements,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(d_computed_ma_trans, d_golden_ma_trans,
nElements,
raft::CompareApprox<double>(params.tolerance)));
/*
Test verifying the inversion property:
initially generated random coefficients -> ar_param_transform() / ma_param_transform() ->
transformed coefficients -> ar_param_inverse_transform()/ma_param_inverse_transform() ->
initially generated random coefficients
*/
ASSERT_TRUE(raft::devArrMatch(d_computed_ma_invtrans, d_params, nElements,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(d_computed_ar_invtrans, d_params, nElements,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(JonesTrans, JonesTransTestClass,
::testing::ValuesIn(inputs));
} //end namespace TimeSeries
} //end namespace MLCommon
| d60ba9a00ecd69dc15dccd117ccc3001863421e5.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <random>
#include <timeSeries/jones_transform.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace TimeSeries {
//parameter structure definition
struct JonesTransParam {
int batchSize;
int pValue;
double tolerance;
};
//test fixture class
template
<typename DataT>
class JonesTransTest : public ::testing::TestWithParam<JonesTransParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<JonesTransParam>::GetParam();
nElements = params.batchSize * params.pValue;
//generating random value test input that is stored in row major
std::vector<double> arr1(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_real_distribution<double> realGenerator(0, 1);
std::generate(arr1.begin(), arr1.end(),
[&]() { return realGenerator(dre); });
//>>>>>>>>>>>>>>>>> AR transform golden output generation<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
double *newParams = (double *)malloc(nElements * sizeof(double *));
double *tmp = (double *)malloc(params.pValue * sizeof(double *));
//for every model in the batch
for (int i = 0; i < params.batchSize; ++i) {
//storing the partial autocorrelation of each ar coefficient of a given batch in newParams and the same in another temporary copy
for (int j = 0; j < params.pValue; ++j) {
newParams[i * params.pValue + j] =
((1 - exp(-1 * arr1[i * params.pValue + j])) /
(1 + exp(-1 * arr1[i * params.pValue + j])));
tmp[j] = newParams[i * params.pValue + j];
}
//calculating according to jone's recursive formula: phi(j,k) = phi(j-1,k) - a(j)*phi(j-1,j-k)
for (int j = 1; j < params.pValue; ++j) {
//a is partial autocorrelation for jth coefficient
DataT a = newParams[i * params.pValue + j];
/*the recursive implementation of the transformation with:
- lhs tmp[k] => phi(j,k)
- rhs tmp[k] => phi(j-1,k)
- a => a(j)
- newParam[i*params.pValue + j-k-1] => phi(j-1, j-k)
*/
for (int k = 0; k < j; ++k) {
tmp[k] -= a * newParams[i * params.pValue + (j - k - 1)];
}
//copying it back for the next iteration
for (int iter = 0; iter < j; ++iter) {
newParams[i * params.pValue + iter] = tmp[iter];
}
}
}
//allocating and initializing device memory
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(d_golden_ar_trans, nElements, true);
raft::allocate(d_computed_ar_trans, nElements, true);
raft::allocate(d_params, nElements, true);
raft::update_device(d_params, &arr1[0], (size_t)nElements, stream);
raft::update_device(d_golden_ar_trans, newParams, (size_t)nElements,
stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//calling the ar_trans_param CUDA implementation
MLCommon::TimeSeries::jones_transform(d_params, params.batchSize,
params.pValue, d_computed_ar_trans,
true, false, allocator, stream);
//>>>>>>>>>>>>>>>>> MA transform golden output generation<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
//for every model in the batch
for (int i = 0; i < params.batchSize; ++i) {
//storing the partial autocorrelation of each ma coefficient of a given batch in newParams and the same in another temporary copy
for (int j = 0; j < params.pValue; ++j) {
newParams[i * params.pValue + j] =
((1 - exp(-1 * arr1[i * params.pValue + j])) /
(1 + exp(-1 * arr1[i * params.pValue + j])));
tmp[j] = newParams[i * params.pValue + j];
}
//calculating according to jone's recursive formula: phi(j,k) = phi(j-1,k) - a(j)*phi(j-1,j-k)
for (int j = 1; j < params.pValue; ++j) {
//a is partial autocorrelation for jth coefficient
DataT a = newParams[i * params.pValue + j];
/*the recursive implementation of the transformation with:
- lhs tmp[k] => phi(j,k)
- rhs tmp[k] => phi(j-1,k)
- a => a(j)
- newParam[i*params.pValue + j-k-1] => phi(j-1, j-k)
*/
for (int k = 0; k < j; ++k) {
tmp[k] += a * newParams[i * params.pValue + (j - k - 1)];
}
//copying it back for the next iteration
for (int iter = 0; iter < j; ++iter) {
newParams[i * params.pValue + iter] = tmp[iter];
}
}
}
//allocating and initializing device memory
raft::allocate(d_golden_ma_trans, nElements, true);
raft::allocate(d_computed_ma_trans, nElements, true);
raft::update_device(d_golden_ma_trans, newParams, (size_t)nElements,
stream);
//calling the ma_param_transform CUDA implementation
MLCommon::TimeSeries::jones_transform(d_params, params.batchSize,
params.pValue, d_computed_ma_trans,
false, false, allocator, stream);
//>>>>>>>>>>>>>>>>> AR inverse transform <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
//allocating and initializing device memory
raft::allocate(d_computed_ar_invtrans, nElements, true);
//calling the ar_param_inverse_transform CUDA implementation
MLCommon::TimeSeries::jones_transform(d_computed_ar_trans, params.batchSize,
params.pValue, d_computed_ar_invtrans,
true, true, allocator, stream);
//>>>>>>>>>>>>>>>>> MA inverse transform <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
raft::allocate(d_computed_ma_invtrans, nElements, true);
//calling the ma_param_inverse_transform CUDA implementation
MLCommon::TimeSeries::jones_transform(d_computed_ma_trans, params.batchSize,
params.pValue, d_computed_ma_invtrans,
false, true, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(cudaFree(d_computed_ar_trans));
CUDA_CHECK(cudaFree(d_computed_ma_trans));
CUDA_CHECK(cudaFree(d_computed_ar_invtrans));
CUDA_CHECK(cudaFree(d_computed_ma_invtrans));
CUDA_CHECK(cudaFree(d_golden_ar_trans));
CUDA_CHECK(cudaFree(d_golden_ma_trans));
CUDA_CHECK(cudaFree(d_params));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
JonesTransParam params;
DataT *d_golden_ar_trans = nullptr;
DataT *d_golden_ma_trans = nullptr;
DataT *d_computed_ar_trans = nullptr;
DataT *d_computed_ma_trans = nullptr;
DataT *d_computed_ar_invtrans = nullptr;
DataT *d_computed_ma_invtrans = nullptr;
DataT *d_params = nullptr;
cudaStream_t stream;
int nElements = -1;
};
//setting test parameter values
const std::vector<JonesTransParam> inputs = {
{500, 4, 0.001}, {500, 3, 0.001}, {500, 2, 0.001},
{500, 1, 0.001}, {5000, 4, 0.001}, {5000, 3, 0.001},
{5000, 2, 0.001}, {5000, 1, 0.001}, {4, 4, 0.001},
{4, 3, 0.001}, {4, 2, 0.001}, {4, 1, 0.001},
{500000, 4, 0.0001}, {500000, 3, 0.0001}, {500000, 2, 0.0001},
{500000, 1, 0.0001}};
//writing the test suite
typedef JonesTransTest<double> JonesTransTestClass;
TEST_P(JonesTransTestClass, Result) {
ASSERT_TRUE(raft::devArrMatch(d_computed_ar_trans, d_golden_ar_trans,
nElements,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(d_computed_ma_trans, d_golden_ma_trans,
nElements,
raft::CompareApprox<double>(params.tolerance)));
/*
Test verifying the inversion property:
initially generated random coefficients -> ar_param_transform() / ma_param_transform() ->
transformed coefficients -> ar_param_inverse_transform()/ma_param_inverse_transform() ->
initially generated random coefficients
*/
ASSERT_TRUE(raft::devArrMatch(d_computed_ma_invtrans, d_params, nElements,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(d_computed_ar_invtrans, d_params, nElements,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(JonesTrans, JonesTransTestClass,
::testing::ValuesIn(inputs));
} //end namespace TimeSeries
} //end namespace MLCommon
|
13c3b1680ac63804ebd338419893c1f80018833d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void findleft(int *deviceMatrix, int *rowSum, int n){
int rownum = blockIdx.x;
int sum = 0;
int k;
for(k=0; k<n; k++)
sum += deviceMatrix[rownum*n+k];
rowSum[rownum] = sum;
}
int main(){
int n = 20;
int *hostMatrix, *deviceMatrix, *hostRowSum, *deviceRowSum;
int msize = n*n*sizeof(int);
hostMatrix = (int *) malloc(msize);
hostRowSum = (int *) malloc(n*sizeof(int));
hipMalloc((void **) &deviceMatrix, msize);
hipMalloc((void **) &deviceRowSum, msize/n);
int t = 0, i, j;
for(i = 0; i<n; i++){
for(j=0; j<n; j++)
hostMatrix[i*n+j] = t++;
}
hipMemcpy(deviceMatrix, hostMatrix, msize, hipMemcpyHostToDevice);
dim3 dimGrid(n,1);
dim3 dimBlock(1,1,1);
hipLaunchKernelGGL((
findleft), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceMatrix, deviceRowSum, n);
hipDeviceSynchronize();
hipMemcpy(hostRowSum, deviceRowSum, msize/n, hipMemcpyDeviceToHost);
for(i = 0; i<n; i++) printf("%d\n", hostRowSum[i]);
return 0;
} | 13c3b1680ac63804ebd338419893c1f80018833d.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void findleft(int *deviceMatrix, int *rowSum, int n){
int rownum = blockIdx.x;
int sum = 0;
int k;
for(k=0; k<n; k++)
sum += deviceMatrix[rownum*n+k];
rowSum[rownum] = sum;
}
int main(){
int n = 20;
int *hostMatrix, *deviceMatrix, *hostRowSum, *deviceRowSum;
int msize = n*n*sizeof(int);
hostMatrix = (int *) malloc(msize);
hostRowSum = (int *) malloc(n*sizeof(int));
cudaMalloc((void **) &deviceMatrix, msize);
cudaMalloc((void **) &deviceRowSum, msize/n);
int t = 0, i, j;
for(i = 0; i<n; i++){
for(j=0; j<n; j++)
hostMatrix[i*n+j] = t++;
}
cudaMemcpy(deviceMatrix, hostMatrix, msize, cudaMemcpyHostToDevice);
dim3 dimGrid(n,1);
dim3 dimBlock(1,1,1);
findleft<<<dimGrid, dimBlock>>>(deviceMatrix, deviceRowSum, n);
cudaThreadSynchronize();
cudaMemcpy(hostRowSum, deviceRowSum, msize/n, cudaMemcpyDeviceToHost);
for(i = 0; i<n; i++) printf("%d\n", hostRowSum[i]);
return 0;
} |
8aba5231be50342849b19eb563dc5dc963ba6d60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// Copyright (c) 2018, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: dgemm
///
/// PURPOSE: This program tests the efficiency with which a dense matrix
/// dense multiplication is carried out
///
/// USAGE: The program takes as input the matrix order,
/// the number of times the matrix-matrix multiplication
/// is carried out, and, optionally, a tile size for matrix
/// blocking
///
/// <progname> <# iterations> <matrix order>
///
/// The output consists of diagnostics to make sure the
/// algorithm worked, and of timing statistics.
///
/// FUNCTIONS CALLED:
///
/// Other than OpenMP or standard C functions, the following
/// functions are used in this program:
///
/// cblasDgemm()
///
/// HISTORY: Written by Rob Van der Wijngaart, February 2009.
/// Converted to C++11 by Jeff Hammond, December, 2017.
///
//////////////////////////////////////////////////////////////////////
#include "prk_util.h"
#include "prk_cuda.h"
#include "prk_mpi.h"
__global__ void init(int order, double * A, double * B, double * C)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i<order) && (j<order)) {
A[i*order+j] = i;
B[i*order+j] = i;
C[i*order+j] = 0;
}
}
__global__ void init(int order, double * C)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i<order) && (j<order)) {
C[i*order+j] = 0;
}
}
int main(int argc, char * argv[])
{
{
prk::MPI::state mpi(argc,argv);
int np = prk::MPI::size();
int me = prk::MPI::rank();
prk::CUDA::info cuda;
if (me == 0) {
std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl;
std::cout << "MPI/C++11/CUBLAS Dense matrix-matrix multiplication: C += A x B" << std::endl;
cuda.print();
}
int ngpu = cuda.num_gpus();
if (ngpu != np) {
std::cout << "Please run with one MPI process per GPU (single-node only)" << std::endl;
return (np-ngpu);
}
// assign a GPU per MPI process
cuda.set_gpu(me);
//////////////////////////////////////////////////////////////////////
/// Read and test input parameters
//////////////////////////////////////////////////////////////////////
int iterations;
int order;
try {
if (argc < 2) {
throw "Usage: <# iterations> <matrix order>";
}
iterations = std::atoi(argv[1]);
if (iterations < 1) {
throw "ERROR: iterations must be >= 1";
}
order = std::atoi(argv[2]);
if (order <= 0) {
throw "ERROR: Matrix Order must be greater than 0";
} else if (order > ::floor(std::sqrt(INT_MAX))) {
throw "ERROR: matrix dimension too large - overflow risk";
}
}
catch (const char * e) {
std::cout << e << std::endl;
return 1;
}
if (me == 0) {
std::cout << "Number of iterations = " << iterations << std::endl;
std::cout << "Matrix order = " << order << std::endl;
}
hipblasHandle_t h;
prk::CUDA::check( hipblasCreate(&h) );
const int tile_size = 32;
dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1);
dim3 dimBlock(tile_size, tile_size, 1);
cuda.checkDims(dimBlock, dimGrid);
//////////////////////////////////////////////////////////////////////
// Allocate space for matrices
//////////////////////////////////////////////////////////////////////
double dgemm_time(0);
const size_t nelems = (size_t)order * (size_t)order;
const size_t bytes = nelems * sizeof(double);
// host buffers
double * h_c;
prk::CUDA::check( hipHostMalloc((void**)&h_c, bytes) );
// device buffers
double * d_a;
double * d_b;
double * d_c;
prk::CUDA::check( hipMalloc((void**)&d_a, bytes) );
prk::CUDA::check( hipMalloc((void**)&d_b, bytes) );
prk::CUDA::check( hipMalloc((void**)&d_c, bytes) );
hipLaunchKernelGGL(( init), dim3(dimGrid), dim3(dimBlock), 0, 0, order, d_a, d_b, d_c);
{
for (int iter = 0; iter<=iterations; iter++) {
if (iter==1) {
prk::MPI::barrier();
dgemm_time = prk::wtime();
}
double alpha = 1.0;
double beta = 1.0;
prk::CUDA::check( hipblasDgemm(h,
HIPBLAS_OP_N, HIPBLAS_OP_N, // opA, opB
order, order, order, // m, n, k
&alpha, // alpha
d_a, order, // A, lda
d_b, order, // B, ldb
&beta, // beta
d_c, order) ); // C, ldc
prk::CUDA::check( hipDeviceSynchronize() );
}
prk::MPI::barrier();
dgemm_time = prk::wtime() - dgemm_time;
}
// copy output back to host
prk::CUDA::check( hipMemcpyAsync(&(h_c[0]), d_c, bytes, hipMemcpyDeviceToHost) );
prk::CUDA::check( hipFree(d_c) );
prk::CUDA::check( hipFree(d_b) );
prk::CUDA::check( hipFree(d_a) );
prk::CUDA::check( hipblasDestroy(h) );
prk::CUDA::check( hipDeviceSynchronize() );
//////////////////////////////////////////////////////////////////////
/// Analyze and output results
//////////////////////////////////////////////////////////////////////
const double epsilon = 1.0e-8;
const double forder = static_cast<double>(order);
const double reference = 0.25 * ::pow(forder,3) * ::pow(forder-1.0,2) * (iterations+1);
double residuum(0);
const auto checksum = prk::reduce( &(h_c[0]), &(h_c[nelems]), 0.0);
residuum += std::abs(checksum-reference)/reference;
// take the global max to make sure everyone passes...
residuum = prk::MPI::max(residuum);
#ifndef VERBOSE
if (residuum >= epsilon)
#endif
{
for (int r=0; r<np; ++r) {
prk::MPI::barrier();
if (r==me) {
std::cout << "Reference checksum = " << reference << "\n"
<< "Actual checksum = " << residuum << std::endl;
}
}
}
if (residuum < epsilon) {
prk::MPI::barrier();
if (me==0) {
std::cout << "Solution validates" << std::endl;
}
auto time = dgemm_time/iterations;
auto nflops = 2.0 * ::pow(forder,3);
auto rate = 1.0e-6 * nflops/time;
double minrate = prk::MPI::min(rate);
double maxrate = prk::MPI::max(rate);
double avgrate = prk::MPI::avg(rate);
double mintime = prk::MPI::min(time);
double maxtime = prk::MPI::max(time);
double avgtime = prk::MPI::avg(time);
if (me==0) {
std::cout << "MIN Rate (MF/s): " << minrate << " Avg time (s): " << maxtime << std::endl;
std::cout << "MAX Rate (MF/s): " << maxrate << " Avg time (s): " << mintime << std::endl;
std::cout << "AVG Rate (MF/s): " << avgrate << " Avg time (s): " << avgtime << std::endl;
}
}
prk::CUDA::check( hipHostFree(h_c) );
} // prk::MPI:state goes out of scope here
return 0;
}
| 8aba5231be50342849b19eb563dc5dc963ba6d60.cu | ///
/// Copyright (c) 2018, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: dgemm
///
/// PURPOSE: This program tests the efficiency with which a dense matrix
/// dense multiplication is carried out
///
/// USAGE: The program takes as input the matrix order,
/// the number of times the matrix-matrix multiplication
/// is carried out, and, optionally, a tile size for matrix
/// blocking
///
/// <progname> <# iterations> <matrix order>
///
/// The output consists of diagnostics to make sure the
/// algorithm worked, and of timing statistics.
///
/// FUNCTIONS CALLED:
///
/// Other than OpenMP or standard C functions, the following
/// functions are used in this program:
///
/// cblasDgemm()
///
/// HISTORY: Written by Rob Van der Wijngaart, February 2009.
/// Converted to C++11 by Jeff Hammond, December, 2017.
///
//////////////////////////////////////////////////////////////////////
#include "prk_util.h"
#include "prk_cuda.h"
#include "prk_mpi.h"
__global__ void init(int order, double * A, double * B, double * C)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i<order) && (j<order)) {
A[i*order+j] = i;
B[i*order+j] = i;
C[i*order+j] = 0;
}
}
__global__ void init(int order, double * C)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i<order) && (j<order)) {
C[i*order+j] = 0;
}
}
int main(int argc, char * argv[])
{
{
prk::MPI::state mpi(argc,argv);
int np = prk::MPI::size();
int me = prk::MPI::rank();
prk::CUDA::info cuda;
if (me == 0) {
std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl;
std::cout << "MPI/C++11/CUBLAS Dense matrix-matrix multiplication: C += A x B" << std::endl;
cuda.print();
}
int ngpu = cuda.num_gpus();
if (ngpu != np) {
std::cout << "Please run with one MPI process per GPU (single-node only)" << std::endl;
return (np-ngpu);
}
// assign a GPU per MPI process
cuda.set_gpu(me);
//////////////////////////////////////////////////////////////////////
/// Read and test input parameters
//////////////////////////////////////////////////////////////////////
int iterations;
int order;
try {
if (argc < 2) {
throw "Usage: <# iterations> <matrix order>";
}
iterations = std::atoi(argv[1]);
if (iterations < 1) {
throw "ERROR: iterations must be >= 1";
}
order = std::atoi(argv[2]);
if (order <= 0) {
throw "ERROR: Matrix Order must be greater than 0";
} else if (order > std::floor(std::sqrt(INT_MAX))) {
throw "ERROR: matrix dimension too large - overflow risk";
}
}
catch (const char * e) {
std::cout << e << std::endl;
return 1;
}
if (me == 0) {
std::cout << "Number of iterations = " << iterations << std::endl;
std::cout << "Matrix order = " << order << std::endl;
}
cublasHandle_t h;
prk::CUDA::check( cublasCreate(&h) );
const int tile_size = 32;
dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1);
dim3 dimBlock(tile_size, tile_size, 1);
cuda.checkDims(dimBlock, dimGrid);
//////////////////////////////////////////////////////////////////////
// Allocate space for matrices
//////////////////////////////////////////////////////////////////////
double dgemm_time(0);
const size_t nelems = (size_t)order * (size_t)order;
const size_t bytes = nelems * sizeof(double);
// host buffers
double * h_c;
prk::CUDA::check( cudaMallocHost((void**)&h_c, bytes) );
// device buffers
double * d_a;
double * d_b;
double * d_c;
prk::CUDA::check( cudaMalloc((void**)&d_a, bytes) );
prk::CUDA::check( cudaMalloc((void**)&d_b, bytes) );
prk::CUDA::check( cudaMalloc((void**)&d_c, bytes) );
init<<<dimGrid, dimBlock>>>(order, d_a, d_b, d_c);
{
for (int iter = 0; iter<=iterations; iter++) {
if (iter==1) {
prk::MPI::barrier();
dgemm_time = prk::wtime();
}
double alpha = 1.0;
double beta = 1.0;
prk::CUDA::check( cublasDgemm(h,
CUBLAS_OP_N, CUBLAS_OP_N, // opA, opB
order, order, order, // m, n, k
&alpha, // alpha
d_a, order, // A, lda
d_b, order, // B, ldb
&beta, // beta
d_c, order) ); // C, ldc
prk::CUDA::check( cudaDeviceSynchronize() );
}
prk::MPI::barrier();
dgemm_time = prk::wtime() - dgemm_time;
}
// copy output back to host
prk::CUDA::check( cudaMemcpyAsync(&(h_c[0]), d_c, bytes, cudaMemcpyDeviceToHost) );
prk::CUDA::check( cudaFree(d_c) );
prk::CUDA::check( cudaFree(d_b) );
prk::CUDA::check( cudaFree(d_a) );
prk::CUDA::check( cublasDestroy(h) );
prk::CUDA::check( cudaDeviceSynchronize() );
//////////////////////////////////////////////////////////////////////
/// Analyze and output results
//////////////////////////////////////////////////////////////////////
const double epsilon = 1.0e-8;
const double forder = static_cast<double>(order);
const double reference = 0.25 * std::pow(forder,3) * std::pow(forder-1.0,2) * (iterations+1);
double residuum(0);
const auto checksum = prk::reduce( &(h_c[0]), &(h_c[nelems]), 0.0);
residuum += std::abs(checksum-reference)/reference;
// take the global max to make sure everyone passes...
residuum = prk::MPI::max(residuum);
#ifndef VERBOSE
if (residuum >= epsilon)
#endif
{
for (int r=0; r<np; ++r) {
prk::MPI::barrier();
if (r==me) {
std::cout << "Reference checksum = " << reference << "\n"
<< "Actual checksum = " << residuum << std::endl;
}
}
}
if (residuum < epsilon) {
prk::MPI::barrier();
if (me==0) {
std::cout << "Solution validates" << std::endl;
}
auto time = dgemm_time/iterations;
auto nflops = 2.0 * std::pow(forder,3);
auto rate = 1.0e-6 * nflops/time;
double minrate = prk::MPI::min(rate);
double maxrate = prk::MPI::max(rate);
double avgrate = prk::MPI::avg(rate);
double mintime = prk::MPI::min(time);
double maxtime = prk::MPI::max(time);
double avgtime = prk::MPI::avg(time);
if (me==0) {
std::cout << "MIN Rate (MF/s): " << minrate << " Avg time (s): " << maxtime << std::endl;
std::cout << "MAX Rate (MF/s): " << maxrate << " Avg time (s): " << mintime << std::endl;
std::cout << "AVG Rate (MF/s): " << avgrate << " Avg time (s): " << avgtime << std::endl;
}
}
prk::CUDA::check( cudaFreeHost(h_c) );
} // prk::MPI:state goes out of scope here
return 0;
}
|
910206cca8aaa0478fee53cf4e783bfb67ba0805.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------
// Copyright (c) 2011, Brown University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// (3) Neither the name of Brown University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY BROWN UNIVERSITY AS IS WITH NO
// WARRANTIES OR REPRESENTATIONS OF ANY KIND WHATSOEVER EITHER EXPRESS OR
// IMPLIED, INCLUDING WITHOUT LIMITATION ANY WARRANTY OF DESIGN OR
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, EACH OF WHICH ARE
// SPECIFICALLY DISCLAIMED, NOR ANY WARRANTY OR REPRESENTATIONS THAT THE
// SOFTWARE IS ERROR FREE OR THAT THE SOFTWARE WILL NOT INFRINGE ANY
// PATENT, COPYRIGHT, TRADEMARK, OR OTHER THIRD PARTY PROPRIETARY RIGHTS.
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY OR CAUSE OF ACTION, WHETHER IN CONTRACT,
// STRICT LIABILITY, TORT, NEGLIGENCE OR OTHERWISE, ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. ANY RECIPIENT OR USER OF THIS SOFTWARE ACKNOWLEDGES THE
// FOREGOING, AND ACCEPTS ALL RISKS AND LIABILITIES THAT MAY ARISE FROM
// THEIR USE OF THE SOFTWARE.
// ---------------------------------
/// \file RayCaster_kernels.cu
/// \author Andy Loomis, Benjamin Knorlein
#include "RayCaster_kernels.h"
#include <cutil_inline.h>
#include <cutil_math.h>
struct Ray
{
float3 origin;
float3 direction;
};
struct float3x4
{
float4 m[3];
};
// Forward declarations
__global__
void cuda_volume_render_kernel(float* output, size_t width, size_t height,
float step, float intensity, float cutoff);
// Global variables
static texture<unsigned short, 3, hipReadModeNormalizedFloat> tex;
static __constant__ float4 d_viewport;
static __constant__ float3x4 d_invModelView;
namespace xromm
{
namespace gpu
{
void volume_bind_array(const hipArray* array)
{
// Setup 3D texture.
tex.normalized = true;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
// Bind array to 3D texture.
cutilSafeCall(hipBindTextureToArray(tex, array));
}
void volume_viewport(float x, float y, float width, float height)
{
float4 viewport = make_float4(x, y, width, height);
cutilSafeCall(hipMemcpyToSymbol(d_viewport, &viewport, sizeof(float4)));
}
void volume_render(float* buffer, size_t width, size_t height,
const float* invModelView, float step, float intensity,
float cutoff)
{
// Copy the matrix to the device.
cutilSafeCall(hipMemcpyToSymbol(d_invModelView,
invModelView,
sizeof(float3x4)));
// Calculate the block and grid sizes.
dim3 blockDim(16, 16);
dim3 gridDim((width+blockDim.x-1)/blockDim.x,
(height+blockDim.y-1)/blockDim.y);
// Call the kernel
hipLaunchKernelGGL(( cuda_volume_render_kernel), dim3(gridDim), dim3(blockDim), 0, 0, buffer, width, height,
step, intensity, cutoff);
//This crashes it under windows
//cutilSafeCall(hipDeviceSynchronize());
//cutilSafeCall(hipGetLastError());
}
} // namespace gpu
} // namespace xromm
// Device and Kernel functions
// Intersect a ray with an axis aligned box.
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int box_intersect(Ray ray, float3 boxMin, float3 boxMax, float *_near,
float *_far)
{
// Compute intersection of ray with all six planes.
float3 invDirection = make_float3(1.0f) / ray.direction;
float3 tBot = invDirection*(boxMin-ray.origin);
float3 tTop = invDirection*(boxMax-ray.origin);
// Re-order intersections to find smallest and largest on each axis.
float3 tMin = fminf(tTop, tBot);
float3 tMax = fmaxf(tTop, tBot);
// Find the largest tMin and the smallest tMax.
*_near = fmaxf(fmaxf(tMin.x, tMin.y), tMin.z);
*_far = fminf(fminf(tMax.x, tMax.y), tMax.z);
return *_far > *_near;
}
// Transform vector by matrix (no translation).
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
return make_float3(dot(v, make_float3(M.m[0])),
dot(v, make_float3(M.m[1])),
dot(v, make_float3(M.m[2])));
}
// Transform vector by matrix with translation.
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
return make_float4(dot(v, M.m[0]),
dot(v, M.m[1]),
dot(v, M.m[2]),
1.0f);
}
// Render the volume using ray marching.
__global__
void cuda_volume_render_kernel(float* buffer, size_t width, size_t height,
float step, float intensity, float cutoff)
{
uint x = blockIdx.x*blockDim.x+threadIdx.x;
uint y = blockIdx.y*blockDim.y+threadIdx.y;
if (x > width-1 || y > height-1) {
return;
}
// Calculate the normalized device coordinates using the viewport
float u = d_viewport.x+d_viewport.z*(x/(float)width);
float v = d_viewport.y+d_viewport.w*(y/(float)height);
// Determine the look ray in camera space.
float4 eye = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
float3 look = step*normalize(make_float3(u, v, -2.0f));
// Calculate the ray in world space.
Ray ray = { make_float3(mul(d_invModelView, eye)),
mul(d_invModelView, look) };
// Find intersection with box.
float3 boxMin = make_float3(0.0f, 0.0f, -1.0f);
float3 boxMax = make_float3(1.0f, 1.0f, 0.0f);
float _near;
float _far;
if (!box_intersect(ray, boxMin, boxMax, &_near, &_far)) {
buffer[y*width+x] = 0.0f;
return;
}
// Clamp to near plane.
if (_near < 0.0f) _near = 0.0f;
// Preform the ray marching from back to front.
float t = _far;
float density = 0.0f;
while (t > _near) {
float3 point = ray.origin+t*ray.direction;
float sample = tex3D(tex,point.x,1.0f-point.y,-point.z);
density += sample > cutoff? step*sample: 0.0f;
t -= 1.0f;
}
buffer[y*width+x] = clamp(density/intensity, 0.0f, 1.0f);
}
| 910206cca8aaa0478fee53cf4e783bfb67ba0805.cu | // ----------------------------------
// Copyright (c) 2011, Brown University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// (3) Neither the name of Brown University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY BROWN UNIVERSITY “AS IS” WITH NO
// WARRANTIES OR REPRESENTATIONS OF ANY KIND WHATSOEVER EITHER EXPRESS OR
// IMPLIED, INCLUDING WITHOUT LIMITATION ANY WARRANTY OF DESIGN OR
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, EACH OF WHICH ARE
// SPECIFICALLY DISCLAIMED, NOR ANY WARRANTY OR REPRESENTATIONS THAT THE
// SOFTWARE IS ERROR FREE OR THAT THE SOFTWARE WILL NOT INFRINGE ANY
// PATENT, COPYRIGHT, TRADEMARK, OR OTHER THIRD PARTY PROPRIETARY RIGHTS.
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY OR CAUSE OF ACTION, WHETHER IN CONTRACT,
// STRICT LIABILITY, TORT, NEGLIGENCE OR OTHERWISE, ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. ANY RECIPIENT OR USER OF THIS SOFTWARE ACKNOWLEDGES THE
// FOREGOING, AND ACCEPTS ALL RISKS AND LIABILITIES THAT MAY ARISE FROM
// THEIR USE OF THE SOFTWARE.
// ---------------------------------
/// \file RayCaster_kernels.cu
/// \author Andy Loomis, Benjamin Knorlein
#include "RayCaster_kernels.h"
#include <cutil_inline.h>
#include <cutil_math.h>
struct Ray
{
float3 origin;
float3 direction;
};
struct float3x4
{
float4 m[3];
};
// Forward declarations
__global__
void cuda_volume_render_kernel(float* output, size_t width, size_t height,
float step, float intensity, float cutoff);
// Global variables
static texture<unsigned short, 3, cudaReadModeNormalizedFloat> tex;
static __constant__ float4 d_viewport;
static __constant__ float3x4 d_invModelView;
namespace xromm
{
namespace gpu
{
void volume_bind_array(const cudaArray* array)
{
// Setup 3D texture.
tex.normalized = true;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
// Bind array to 3D texture.
cutilSafeCall(cudaBindTextureToArray(tex, array));
}
void volume_viewport(float x, float y, float width, float height)
{
float4 viewport = make_float4(x, y, width, height);
cutilSafeCall(cudaMemcpyToSymbol(d_viewport, &viewport, sizeof(float4)));
}
void volume_render(float* buffer, size_t width, size_t height,
const float* invModelView, float step, float intensity,
float cutoff)
{
// Copy the matrix to the device.
cutilSafeCall(cudaMemcpyToSymbol(d_invModelView,
invModelView,
sizeof(float3x4)));
// Calculate the block and grid sizes.
dim3 blockDim(16, 16);
dim3 gridDim((width+blockDim.x-1)/blockDim.x,
(height+blockDim.y-1)/blockDim.y);
// Call the kernel
cuda_volume_render_kernel<<<gridDim, blockDim>>>(buffer, width, height,
step, intensity, cutoff);
//This crashes it under windows
//cutilSafeCall(cudaThreadSynchronize());
//cutilSafeCall(cudaGetLastError());
}
} // namespace gpu
} // namespace xromm
// Device and Kernel functions
// Intersect a ray with an axis aligned box.
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int box_intersect(Ray ray, float3 boxMin, float3 boxMax, float *_near,
float *_far)
{
// Compute intersection of ray with all six planes.
float3 invDirection = make_float3(1.0f) / ray.direction;
float3 tBot = invDirection*(boxMin-ray.origin);
float3 tTop = invDirection*(boxMax-ray.origin);
// Re-order intersections to find smallest and largest on each axis.
float3 tMin = fminf(tTop, tBot);
float3 tMax = fmaxf(tTop, tBot);
// Find the largest tMin and the smallest tMax.
*_near = fmaxf(fmaxf(tMin.x, tMin.y), tMin.z);
*_far = fminf(fminf(tMax.x, tMax.y), tMax.z);
return *_far > *_near;
}
// Transform vector by matrix (no translation).
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
return make_float3(dot(v, make_float3(M.m[0])),
dot(v, make_float3(M.m[1])),
dot(v, make_float3(M.m[2])));
}
// Transform vector by matrix with translation.
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
return make_float4(dot(v, M.m[0]),
dot(v, M.m[1]),
dot(v, M.m[2]),
1.0f);
}
// Render the volume using ray marching.
__global__
void cuda_volume_render_kernel(float* buffer, size_t width, size_t height,
float step, float intensity, float cutoff)
{
uint x = blockIdx.x*blockDim.x+threadIdx.x;
uint y = blockIdx.y*blockDim.y+threadIdx.y;
if (x > width-1 || y > height-1) {
return;
}
// Calculate the normalized device coordinates using the viewport
float u = d_viewport.x+d_viewport.z*(x/(float)width);
float v = d_viewport.y+d_viewport.w*(y/(float)height);
// Determine the look ray in camera space.
float4 eye = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
float3 look = step*normalize(make_float3(u, v, -2.0f));
// Calculate the ray in world space.
Ray ray = { make_float3(mul(d_invModelView, eye)),
mul(d_invModelView, look) };
// Find intersection with box.
float3 boxMin = make_float3(0.0f, 0.0f, -1.0f);
float3 boxMax = make_float3(1.0f, 1.0f, 0.0f);
float _near;
float _far;
if (!box_intersect(ray, boxMin, boxMax, &_near, &_far)) {
buffer[y*width+x] = 0.0f;
return;
}
// Clamp to near plane.
if (_near < 0.0f) _near = 0.0f;
// Preform the ray marching from back to front.
float t = _far;
float density = 0.0f;
while (t > _near) {
float3 point = ray.origin+t*ray.direction;
float sample = tex3D(tex,point.x,1.0f-point.y,-point.z);
density += sample > cutoff? step*sample: 0.0f;
t -= 1.0f;
}
buffer[y*width+x] = clamp(density/intensity, 0.0f, 1.0f);
}
|
9fc7ab6a75fbd2948f8dbf25598995e089b16ba7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the F, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <thrust/host_vector.h>
#include <stdio.h>
__global__ void findBorder(const uchar4* const d_sourceImg,
unsigned int* d_count,
float* d_red,
float* d_green,
float* d_blue)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_sourceImg[idx].x != 255 || d_sourceImg[idx].y != 255 || d_sourceImg[idx].z != 255){
if(threadIdx.x >= 1) atomicAdd(&d_count[idx - 1], 1);
if(threadIdx.x + 1 < blockDim.x) atomicAdd(&d_count[idx + 1], 1);
if(blockIdx.x >= 1) atomicAdd(&d_count[idx - blockDim.x], 1);
if(blockIdx.x + 1 < gridDim.x) atomicAdd(&d_count[idx + blockDim.x], 1);
atomicAdd(&d_count[idx], 1);
}
d_red[idx] = d_sourceImg[idx].x;
d_green[idx] = d_sourceImg[idx].y;
d_blue[idx] = d_sourceImg[idx].z;
}
__global__ void initialize(const uchar4* const d_sourceImg,
const uchar4* const d_destImg,
unsigned int* d_count,
float* d_red,
float* d_green,
float* d_blue,
float* d_dif_red,
float* d_dif_green,
float* d_dif_blue)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_sourceImg[idx].x != 255 || d_sourceImg[idx].y != 255 || d_sourceImg[idx].z != 255){
if(d_count[idx] == 5){
d_dif_red[idx] = 4.f*d_sourceImg[idx].x - d_sourceImg[idx - 1].x
- d_sourceImg[idx + 1].x
- d_sourceImg[idx - blockDim.x].x
- d_sourceImg[idx + blockDim.x].x;
d_dif_green[idx] = 4.f*d_sourceImg[idx].y - d_sourceImg[idx - 1].y
- d_sourceImg[idx + 1].y
- d_sourceImg[idx - blockDim.x].y
- d_sourceImg[idx + blockDim.x].y;
d_dif_blue[idx] = 4.f*d_sourceImg[idx].z - d_sourceImg[idx - 1].z
- d_sourceImg[idx + 1].z
- d_sourceImg[idx - blockDim.x].z
- d_sourceImg[idx + blockDim.x].z;
}else{
d_red[idx] = d_destImg[idx].x;
d_green[idx]= d_destImg[idx].y;
d_blue[idx] = d_destImg[idx].z;
}
}
}
__global__ void run(const uchar4* const d_sourceImg,
unsigned int* d_count,
float* d_prev_r,
float* d_prev_g,
float* d_prev_b,
float* d_next_r,
float* d_next_g,
float* d_next_b,
float* d_dif_r,
float* d_dif_g,
float* d_dif_b)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_count[idx] == 5){
d_next_r[idx] = min(255.f,max(0.f, ( d_dif_r[idx]
+ d_prev_r[idx - 1]
+ d_prev_r[idx + 1]
+ d_prev_r[idx - blockDim.x]
+ d_prev_r[idx + blockDim.x])/4.f));
d_next_g[idx] = min(255.f,max(0.f, ( d_dif_g[idx]
+ d_prev_g[idx - 1]
+ d_prev_g[idx + 1]
+ d_prev_g[idx - blockDim.x]
+ d_prev_g[idx + blockDim.x])/4.f));
d_next_b[idx] = min(255.f,max(0.f, ( d_dif_b[idx]
+ d_prev_b[idx - 1]
+ d_prev_b[idx + 1]
+ d_prev_b[idx - blockDim.x]
+ d_prev_b[idx + blockDim.x])/4.f));
}
else{
d_next_r[idx] = d_prev_r[idx];
d_next_g[idx] = d_prev_g[idx];
d_next_b[idx] = d_prev_b[idx];
}
}
__global__ void finalize(uchar4* d_blendedImg,
uchar4* d_destImg,
unsigned int* d_count,
float* d_red,
float* d_green,
float* d_blue)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_count[idx] == 5){
d_blendedImg[idx].x = d_red[idx];
d_blendedImg[idx].y = d_green[idx];
d_blendedImg[idx].z = d_blue[idx];
}else{
d_blendedImg[idx] = d_destImg[idx];
}
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
uchar4* d_sourceImg;
uchar4* d_destImg;
uchar4* d_blendedImg;
unsigned int* d_count;
float *d_dif_r,*d_dif_g,*d_dif_b;
float *d_prev_r,*d_prev_g,*d_prev_b;
float *d_next_r,*d_next_g,*d_next_b;
checkCudaErrors(hipMalloc(&d_blendedImg, sizeof(uchar4)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_sourceImg, sizeof(uchar4)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_destImg, sizeof(uchar4)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_count, sizeof(unsigned int)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_dif_r, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_dif_g, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_dif_b, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_prev_r, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_prev_g, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_prev_b, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_next_r, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_next_g, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(hipMalloc(&d_next_b, sizeof(float)*numRowsSource*numColsSource));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(d_sourceImg, h_sourceImg, sizeof(uchar4)*numRowsSource*numColsSource, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_destImg, h_destImg, sizeof(uchar4)*numRowsSource*numColsSource, hipMemcpyHostToDevice));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemset(d_count, 0, sizeof(unsigned int)*numRowsSource*numColsSource));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( findBorder) , dim3(numRowsSource),dim3(numColsSource), 0, 0, d_sourceImg, d_count, d_prev_r, d_prev_g, d_prev_b);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( initialize) , dim3(numRowsSource),dim3(numColsSource), 0, 0, d_sourceImg, d_destImg, d_count, d_prev_r, d_prev_g, d_prev_b, d_dif_r, d_dif_g, d_dif_b);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
for(int i=0;i < 800; i++){
hipLaunchKernelGGL(( run) , dim3(numRowsSource),dim3(numColsSource), 0, 0, d_sourceImg, d_count, d_prev_r, d_prev_g, d_prev_b, d_next_r, d_next_g, d_next_b, d_dif_r, d_dif_g, d_dif_b);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
std::swap(d_next_r, d_prev_r);
std::swap(d_next_g, d_prev_g);
std::swap(d_next_b, d_prev_b);
}
hipLaunchKernelGGL(( finalize) , dim3(numRowsSource),dim3(numColsSource), 0, 0, d_blendedImg, d_destImg, d_count, d_prev_r, d_prev_g, d_prev_b);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_blendedImg, d_blendedImg, sizeof(uchar4)*numRowsSource*numColsSource, hipMemcpyDeviceToHost));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our intial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
Since this is final assignment we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
to catch any errors that happened while executing the kernel.
*/
}
| 9fc7ab6a75fbd2948f8dbf25598995e089b16ba7.cu | //Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the F, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <thrust/host_vector.h>
#include <stdio.h>
__global__ void findBorder(const uchar4* const d_sourceImg,
unsigned int* d_count,
float* d_red,
float* d_green,
float* d_blue)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_sourceImg[idx].x != 255 || d_sourceImg[idx].y != 255 || d_sourceImg[idx].z != 255){
if(threadIdx.x >= 1) atomicAdd(&d_count[idx - 1], 1);
if(threadIdx.x + 1 < blockDim.x) atomicAdd(&d_count[idx + 1], 1);
if(blockIdx.x >= 1) atomicAdd(&d_count[idx - blockDim.x], 1);
if(blockIdx.x + 1 < gridDim.x) atomicAdd(&d_count[idx + blockDim.x], 1);
atomicAdd(&d_count[idx], 1);
}
d_red[idx] = d_sourceImg[idx].x;
d_green[idx] = d_sourceImg[idx].y;
d_blue[idx] = d_sourceImg[idx].z;
}
__global__ void initialize(const uchar4* const d_sourceImg,
const uchar4* const d_destImg,
unsigned int* d_count,
float* d_red,
float* d_green,
float* d_blue,
float* d_dif_red,
float* d_dif_green,
float* d_dif_blue)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_sourceImg[idx].x != 255 || d_sourceImg[idx].y != 255 || d_sourceImg[idx].z != 255){
if(d_count[idx] == 5){
d_dif_red[idx] = 4.f*d_sourceImg[idx].x - d_sourceImg[idx - 1].x
- d_sourceImg[idx + 1].x
- d_sourceImg[idx - blockDim.x].x
- d_sourceImg[idx + blockDim.x].x;
d_dif_green[idx] = 4.f*d_sourceImg[idx].y - d_sourceImg[idx - 1].y
- d_sourceImg[idx + 1].y
- d_sourceImg[idx - blockDim.x].y
- d_sourceImg[idx + blockDim.x].y;
d_dif_blue[idx] = 4.f*d_sourceImg[idx].z - d_sourceImg[idx - 1].z
- d_sourceImg[idx + 1].z
- d_sourceImg[idx - blockDim.x].z
- d_sourceImg[idx + blockDim.x].z;
}else{
d_red[idx] = d_destImg[idx].x;
d_green[idx]= d_destImg[idx].y;
d_blue[idx] = d_destImg[idx].z;
}
}
}
__global__ void run(const uchar4* const d_sourceImg,
unsigned int* d_count,
float* d_prev_r,
float* d_prev_g,
float* d_prev_b,
float* d_next_r,
float* d_next_g,
float* d_next_b,
float* d_dif_r,
float* d_dif_g,
float* d_dif_b)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_count[idx] == 5){
d_next_r[idx] = min(255.f,max(0.f, ( d_dif_r[idx]
+ d_prev_r[idx - 1]
+ d_prev_r[idx + 1]
+ d_prev_r[idx - blockDim.x]
+ d_prev_r[idx + blockDim.x])/4.f));
d_next_g[idx] = min(255.f,max(0.f, ( d_dif_g[idx]
+ d_prev_g[idx - 1]
+ d_prev_g[idx + 1]
+ d_prev_g[idx - blockDim.x]
+ d_prev_g[idx + blockDim.x])/4.f));
d_next_b[idx] = min(255.f,max(0.f, ( d_dif_b[idx]
+ d_prev_b[idx - 1]
+ d_prev_b[idx + 1]
+ d_prev_b[idx - blockDim.x]
+ d_prev_b[idx + blockDim.x])/4.f));
}
else{
d_next_r[idx] = d_prev_r[idx];
d_next_g[idx] = d_prev_g[idx];
d_next_b[idx] = d_prev_b[idx];
}
}
__global__ void finalize(uchar4* d_blendedImg,
uchar4* d_destImg,
unsigned int* d_count,
float* d_red,
float* d_green,
float* d_blue)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(d_count[idx] == 5){
d_blendedImg[idx].x = d_red[idx];
d_blendedImg[idx].y = d_green[idx];
d_blendedImg[idx].z = d_blue[idx];
}else{
d_blendedImg[idx] = d_destImg[idx];
}
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
uchar4* d_sourceImg;
uchar4* d_destImg;
uchar4* d_blendedImg;
unsigned int* d_count;
float *d_dif_r,*d_dif_g,*d_dif_b;
float *d_prev_r,*d_prev_g,*d_prev_b;
float *d_next_r,*d_next_g,*d_next_b;
checkCudaErrors(cudaMalloc(&d_blendedImg, sizeof(uchar4)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_sourceImg, sizeof(uchar4)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_destImg, sizeof(uchar4)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_count, sizeof(unsigned int)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_dif_r, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_dif_g, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_dif_b, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_prev_r, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_prev_g, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_prev_b, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_next_r, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_next_g, sizeof(float)*numRowsSource*numColsSource));
checkCudaErrors(cudaMalloc(&d_next_b, sizeof(float)*numRowsSource*numColsSource));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(d_sourceImg, h_sourceImg, sizeof(uchar4)*numRowsSource*numColsSource, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_destImg, h_destImg, sizeof(uchar4)*numRowsSource*numColsSource, cudaMemcpyHostToDevice));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemset(d_count, 0, sizeof(unsigned int)*numRowsSource*numColsSource));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
findBorder <<<numRowsSource,numColsSource>>> (d_sourceImg, d_count, d_prev_r, d_prev_g, d_prev_b);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
initialize <<<numRowsSource,numColsSource>>> (d_sourceImg, d_destImg, d_count, d_prev_r, d_prev_g, d_prev_b, d_dif_r, d_dif_g, d_dif_b);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
for(int i=0;i < 800; i++){
run <<<numRowsSource,numColsSource>>> (d_sourceImg, d_count, d_prev_r, d_prev_g, d_prev_b, d_next_r, d_next_g, d_next_b, d_dif_r, d_dif_g, d_dif_b);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
std::swap(d_next_r, d_prev_r);
std::swap(d_next_g, d_prev_g);
std::swap(d_next_b, d_prev_b);
}
finalize <<<numRowsSource,numColsSource>>> (d_blendedImg, d_destImg, d_count, d_prev_r, d_prev_g, d_prev_b);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_blendedImg, d_blendedImg, sizeof(uchar4)*numRowsSource*numColsSource, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our intial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
Since this is final assignment we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
to catch any errors that happened while executing the kernel.
*/
}
|
3ab68677d983760df38566d9c9a825e3ced852f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define N 1024
#define blockCount 1
//http://cacs.usc.edu/education/cs596/src/cuda/pi.cu
__global__ void calculatePi(float *out)
{
//Shared memeory for sum, only works with a blockCount of 1
__shared__ float cache[N];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int stepSize = blockDim.x * gridDim.x;
// y = root(1 - x^2)
// calculate y from x = threadID/N (giving an over estimate)
//sum of all y*x*4 = solution
float x = float(1) /(blockCount*N);
float thisX = float(threadID) / (blockCount*N);
float y;
float area = 0.0f;
//Increase the accuracy of the estimation by further splitting each thread rectangle into 3
int max = 3;
for (int i = 0; i < max; i++)
{
thisX += i * (x/ max);
y = sqrt(1 - thisX * thisX);
area += y * (x / max) * 4;
}
cache[threadID] = area;
__syncthreads();
if (threadIdx.x == 0)
{
float sum = 0.0;
for (int i = 0; i < N; i++)
{
sum += cache[i];
}
*out = sum;
}
}
int main()
{
float out, *d_out;
hipMalloc((void**)&d_out, sizeof(float));
hipMemcpy(d_out, &out, sizeof(float), hipMemcpyHostToDevice);
//Parallel pi calculation
hipLaunchKernelGGL(( calculatePi), dim3(blockCount), dim3(N) , 0, 0, d_out);
hipMemcpy(&out, d_out, sizeof(float), hipMemcpyDeviceToHost);
printf("%f\n", out);
hipFree(d_out);
return 0;
} | 3ab68677d983760df38566d9c9a825e3ced852f5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define N 1024
#define blockCount 1
//http://cacs.usc.edu/education/cs596/src/cuda/pi.cu
__global__ void calculatePi(float *out)
{
//Shared memeory for sum, only works with a blockCount of 1
__shared__ float cache[N];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int stepSize = blockDim.x * gridDim.x;
// y = root(1 - x^2)
// calculate y from x = threadID/N (giving an over estimate)
//sum of all y*x*4 = solution
float x = float(1) /(blockCount*N);
float thisX = float(threadID) / (blockCount*N);
float y;
float area = 0.0f;
//Increase the accuracy of the estimation by further splitting each thread rectangle into 3
int max = 3;
for (int i = 0; i < max; i++)
{
thisX += i * (x/ max);
y = sqrt(1 - thisX * thisX);
area += y * (x / max) * 4;
}
cache[threadID] = area;
__syncthreads();
if (threadIdx.x == 0)
{
float sum = 0.0;
for (int i = 0; i < N; i++)
{
sum += cache[i];
}
*out = sum;
}
}
int main()
{
float out, *d_out;
cudaMalloc((void**)&d_out, sizeof(float));
cudaMemcpy(d_out, &out, sizeof(float), cudaMemcpyHostToDevice);
//Parallel pi calculation
calculatePi<<<blockCount, N >>>(d_out);
cudaMemcpy(&out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
printf("%f\n", out);
cudaFree(d_out);
return 0;
} |
49c73f4d403f13b44bea1fd679a52d0c8b6762d2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
// Sphinx: #1
#include <stdlib.h>
#include <stdio.h>
#include <unordered_map>
#include <vector>
#include <cassert>
#include <hip/hip_runtime.h>
#include <cutensornet.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSORNET_STATUS_SUCCESS ) \
{ printf("Error: %s in line %d\n", cutensornetGetErrorString(err), __LINE__); \
fflush(stdout); \
} \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != hipSuccess ) \
{ printf("CUDA Error: %s in line %d\n", hipGetErrorString(err), __LINE__); \
fflush(stdout); \
} \
};
struct GPUTimer
{
GPUTimer(hipStream_t stream): stream_(stream)
{
hipEventCreate(&start_);
hipEventCreate(&stop_);
}
~GPUTimer()
{
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
void start()
{
hipEventRecord(start_, stream_);
}
float seconds()
{
hipEventRecord(stop_, stream_);
hipEventSynchronize(stop_);
float time;
hipEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
hipEvent_t start_, stop_;
hipStream_t stream_;
};
int main()
{
static_assert(sizeof(size_t) == sizeof(int64_t), "Please build this sample on a 64-bit architecture!");
bool verbose = true;
// Check cuTensorNet version
const size_t cuTensornetVersion = cutensornetGetVersion();
if(verbose)
printf("cuTensorNet version: %ld\n", cuTensornetVersion);
// Set GPU device
int numDevices {0};
HANDLE_CUDA_ERROR( hipGetDeviceCount(&numDevices) );
const int deviceId = 0;
HANDLE_CUDA_ERROR( hipSetDevice(deviceId) );
hipDeviceProp_t prop;
HANDLE_CUDA_ERROR( hipGetDeviceProperties(&prop, deviceId) );
if(verbose) {
printf("===== device info ======\n");
printf("GPU-name:%s\n", prop.name);
printf("GPU-clock:%d\n", prop.clockRate);
printf("GPU-memoryClock:%d\n", prop.memoryClockRate);
printf("GPU-nSM:%d\n", prop.multiProcessorCount);
printf("GPU-major:%d\n", prop.major);
printf("GPU-minor:%d\n", prop.minor);
printf("========================\n");
}
typedef float floatType;
hipDataType typeData = HIP_R_32F;
cutensornetComputeType_t typeCompute = CUTENSORNET_COMPUTE_32F;
if(verbose)
printf("Included headers and defined data types\n");
// Sphinx: #2
/**********************
* Computing: R_{k,l} = A_{a,b,c,d,e,f} B_{b,g,h,e,i,j} C_{m,a,g,f,i,k} D_{l,c,h,d,j,m}
**********************/
constexpr int32_t numInputs = 4;
// Create vectors of tensor modes
std::vector<int32_t> modesA{'a','b','c','d','e','f'};
std::vector<int32_t> modesB{'b','g','h','e','i','j'};
std::vector<int32_t> modesC{'m','a','g','f','i','k'};
std::vector<int32_t> modesD{'l','c','h','d','j','m'};
std::vector<int32_t> modesR{'k','l'};
// Set mode extents
std::unordered_map<int32_t, int64_t> extent;
extent['a'] = 16;
extent['b'] = 16;
extent['c'] = 16;
extent['d'] = 16;
extent['e'] = 16;
extent['f'] = 16;
extent['g'] = 16;
extent['h'] = 16;
extent['i'] = 16;
extent['j'] = 16;
extent['k'] = 16;
extent['l'] = 16;
extent['m'] = 16;
// Create a vector of extents for each tensor
std::vector<int64_t> extentA;
for (auto mode : modesA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentB;
for (auto mode : modesB)
extentB.push_back(extent[mode]);
std::vector<int64_t> extentC;
for (auto mode : modesC)
extentC.push_back(extent[mode]);
std::vector<int64_t> extentD;
for (auto mode : modesD)
extentD.push_back(extent[mode]);
std::vector<int64_t> extentR;
for (auto mode : modesR)
extentR.push_back(extent[mode]);
if(verbose)
printf("Defined tensor network, modes, and extents\n");
// Sphinx: #3
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modesA)
elementsA *= extent[mode];
size_t elementsB = 1;
for (auto mode : modesB)
elementsB *= extent[mode];
size_t elementsC = 1;
for (auto mode : modesC)
elementsC *= extent[mode];
size_t elementsD = 1;
for (auto mode : modesD)
elementsD *= extent[mode];
size_t elementsR = 1;
for (auto mode : modesR)
elementsR *= extent[mode];
size_t sizeA = sizeof(floatType) * elementsA;
size_t sizeB = sizeof(floatType) * elementsB;
size_t sizeC = sizeof(floatType) * elementsC;
size_t sizeD = sizeof(floatType) * elementsD;
size_t sizeR = sizeof(floatType) * elementsR;
if(verbose)
printf("Total GPU memory used for tensor storage: %.2f GiB\n",
(sizeA + sizeB + sizeC + sizeD + sizeR) / 1024. /1024. / 1024);
void* rawDataIn_d[numInputs];
void* R_d;
HANDLE_CUDA_ERROR( hipMalloc((void**) &rawDataIn_d[0], sizeA) );
HANDLE_CUDA_ERROR( hipMalloc((void**) &rawDataIn_d[1], sizeB) );
HANDLE_CUDA_ERROR( hipMalloc((void**) &rawDataIn_d[2], sizeC) );
HANDLE_CUDA_ERROR( hipMalloc((void**) &rawDataIn_d[3], sizeD) );
HANDLE_CUDA_ERROR( hipMalloc((void**) &R_d, sizeR));
floatType *A = (floatType*) malloc(sizeof(floatType) * elementsA);
floatType *B = (floatType*) malloc(sizeof(floatType) * elementsB);
floatType *C = (floatType*) malloc(sizeof(floatType) * elementsC);
floatType *D = (floatType*) malloc(sizeof(floatType) * elementsD);
floatType *R = (floatType*) malloc(sizeof(floatType) * elementsR);
if (A == NULL || B == NULL || C == NULL || D == NULL || R == NULL)
{
printf("Error: Host memory allocation failed!\n");
return -1;
}
/*******************
* Initialize data
*******************/
memset(R, 0, sizeof(floatType) * elementsR);
for (uint64_t i = 0; i < elementsA; i++)
A[i] = ((floatType) rand()) / RAND_MAX;
for (uint64_t i = 0; i < elementsB; i++)
B[i] = ((floatType) rand()) / RAND_MAX;
for (uint64_t i = 0; i < elementsC; i++)
C[i] = ((floatType) rand()) / RAND_MAX;
for (uint64_t i = 0; i < elementsD; i++)
D[i] = ((floatType) rand()) / RAND_MAX;
HANDLE_CUDA_ERROR( hipMemcpy(rawDataIn_d[0], A, sizeA, hipMemcpyHostToDevice) );
HANDLE_CUDA_ERROR( hipMemcpy(rawDataIn_d[1], B, sizeB, hipMemcpyHostToDevice) );
HANDLE_CUDA_ERROR( hipMemcpy(rawDataIn_d[2], C, sizeC, hipMemcpyHostToDevice) );
HANDLE_CUDA_ERROR( hipMemcpy(rawDataIn_d[3], D, sizeD, hipMemcpyHostToDevice) );
if(verbose)
printf("Allocated GPU memory for data, and initialize data\n");
// Sphinx: #4
/*************************
* cuTensorNet
*************************/
hipStream_t stream;
hipStreamCreate(&stream);
cutensornetHandle_t handle;
HANDLE_ERROR( cutensornetCreate(&handle) );
const int32_t nmodeA = modesA.size();
const int32_t nmodeB = modesB.size();
const int32_t nmodeC = modesC.size();
const int32_t nmodeD = modesD.size();
const int32_t nmodeR = modesR.size();
/*******************************
* Create Network Descriptor
*******************************/
const int32_t* modesIn[] = {modesA.data(), modesB.data(), modesC.data(), modesD.data()};
int32_t const numModesIn[] = {nmodeA, nmodeB, nmodeC, nmodeD};
const int64_t* extentsIn[] = {extentA.data(), extentB.data(), extentC.data(), extentD.data()};
const int64_t* stridesIn[] = {NULL, NULL, NULL, NULL}; // strides are optional; if no stride is provided, cuTensorNet assumes a generalized column-major data layout
// Set up tensor network
cutensornetNetworkDescriptor_t descNet;
HANDLE_ERROR( cutensornetCreateNetworkDescriptor(handle,
numInputs, numModesIn, extentsIn, stridesIn, modesIn, NULL,
nmodeR, extentR.data(), /*stridesOut = */NULL, modesR.data(),
typeData, typeCompute,
&descNet) );
if(verbose)
printf("Initialized the cuTensorNet library and created a tensor network descriptor\n");
// Sphinx: #5
/*******************************
* Choose workspace limit based on available resources.
*******************************/
size_t freeMem, totalMem;
HANDLE_CUDA_ERROR( hipMemGetInfo(&freeMem, &totalMem) );
uint64_t workspaceLimit = (uint64_t)((double)freeMem * 0.9);
if(verbose)
printf("Workspace limit = %lu\n", workspaceLimit);
/*******************************
* Find "optimal" contraction order and slicing
*******************************/
cutensornetContractionOptimizerConfig_t optimizerConfig;
HANDLE_ERROR( cutensornetCreateContractionOptimizerConfig(handle, &optimizerConfig) );
// Set the desired number of hyper-samples (defaults to 0)
int32_t num_hypersamples = 8;
HANDLE_ERROR( cutensornetContractionOptimizerConfigSetAttribute(handle,
optimizerConfig,
CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_HYPER_NUM_SAMPLES,
&num_hypersamples,
sizeof(num_hypersamples)) );
// Create contraction optimizer info and find an optimized contraction path
cutensornetContractionOptimizerInfo_t optimizerInfo;
HANDLE_ERROR( cutensornetCreateContractionOptimizerInfo(handle, descNet, &optimizerInfo) );
HANDLE_ERROR( cutensornetContractionOptimize(handle,
descNet,
optimizerConfig,
workspaceLimit,
optimizerInfo) );
// Query the number of slices the tensor network execution will be split into
int64_t numSlices = 0;
HANDLE_ERROR( cutensornetContractionOptimizerInfoGetAttribute(
handle,
optimizerInfo,
CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICES,
&numSlices,
sizeof(numSlices)) );
assert(numSlices > 0);
if(verbose)
printf("Found an optimized contraction path using cuTensorNet optimizer\n");
// Sphinx: #6
/*******************************
* Create workspace descriptor, allocate workspace, and set it.
*******************************/
cutensornetWorkspaceDescriptor_t workDesc;
HANDLE_ERROR( cutensornetCreateWorkspaceDescriptor(handle, &workDesc) );
int64_t requiredWorkspaceSize = 0;
HANDLE_ERROR( cutensornetWorkspaceComputeContractionSizes(handle,
descNet,
optimizerInfo,
workDesc) );
HANDLE_ERROR( cutensornetWorkspaceGetMemorySize(handle,
workDesc,
CUTENSORNET_WORKSIZE_PREF_MIN,
CUTENSORNET_MEMSPACE_DEVICE,
CUTENSORNET_WORKSPACE_SCRATCH,
&requiredWorkspaceSize) );
void* work = nullptr;
HANDLE_CUDA_ERROR( hipMalloc(&work, requiredWorkspaceSize) );
HANDLE_ERROR( cutensornetWorkspaceSetMemory(handle,
workDesc,
CUTENSORNET_MEMSPACE_DEVICE,
CUTENSORNET_WORKSPACE_SCRATCH,
work,
requiredWorkspaceSize) );
if(verbose)
printf("Allocated and set up the GPU workspace\n");
// Sphinx: #7
/*******************************
* Initialize the pairwise contraction plan (for cuTENSOR).
*******************************/
cutensornetContractionPlan_t plan;
HANDLE_ERROR( cutensornetCreateContractionPlan(handle,
descNet,
optimizerInfo,
workDesc,
&plan) );
/*******************************
* Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
* for each pairwise tensor contraction.
*******************************/
cutensornetContractionAutotunePreference_t autotunePref;
HANDLE_ERROR( cutensornetCreateContractionAutotunePreference(handle,
&autotunePref) );
const int numAutotuningIterations = 5; // may be 0
HANDLE_ERROR( cutensornetContractionAutotunePreferenceSetAttribute(
handle,
autotunePref,
CUTENSORNET_CONTRACTION_AUTOTUNE_MAX_ITERATIONS,
&numAutotuningIterations,
sizeof(numAutotuningIterations)) );
// Modify the plan again to find the best pair-wise contractions
HANDLE_ERROR( cutensornetContractionAutotune(handle,
plan,
rawDataIn_d,
R_d,
workDesc,
autotunePref,
stream) );
HANDLE_ERROR( cutensornetDestroyContractionAutotunePreference(autotunePref) );
if(verbose)
printf("Created a contraction plan for cuTensorNet and optionally auto-tuned it\n");
// Sphinx: #8
/**********************
* Execute the tensor network contraction
**********************/
// Create a cutensornetSliceGroup_t object from a range of slice IDs
cutensornetSliceGroup_t sliceGroup{};
HANDLE_ERROR( cutensornetCreateSliceGroupFromIDRange(handle, 0, numSlices, 1, &sliceGroup) );
GPUTimer timer {stream};
double minTimeCUTENSORNET = 1e100;
const int numRuns = 3; // number of repeats to get stable performance results
for (int i = 0; i < numRuns; ++i)
{
HANDLE_CUDA_ERROR( hipMemcpy(R_d, R, sizeR, hipMemcpyHostToDevice) ); // restore the output tensor on GPU
HANDLE_CUDA_ERROR( hipDeviceSynchronize() );
/*
* Contract all slices of the tensor network
*/
timer.start();
int32_t accumulateOutput = 0; // output tensor data will be overwritten
HANDLE_ERROR( cutensornetContractSlices(handle,
plan,
rawDataIn_d,
R_d,
accumulateOutput,
workDesc,
sliceGroup, // alternatively, NULL can also be used to contract over all slices instead of specifying a sliceGroup object
stream) );
// Synchronize and measure best timing
auto time = timer.seconds();
minTimeCUTENSORNET = (time > minTimeCUTENSORNET) ? minTimeCUTENSORNET : time;
}
if(verbose)
printf("Contracted the tensor network, each slice used the same contraction plan\n");
// Print the 1-norm of the output tensor (verification)
HANDLE_CUDA_ERROR( hipStreamSynchronize(stream) );
HANDLE_CUDA_ERROR( hipMemcpy(R, R_d, sizeR, hipMemcpyDeviceToHost) ); // restore the output tensor on Host
double norm1 = 0.0;
for (int64_t i = 0; i < elementsR; ++i) {
norm1 += std::abs(R[i]);
}
if(verbose)
printf("Computed the 1-norm of the output tensor: %e\n", norm1);
/*************************/
// Query the total Flop count for the tensor network contraction
double flops {0.0};
HANDLE_ERROR( cutensornetContractionOptimizerInfoGetAttribute(
handle,
optimizerInfo,
CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT,
&flops,
sizeof(flops)) );
if(verbose) {
printf("Number of tensor network slices = %ld\n", numSlices);
printf("Tensor network contraction time (ms) = %.3f\n", minTimeCUTENSORNET * 1000.f);
}
// Free cuTensorNet resources
HANDLE_ERROR( cutensornetDestroySliceGroup(sliceGroup) );
HANDLE_ERROR( cutensornetDestroyContractionPlan(plan) );
HANDLE_ERROR( cutensornetDestroyWorkspaceDescriptor(workDesc) );
HANDLE_ERROR( cutensornetDestroyContractionOptimizerInfo(optimizerInfo) );
HANDLE_ERROR( cutensornetDestroyContractionOptimizerConfig(optimizerConfig) );
HANDLE_ERROR( cutensornetDestroyNetworkDescriptor(descNet) );
HANDLE_ERROR( cutensornetDestroy(handle) );
// Free Host memory resources
if (R) free(R);
if (D) free(D);
if (C) free(C);
if (B) free(B);
if (A) free(A);
// Free GPU memory resources
if (work) hipFree(work);
if (R_d) hipFree(R_d);
if (rawDataIn_d[0]) hipFree(rawDataIn_d[0]);
if (rawDataIn_d[1]) hipFree(rawDataIn_d[1]);
if (rawDataIn_d[2]) hipFree(rawDataIn_d[2]);
if (rawDataIn_d[3]) hipFree(rawDataIn_d[3]);
if(verbose)
printf("Freed resources and exited\n");
return 0;
}
| 49c73f4d403f13b44bea1fd679a52d0c8b6762d2.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
// Sphinx: #1
#include <stdlib.h>
#include <stdio.h>
#include <unordered_map>
#include <vector>
#include <cassert>
#include <cuda_runtime.h>
#include <cutensornet.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSORNET_STATUS_SUCCESS ) \
{ printf("Error: %s in line %d\n", cutensornetGetErrorString(err), __LINE__); \
fflush(stdout); \
} \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != cudaSuccess ) \
{ printf("CUDA Error: %s in line %d\n", cudaGetErrorString(err), __LINE__); \
fflush(stdout); \
} \
};
// Minimal stream-ordered stopwatch built on CUDA events.
// Usage: construct on a stream, call start(), run work on that stream,
// then seconds() records/synchronizes the stop event and returns the
// elapsed wall time in seconds.
// NOTE(review): event API return codes are deliberately unchecked here.
struct GPUTimer
{
   GPUTimer(cudaStream_t stream): stream_(stream)
   {
      cudaEventCreate(&start_);
      cudaEventCreate(&stop_);
   }
   ~GPUTimer()
   {
      cudaEventDestroy(start_);
      cudaEventDestroy(stop_);
   }
   void start()
   {
      cudaEventRecord(start_, stream_);
   }
   float seconds()
   {
      cudaEventRecord(stop_, stream_);
      // blocks the host until the stop event (and all prior stream work) completes
      cudaEventSynchronize(stop_);
      float time;
      cudaEventElapsedTime(&time, start_, stop_);
      return time * 1e-3; // cudaEventElapsedTime reports milliseconds
   }
   private:
   cudaEvent_t start_, stop_;
   cudaStream_t stream_;
};
// cuTensorNet sample: contracts R_{k,l} = A B C D over a 4-tensor network.
// Workflow: define the network, allocate/initialize data, find an optimized
// contraction path, create/autotune a contraction plan, contract all slices,
// then verify and clean up.
// Changes vs. the stock sample: cudaStreamCreate is error-checked, the stream
// is destroyed before exit, %zu is used for the size_t version value, and the
// verification loop index is size_t to match elementsR.
int main()
{
   static_assert(sizeof(size_t) == sizeof(int64_t), "Please build this sample on a 64-bit architecture!");
   bool verbose = true;
   // Check cuTensorNet version
   const size_t cuTensornetVersion = cutensornetGetVersion();
   if(verbose)
      printf("cuTensorNet version: %zu\n", cuTensornetVersion); // %zu: value is size_t
   // Set GPU device
   int numDevices {0};
   HANDLE_CUDA_ERROR( cudaGetDeviceCount(&numDevices) );
   const int deviceId = 0;
   HANDLE_CUDA_ERROR( cudaSetDevice(deviceId) );
   cudaDeviceProp prop;
   HANDLE_CUDA_ERROR( cudaGetDeviceProperties(&prop, deviceId) );
   if(verbose) {
      printf("===== device info ======\n");
      printf("GPU-name:%s\n", prop.name);
      printf("GPU-clock:%d\n", prop.clockRate);
      printf("GPU-memoryClock:%d\n", prop.memoryClockRate);
      printf("GPU-nSM:%d\n", prop.multiProcessorCount);
      printf("GPU-major:%d\n", prop.major);
      printf("GPU-minor:%d\n", prop.minor);
      printf("========================\n");
   }
   typedef float floatType;
   cudaDataType_t typeData = CUDA_R_32F;
   cutensornetComputeType_t typeCompute = CUTENSORNET_COMPUTE_32F;
   if(verbose)
      printf("Included headers and defined data types\n");
   // Sphinx: #2
   /**********************
   * Computing: R_{k,l} = A_{a,b,c,d,e,f} B_{b,g,h,e,i,j} C_{m,a,g,f,i,k} D_{l,c,h,d,j,m}
   **********************/
   constexpr int32_t numInputs = 4;
   // Create vectors of tensor modes
   std::vector<int32_t> modesA{'a','b','c','d','e','f'};
   std::vector<int32_t> modesB{'b','g','h','e','i','j'};
   std::vector<int32_t> modesC{'m','a','g','f','i','k'};
   std::vector<int32_t> modesD{'l','c','h','d','j','m'};
   std::vector<int32_t> modesR{'k','l'};
   // Set mode extents (all modes have extent 16)
   std::unordered_map<int32_t, int64_t> extent;
   extent['a'] = 16;
   extent['b'] = 16;
   extent['c'] = 16;
   extent['d'] = 16;
   extent['e'] = 16;
   extent['f'] = 16;
   extent['g'] = 16;
   extent['h'] = 16;
   extent['i'] = 16;
   extent['j'] = 16;
   extent['k'] = 16;
   extent['l'] = 16;
   extent['m'] = 16;
   // Create a vector of extents for each tensor
   std::vector<int64_t> extentA;
   for (auto mode : modesA)
      extentA.push_back(extent[mode]);
   std::vector<int64_t> extentB;
   for (auto mode : modesB)
      extentB.push_back(extent[mode]);
   std::vector<int64_t> extentC;
   for (auto mode : modesC)
      extentC.push_back(extent[mode]);
   std::vector<int64_t> extentD;
   for (auto mode : modesD)
      extentD.push_back(extent[mode]);
   std::vector<int64_t> extentR;
   for (auto mode : modesR)
      extentR.push_back(extent[mode]);
   if(verbose)
      printf("Defined tensor network, modes, and extents\n");
   // Sphinx: #3
   /**********************
   * Allocating data
   **********************/
   size_t elementsA = 1;
   for (auto mode : modesA)
      elementsA *= extent[mode];
   size_t elementsB = 1;
   for (auto mode : modesB)
      elementsB *= extent[mode];
   size_t elementsC = 1;
   for (auto mode : modesC)
      elementsC *= extent[mode];
   size_t elementsD = 1;
   for (auto mode : modesD)
      elementsD *= extent[mode];
   size_t elementsR = 1;
   for (auto mode : modesR)
      elementsR *= extent[mode];
   size_t sizeA = sizeof(floatType) * elementsA;
   size_t sizeB = sizeof(floatType) * elementsB;
   size_t sizeC = sizeof(floatType) * elementsC;
   size_t sizeD = sizeof(floatType) * elementsD;
   size_t sizeR = sizeof(floatType) * elementsR;
   if(verbose)
      printf("Total GPU memory used for tensor storage: %.2f GiB\n",
             (sizeA + sizeB + sizeC + sizeD + sizeR) / 1024. /1024. / 1024);
   void* rawDataIn_d[numInputs];
   void* R_d;
   HANDLE_CUDA_ERROR( cudaMalloc((void**) &rawDataIn_d[0], sizeA) );
   HANDLE_CUDA_ERROR( cudaMalloc((void**) &rawDataIn_d[1], sizeB) );
   HANDLE_CUDA_ERROR( cudaMalloc((void**) &rawDataIn_d[2], sizeC) );
   HANDLE_CUDA_ERROR( cudaMalloc((void**) &rawDataIn_d[3], sizeD) );
   HANDLE_CUDA_ERROR( cudaMalloc((void**) &R_d, sizeR));
   floatType *A = (floatType*) malloc(sizeof(floatType) * elementsA);
   floatType *B = (floatType*) malloc(sizeof(floatType) * elementsB);
   floatType *C = (floatType*) malloc(sizeof(floatType) * elementsC);
   floatType *D = (floatType*) malloc(sizeof(floatType) * elementsD);
   floatType *R = (floatType*) malloc(sizeof(floatType) * elementsR);
   if (A == NULL || B == NULL || C == NULL || D == NULL || R == NULL)
   {
      printf("Error: Host memory allocation failed!\n");
      return -1;
   }
   /*******************
   * Initialize data
   *******************/
   memset(R, 0, sizeof(floatType) * elementsR);
   for (uint64_t i = 0; i < elementsA; i++)
      A[i] = ((floatType) rand()) / RAND_MAX;
   for (uint64_t i = 0; i < elementsB; i++)
      B[i] = ((floatType) rand()) / RAND_MAX;
   for (uint64_t i = 0; i < elementsC; i++)
      C[i] = ((floatType) rand()) / RAND_MAX;
   for (uint64_t i = 0; i < elementsD; i++)
      D[i] = ((floatType) rand()) / RAND_MAX;
   HANDLE_CUDA_ERROR( cudaMemcpy(rawDataIn_d[0], A, sizeA, cudaMemcpyHostToDevice) );
   HANDLE_CUDA_ERROR( cudaMemcpy(rawDataIn_d[1], B, sizeB, cudaMemcpyHostToDevice) );
   HANDLE_CUDA_ERROR( cudaMemcpy(rawDataIn_d[2], C, sizeC, cudaMemcpyHostToDevice) );
   HANDLE_CUDA_ERROR( cudaMemcpy(rawDataIn_d[3], D, sizeD, cudaMemcpyHostToDevice) );
   if(verbose)
      printf("Allocated GPU memory for data, and initialize data\n");
   // Sphinx: #4
   /*************************
   * cuTensorNet
   *************************/
   cudaStream_t stream;
   HANDLE_CUDA_ERROR( cudaStreamCreate(&stream) ); // was previously unchecked
   cutensornetHandle_t handle;
   HANDLE_ERROR( cutensornetCreate(&handle) );
   const int32_t nmodeA = modesA.size();
   const int32_t nmodeB = modesB.size();
   const int32_t nmodeC = modesC.size();
   const int32_t nmodeD = modesD.size();
   const int32_t nmodeR = modesR.size();
   /*******************************
   * Create Network Descriptor
   *******************************/
   const int32_t* modesIn[] = {modesA.data(), modesB.data(), modesC.data(), modesD.data()};
   int32_t const numModesIn[] = {nmodeA, nmodeB, nmodeC, nmodeD};
   const int64_t* extentsIn[] = {extentA.data(), extentB.data(), extentC.data(), extentD.data()};
   const int64_t* stridesIn[] = {NULL, NULL, NULL, NULL}; // strides are optional; if no stride is provided, cuTensorNet assumes a generalized column-major data layout
   // Set up tensor network
   cutensornetNetworkDescriptor_t descNet;
   HANDLE_ERROR( cutensornetCreateNetworkDescriptor(handle,
                     numInputs, numModesIn, extentsIn, stridesIn, modesIn, NULL,
                     nmodeR, extentR.data(), /*stridesOut = */NULL, modesR.data(),
                     typeData, typeCompute,
                     &descNet) );
   if(verbose)
      printf("Initialized the cuTensorNet library and created a tensor network descriptor\n");
   // Sphinx: #5
   /*******************************
   * Choose workspace limit based on available resources.
   *******************************/
   size_t freeMem, totalMem;
   HANDLE_CUDA_ERROR( cudaMemGetInfo(&freeMem, &totalMem) );
   uint64_t workspaceLimit = (uint64_t)((double)freeMem * 0.9);
   if(verbose)
      printf("Workspace limit = %lu\n", workspaceLimit);
   /*******************************
   * Find "optimal" contraction order and slicing
   *******************************/
   cutensornetContractionOptimizerConfig_t optimizerConfig;
   HANDLE_ERROR( cutensornetCreateContractionOptimizerConfig(handle, &optimizerConfig) );
   // Set the desired number of hyper-samples (defaults to 0)
   int32_t num_hypersamples = 8;
   HANDLE_ERROR( cutensornetContractionOptimizerConfigSetAttribute(handle,
                     optimizerConfig,
                     CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_HYPER_NUM_SAMPLES,
                     &num_hypersamples,
                     sizeof(num_hypersamples)) );
   // Create contraction optimizer info and find an optimized contraction path
   cutensornetContractionOptimizerInfo_t optimizerInfo;
   HANDLE_ERROR( cutensornetCreateContractionOptimizerInfo(handle, descNet, &optimizerInfo) );
   HANDLE_ERROR( cutensornetContractionOptimize(handle,
                                             descNet,
                                             optimizerConfig,
                                             workspaceLimit,
                                             optimizerInfo) );
   // Query the number of slices the tensor network execution will be split into
   int64_t numSlices = 0;
   HANDLE_ERROR( cutensornetContractionOptimizerInfoGetAttribute(
                  handle,
                  optimizerInfo,
                  CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICES,
                  &numSlices,
                  sizeof(numSlices)) );
   assert(numSlices > 0);
   if(verbose)
      printf("Found an optimized contraction path using cuTensorNet optimizer\n");
   // Sphinx: #6
   /*******************************
   * Create workspace descriptor, allocate workspace, and set it.
   *******************************/
   cutensornetWorkspaceDescriptor_t workDesc;
   HANDLE_ERROR( cutensornetCreateWorkspaceDescriptor(handle, &workDesc) );
   int64_t requiredWorkspaceSize = 0;
   HANDLE_ERROR( cutensornetWorkspaceComputeContractionSizes(handle,
                                                      descNet,
                                                      optimizerInfo,
                                                      workDesc) );
   HANDLE_ERROR( cutensornetWorkspaceGetMemorySize(handle,
                                                   workDesc,
                                                   CUTENSORNET_WORKSIZE_PREF_MIN,
                                                   CUTENSORNET_MEMSPACE_DEVICE,
                                                   CUTENSORNET_WORKSPACE_SCRATCH,
                                                   &requiredWorkspaceSize) );
   void* work = nullptr;
   HANDLE_CUDA_ERROR( cudaMalloc(&work, requiredWorkspaceSize) );
   HANDLE_ERROR( cutensornetWorkspaceSetMemory(handle,
                                               workDesc,
                                               CUTENSORNET_MEMSPACE_DEVICE,
                                               CUTENSORNET_WORKSPACE_SCRATCH,
                                               work,
                                               requiredWorkspaceSize) );
   if(verbose)
      printf("Allocated and set up the GPU workspace\n");
   // Sphinx: #7
   /*******************************
   * Initialize the pairwise contraction plan (for cuTENSOR).
   *******************************/
   cutensornetContractionPlan_t plan;
   HANDLE_ERROR( cutensornetCreateContractionPlan(handle,
                                                  descNet,
                                                  optimizerInfo,
                                                  workDesc,
                                                  &plan) );
   /*******************************
   * Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
   *           for each pairwise tensor contraction.
   *******************************/
   cutensornetContractionAutotunePreference_t autotunePref;
   HANDLE_ERROR( cutensornetCreateContractionAutotunePreference(handle,
                                                                &autotunePref) );
   const int numAutotuningIterations = 5; // may be 0
   HANDLE_ERROR( cutensornetContractionAutotunePreferenceSetAttribute(
                           handle,
                           autotunePref,
                           CUTENSORNET_CONTRACTION_AUTOTUNE_MAX_ITERATIONS,
                           &numAutotuningIterations,
                           sizeof(numAutotuningIterations)) );
   // Modify the plan again to find the best pair-wise contractions
   HANDLE_ERROR( cutensornetContractionAutotune(handle,
                                                plan,
                                                rawDataIn_d,
                                                R_d,
                                                workDesc,
                                                autotunePref,
                                                stream) );
   HANDLE_ERROR( cutensornetDestroyContractionAutotunePreference(autotunePref) );
   if(verbose)
      printf("Created a contraction plan for cuTensorNet and optionally auto-tuned it\n");
   // Sphinx: #8
   /**********************
   * Execute the tensor network contraction
   **********************/
   // Create a cutensornetSliceGroup_t object from a range of slice IDs
   cutensornetSliceGroup_t sliceGroup{};
   HANDLE_ERROR( cutensornetCreateSliceGroupFromIDRange(handle, 0, numSlices, 1, &sliceGroup) );
   GPUTimer timer {stream};
   double minTimeCUTENSORNET = 1e100;
   const int numRuns = 3; // number of repeats to get stable performance results
   for (int i = 0; i < numRuns; ++i)
   {
      HANDLE_CUDA_ERROR( cudaMemcpy(R_d, R, sizeR, cudaMemcpyHostToDevice) ); // restore the output tensor on GPU
      HANDLE_CUDA_ERROR( cudaDeviceSynchronize() );
      /*
      * Contract all slices of the tensor network
      */
      timer.start();
      int32_t accumulateOutput = 0; // output tensor data will be overwritten
      HANDLE_ERROR( cutensornetContractSlices(handle,
                     plan,
                     rawDataIn_d,
                     R_d,
                     accumulateOutput,
                     workDesc,
                     sliceGroup, // alternatively, NULL can also be used to contract over all slices instead of specifying a sliceGroup object
                     stream) );
      // Synchronize and keep the best (smallest) timing
      auto time = timer.seconds();
      minTimeCUTENSORNET = (time > minTimeCUTENSORNET) ? minTimeCUTENSORNET : time;
   }
   if(verbose)
      printf("Contracted the tensor network, each slice used the same contraction plan\n");
   // Print the 1-norm of the output tensor (verification)
   HANDLE_CUDA_ERROR( cudaStreamSynchronize(stream) );
   HANDLE_CUDA_ERROR( cudaMemcpy(R, R_d, sizeR, cudaMemcpyDeviceToHost) ); // restore the output tensor on Host
   double norm1 = 0.0;
   for (size_t i = 0; i < elementsR; ++i) { // size_t: matches elementsR, avoids signed/unsigned compare
      norm1 += std::abs(R[i]);
   }
   if(verbose)
      printf("Computed the 1-norm of the output tensor: %e\n", norm1);
   /*************************/
   // Query the total Flop count for the tensor network contraction
   double flops {0.0};
   HANDLE_ERROR( cutensornetContractionOptimizerInfoGetAttribute(
                     handle,
                     optimizerInfo,
                     CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT,
                     &flops,
                     sizeof(flops)) );
   if(verbose) {
      printf("Number of tensor network slices = %ld\n", numSlices);
      printf("Tensor network contraction time (ms) = %.3f\n", minTimeCUTENSORNET * 1000.f);
   }
   // Release the stream before tearing the library objects down
   HANDLE_CUDA_ERROR( cudaStreamDestroy(stream) );
   // Free cuTensorNet resources
   HANDLE_ERROR( cutensornetDestroySliceGroup(sliceGroup) );
   HANDLE_ERROR( cutensornetDestroyContractionPlan(plan) );
   HANDLE_ERROR( cutensornetDestroyWorkspaceDescriptor(workDesc) );
   HANDLE_ERROR( cutensornetDestroyContractionOptimizerInfo(optimizerInfo) );
   HANDLE_ERROR( cutensornetDestroyContractionOptimizerConfig(optimizerConfig) );
   HANDLE_ERROR( cutensornetDestroyNetworkDescriptor(descNet) );
   HANDLE_ERROR( cutensornetDestroy(handle) );
   // Free Host memory resources
   if (R) free(R);
   if (D) free(D);
   if (C) free(C);
   if (B) free(B);
   if (A) free(A);
   // Free GPU memory resources
   if (work) cudaFree(work);
   if (R_d) cudaFree(R_d);
   if (rawDataIn_d[0]) cudaFree(rawDataIn_d[0]);
   if (rawDataIn_d[1]) cudaFree(rawDataIn_d[1]);
   if (rawDataIn_d[2]) cudaFree(rawDataIn_d[2]);
   if (rawDataIn_d[3]) cudaFree(rawDataIn_d[3]);
   if(verbose)
      printf("Freed resources and exited\n");
   return 0;
}
|
1a2eed9b1bb3237b4b2e80a1aafcfdc114aacf79.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include "stdio.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <iostream>
#include <cstdio>
// Clamp 'in' to [low, high].  NaN inputs fall through unchanged (both
// comparisons are false for NaN), matching plain ternary clamping.
__forceinline__ __device__ float clipp(float in, float low, float high)
{
	if (in < low)
	{
		return low;
	}
	if (in > high)
	{
		return high;
	}
	return in;
}
// Copy one planar 3-channel image ('size' pixels per plane) into slot
// 'index' of a batched planar output buffer.  One thread per pixel.
__global__ void copyKernel(unsigned char* input, unsigned char* output, int index, int size)
{
	int px = blockIdx.x * blockDim.x + threadIdx.x;
	if (px >= size)
	{
		return;
	}
	unsigned char* slot = output + index * size * 3;
	slot[px] = input[px];
	slot[px + size] = input[px + size];
	slot[px + 2 * size] = input[px + 2 * size];
}
// Strip per-row alignment padding of 'dw' bytes while packing image 'index'
// into a tightly packed (srcW*3 bytes per row) batched output buffer.
__global__ void copyKernelAlign(unsigned char* input, unsigned char* output, int index, int srcW, int srcH, int dw)
{
	const int srcPitch = srcW * 3 + dw; // padded row length of the source
	const int dstPitch = srcW * 3;      // tight row length of the destination
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= srcPitch * srcH)
	{
		return;
	}
	int row = tid / srcPitch;
	int col = tid - row * srcPitch;
	// Threads landing in the padding tail of a row copy nothing.
	if (col >= dstPitch)
	{
		return;
	}
	output[index * srcW * srcH * 3 + row * dstPitch + col] = input[row * srcPitch + col];
}
// Gather a single planar 3-channel image into batch slot 'index'.
// Same layout contract as copyKernel; kept as a separate entry point for the
// D2 pipeline.
__global__ void copyKernelD2(unsigned char* input, unsigned char* output, int index, int size)
{
	int pixel = blockIdx.x * blockDim.x + threadIdx.x;
	if (pixel >= size)
	{
		return;
	}
	unsigned char* dst = output + index * size * 3;
	dst[pixel] = input[pixel];
	dst[pixel + size] = input[pixel + size];
	dst[pixel + 2 * size] = input[pixel + 2 * size];
}
// Bilinear resize of a batch of interleaved (HWC, uchar) images with fused
// per-channel mean subtraction.  outputGpu receives the resized image still
// interleaved (float); normGpu receives it planar (CHW) with mean1/2/3
// subtracted from channels 0/1/2 respectively.
// NOTE(review): there is no guard against i >= batch*dstW*dstH, so the tail
// threads of the last launched block index past the batch -- the launcher
// rounds the grid up to a multiple of the block size.  Confirm the buffers
// are over-allocated, or thread a 'size' parameter through.
__global__ void resizKernel(unsigned char *inputGpu, float *outputGpu, float* normGpu, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// k = pixel index inside one image, l = image index within the batch
	int k = i % (dstW*dstH);
	int l = i / (dstW*dstH);
	const int x = k % dstW;
	const int y = k / dstW;
	// x < dstW and y < dstH hold by construction, so this guard never fires.
	if (x >= dstW || y >= dstH)
		return;
	float ratio_h = float(srcH) / float(dstH);
	float ratio_w = float(srcW) / float(dstW);
	// Source-space coordinates of this destination pixel.
	float x0 = float(x) * ratio_w;
	float y0 = float(y) * ratio_h;
	// NOTE(review): clipp() clamps to srcW/srcH instead of srcW-1/srcH-1, so
	// 'right'/'bottom' can index one texel past the last column/row at the
	// image border -- verify the input buffer tolerates this.
	int left = int(clipp((float)floor(x0), 0.0f, float(srcW)));
	int top = int(clipp((float)floor(y0), 0.0f, float(srcH)));
	int right = int(clipp((float)ceil(x0), 0.0f, float(srcW)));
	int bottom = int(clipp((float)ceil(y0), 0.0f, float(srcH)));
	for (int c = 0; c < 3; ++c)
	{
		// Four neighbouring texels of channel c (interleaved layout).
		unsigned char left_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + left * (3) + c];
		unsigned char right_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + right * (3) + c];
		unsigned char left_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + left * (3) + c];
		unsigned char right_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + right * (3) + c];
		// Horizontal then vertical linear interpolation, clamped to [0, 255].
		float top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left);
		float bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left);
		float lerp = clipp((top_lerp + (bottom_lerp - top_lerp) * (y0 - top)), 0.0f, 255.0f);
		outputGpu[i * 3 + c] = lerp;
		// Planar CHW write with per-channel mean subtraction.
		if (c == 0)
		{
			normGpu[l*dstW*dstH * 3 + k] = float(outputGpu[i * 3 + c]) - mean1;
		}
		if (c == 1)
		{
			normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean2;
		}
		if (c == 2)
		{
			normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean3;
		}
	}
}
// Variant of resizKernel: identical layout and mean subtraction, but the
// neighbour indices are all truncated from (x0, y0) -- left==right and
// top==bottom -- so the "interpolation" below collapses to floor (nearest)
// sampling.  Presumably this mimics a torch resize mode; TODO confirm.
// NOTE(review): same missing tail guard as resizKernel (no check on i).
__global__ void resizKernel_torch(unsigned char *inputGpu, float *outputGpu, float* normGpu, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// k = pixel index inside one image, l = image index within the batch
	int k = i % (dstW*dstH);
	int l = i / (dstW*dstH);
	const int x = k % dstW;
	const int y = k / dstW;
	if (x >= dstW || y >= dstH)
		return;
	float ratio_h = float(srcH) / float(dstH);
	float ratio_w = float(srcW) / float(dstW);
	float x0 = float(x) * ratio_w;
	float y0 = float(y) * ratio_h;
	// All four corners truncate to the same texel (int cast == floor for
	// non-negative coordinates).
	int left = int(clipp((float)(x0), 0.0f, float(srcW)));
	int top = int(clipp((float)(y0), 0.0f, float(srcH)));
	int right = int(clipp((float)(x0), 0.0f, float(srcW)));
	int bottom = int(clipp((float)(y0), 0.0f, float(srcH)));
	for (int c = 0; c < 3; ++c)
	{
		unsigned char left_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + left * (3) + c];
		unsigned char right_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + right * (3) + c];
		unsigned char left_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + left * (3) + c];
		unsigned char right_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + right * (3) + c];
		// With identical corners these reduce to left_top_val.
		float top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left);
		float bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left);
		float lerp = clipp((top_lerp + (bottom_lerp - top_lerp) * (y0 - top)), 0.0f, 255.0f);
		outputGpu[i * 3 + c] = lerp;
		// Planar CHW write with per-channel mean subtraction.
		if (c == 0)
		{
			normGpu[l*dstW*dstH * 3 + k] = float(outputGpu[i * 3 + c]) - mean1;
		}
		if (c == 1)
		{
			normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean2;
		}
		if (c == 2)
		{
			normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean3;
		}
	}
}
// Pad a batch of planar (CHW, float) images with zeros on one side.
// isGreaterWidth == true: 'offset' extra columns are appended on the right;
// otherwise 'offset' extra rows are appended at the bottom.  Each thread
// handles one spatial location across all three channel planes.
__global__ void paddingKernel(const float* input, float* output, const int resizedW, const int resizedH, const int offset, bool isGreaterWidth)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (isGreaterWidth)
	{
		int batchIndex = tid / (resizedH * (resizedW + offset));
		int indexInPerImg = tid % (resizedH * (resizedW + offset));
		int x = indexInPerImg % (resizedW + offset); // column in the padded image
		int y = indexInPerImg / (resizedW + offset); // row in the padded image
		if (x >= (resizedW + offset) || y >= resizedH)
			return;
		if (x >= resizedW)
		{
			// Padding column: zero-fill all three planes.
			for (int c = 0; c < 3; c++)
			{
				output[batchIndex * resizedH * (resizedW + offset) * 3 + (y * (resizedW + offset) + x) + c * ((resizedW + offset) *resizedH)] = 0.0f;
			}
		}
		else
		{
			// Interior column: copy from the unpadded source planes.
			for (int c = 0; c < 3; c++)
			{
				output[batchIndex * resizedH * (resizedW + offset) * 3 + (y * (resizedW + offset) + x) + c * ((resizedW + offset) *resizedH)] = input[batchIndex * resizedH * resizedW * 3 + (y * resizedW + x) + c * (resizedW *
					resizedH)];
			}
		}
	}
	else
	{
		int batchIndex = tid / (resizedW * (resizedH + offset));
		int indexInPerImg = tid % (resizedW * (resizedH + offset));
		int x = indexInPerImg % (resizedW); // column in the padded image
		int y = indexInPerImg / (resizedW); // row in the padded image
		if (x >= (resizedW) || y >= (resizedH + offset))
			return;
		if (y < resizedH)
		{
			// Interior row: copy from the unpadded source planes.
			for (int c = 0; c < 3; c++)
			{
				output[batchIndex * (resizedH + offset) * resizedW * 3 + (y * resizedW + x) + c * resizedW * (resizedH + offset)] =
					input[batchIndex * resizedH * resizedW * 3 + (y * resizedW + x) + c * resizedW *resizedH];
			}
		}
		else
		{
			// Padding row: zero-fill all three planes.
			for (int c = 0; c < 3; c++)
			{
				output[batchIndex * (resizedH + offset) * resizedW * 3 + (y * resizedW + x) + c * resizedW * (resizedH + offset)] = 0.0f;
			}
		}
	}
}
// Element-wise device-to-device copy of 'dims' floats (used when no padding
// is needed).
__global__ void copyArray(float* input, float* output, int dims)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < dims)
	{
		output[idx] = input[idx];
	}
}
// Launch copyKernel: one thread per pixel of a k-pixel plane, 512-thread
// blocks, grid rounded up with ceil-division.
extern "C" void copyImg(void* input, void* output, int index, int k)
{
	const int threads = 512;
	const int blocks = (k + threads - 1) / threads;
	copyKernel << <blocks, threads >> > ((unsigned char *)input, (unsigned char *)output, index, k);
}
// Launch copyKernelD2 with one thread per pixel (512-thread blocks).
extern "C" void copyImgD2(uint8_t* input, uint8_t* output, int index, int k)
{
	const int threads = 512;
	const int blocks = (k + threads - 1) / threads;
	copyKernelD2 << <blocks, threads >> > ((unsigned char *)input, (unsigned char *)output, index, k);
}
// Launch copyKernelAlign: one thread per byte of the padded source image
// (pitch srcW*3 + dw), 512-thread blocks.
extern "C" void copyImgAlign(uint8_t* input, uint8_t* output, int index, int srcW, int srcH, int dw)
{
	const int total = (srcW * 3 + dw) * srcH;
	const int threads = 512;
	const int blocks = (total + threads - 1) / threads;
	copyKernelAlign << <blocks, threads >> > (input, output, index, srcW, srcH, dw);
}
// Launch the bilinear resize + mean-subtraction kernel.  'size' is the total
// thread count (batch * dstW * dstH); 128-thread blocks, ceil-division grid.
extern "C" void resizeAndNorm(void* inputGpu, void* resizedOutputGpu, void* normGpu, int size, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
	const int threads = 128;
	const int blocks = (size + threads - 1) / threads;
	resizKernel << <blocks, threads >> > ((unsigned char *)inputGpu, (float *)resizedOutputGpu, (float*)normGpu, dstW, dstH, srcW, srcH, mean1, mean2, mean3);
}
// Launch the torch-style (truncating) resize + mean-subtraction kernel.
extern "C" void resizeAndNorm_torch(void* inputGpu, void* resizedOutputGpu, void* normGpu, int size, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
	const int threads = 128;
	const int blocks = (size + threads - 1) / threads;
	resizKernel_torch << <blocks, threads >> > ((unsigned char *)inputGpu, (float *)resizedOutputGpu, (float*)normGpu, dstW, dstH, srcW, srcH, mean1, mean2, mean3);
}
// Pad the longer spatial side of a batch of planar float images up to the
// next multiple of 32, zero-filling the new area; equal width/height means a
// plain copy.
// NOTE(review): (dim / 32 + 1) * 32 also pads a side that is ALREADY a
// multiple of 32 by a further 32 -- confirm that is intended.
extern "C" void padding(void* input, void* output, int resizedW, int resizedH, int batchSize)
{	// choose which side to pad and launch accordingly
	bool isGreaterWidth;
	int afterPaddingW, afterPaddingH, offset;
	if (resizedW > resizedH)
	{
		isGreaterWidth = true;
		afterPaddingW = (resizedW / 32 + 1) * 32;
		offset = afterPaddingW - resizedW;
		// one thread per padded spatial location (kernel covers all 3 planes)
		int dims = batchSize * afterPaddingW * resizedH;
		int BS = 128;
		int GS = (dims + BS - 1) / BS;
		paddingKernel << <GS, BS >> > ((float*)input, (float*)output, resizedW, resizedH, offset, isGreaterWidth);
	}
	else if(resizedW < resizedH)
	{
		isGreaterWidth = false;
		afterPaddingH = (resizedH / 32 + 1) * 32;
		offset = afterPaddingH - resizedH;
		int dims = batchSize * afterPaddingH * resizedW;
		int BS = 128;
		int GS = (dims + BS - 1) / BS;
		paddingKernel << <GS, BS >> > ((float*)input, (float*)output, resizedW, resizedH, offset, isGreaterWidth);
	}
	else
	{
		// square input: no padding needed, copy every element of all planes
		int dims = batchSize * resizedH * resizedW * 3;
		int BS = 128;
		int GS = (dims + BS - 1) / BS;
		copyArray << <GS, BS >> > ((float*)input, (float*)output, dims);
	}
}
// Abort with context on any HIP failure.  std::cin.get() pauses first so a
// console window stays readable before the process exits.
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
	if (err == hipSuccess)
	{
		return;
	}
	fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err));
	std::cin.get();
	exit(EXIT_FAILURE);
}
//__forceinline__ __device__ float clip(float in, float low, float high)
//{
// return (in < low) ? low : (in > high ? high-1 : in);
//}
// Clamp x to the half-open range [a, b): values >= b map to b-1, values < a map to a.
// Fully parenthesized (and without a trailing semicolon) so arguments that
// contain operators -- e.g. clip(sy - 1 + 1, 0, h) -- expand safely and the
// macro can be used as an ordinary expression.
#define clip(x, a, b) ((x) >= (a) ? ((x) < (b) ? (x) : (b) - 1) : (a))
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
// OpenCV-style bilinear resize of a batch of interleaved (HWC, uchar) images
// to interleaved float output.  Source coordinates use the pixel-center
// convention ((d + 0.5) * scale - 0.5) with edge clamping.
// Launch: 2D blocks covering (outputWidth, outputHeight); each thread
// produces one output pixel for every image in the batch.
// NOTE(review): pixelGroupSizeX/Y and pitchInput/pitchOutput are unused --
// the scales are recomputed from the widths/heights below.
// NOTE(review): 'xmax = ::min(xmax, dy)' compares against the row index dy,
// then 'dy > xmax - 1' gates the horizontal blend; a column index (dx) would
// be expected here -- confirm against the original OpenCV resize code.
__global__ void resizeD2Kernel(uint8_t* input,
	float* output,
	const int outputWidth,
	const int outputHeight,
	const int inputWidth,
	const int inputHeight,
	const float pixelGroupSizeX,
	const float pixelGroupSizeY,
	const int inputChannels,
	const int batchSizes
)
{
	const int dx = blockIdx.x * blockDim.x + threadIdx.x;
	const int dy = blockIdx.y * blockDim.y + threadIdx.y;
	const int pitchInput = inputWidth * inputChannels;
	const int pitchOutput = outputWidth * inputChannels;
	const int inputSize = inputWidth * inputHeight * inputChannels;
	const int outputSize = outputHeight * outputWidth * inputChannels;
	if ((dx < outputWidth) && (dy < outputHeight))
	{
		for (int batchSize = 0; batchSize < batchSizes; batchSize++)
		{
			if (inputChannels == 1) {
				// grayscale path intentionally unimplemented
			}
			else if (inputChannels == 3) {
				// Horizontal source coordinate and blend weights.
				double scale_x = (double)inputWidth / outputWidth;
				double scale_y = (double)inputHeight / outputHeight;
				int xmax = outputWidth;
				float fx = (float)((dx + 0.5) * scale_x - 0.5);
				int sx = floor(fx);
				fx = fx - sx;
				int isx1 = sx;
				if (isx1 < 0) {
					fx = 0.0;
					isx1 = 0;
				}
				if (isx1 >= (inputWidth - 1)) {
					xmax = ::min(xmax, dy);
					fx = 0;
					isx1 = (inputWidth - 1);
				}
				float2 cbufx;
				cbufx.x = (1.f - fx);
				cbufx.y = fx;
				// Vertical source coordinate and blend weights.
				float fy = (float)((dy + 0.5) * scale_y - 0.5);
				int sy = floor(fy);
				fy = fy - sy;
				int isy1 = clip(sy - 1 + 1 + 0, 0, inputHeight);
				int isy2 = clip(sy - 1 + 1 + 1, 0, inputHeight);
				float2 cbufy;
				cbufy.x = (1.f - fy);
				cbufy.y = fy;
				int isx2 = isx1 + 1;
				if (isx2 >= inputWidth)
				{
					isx2 = isx2 - 1;
				}
				float3 d0;
				// Four neighbouring RGB texels of image 'batchSize'.
				float3 s11 = make_float3(input[inputSize * (batchSize)+(isy1 * inputWidth + isx1) * inputChannels + 0], input[inputSize * (batchSize)+(isy1 * inputWidth + isx1) * inputChannels + 1], input[inputSize * (batchSize)+(isy1 * inputWidth + isx1) * inputChannels + 2]);
				float3 s12 = make_float3(input[inputSize * (batchSize)+(isy1 * inputWidth + isx2) * inputChannels + 0], input[inputSize * (batchSize)+(isy1 * inputWidth + isx2) * inputChannels + 1], input[inputSize * (batchSize)+(isy1 * inputWidth + isx2) * inputChannels + 2]);
				float3 s21 = make_float3(input[inputSize * (batchSize)+(isy2 * inputWidth + isx1) * inputChannels + 0], input[inputSize * (batchSize)+(isy2 * inputWidth + isx1) * inputChannels + 1], input[inputSize * (batchSize)+(isy2 * inputWidth + isx1) * inputChannels + 2]);
				float3 s22 = make_float3(input[inputSize * (batchSize)+(isy2 * inputWidth + isx2) * inputChannels + 0], input[inputSize * (batchSize)+(isy2 * inputWidth + isx2) * inputChannels + 1], input[inputSize * (batchSize)+(isy2 * inputWidth + isx2) * inputChannels + 2]);
				float h_rst00, h_rst01;
				// Channel 0: horizontal blends (or pass-through past xmax), then vertical blend.
				if (dy > xmax - 1)
				{
					h_rst00 = s11.x;
					h_rst01 = s21.x;
				}
				else
				{
					h_rst00 = s11.x * cbufx.x + s12.x * cbufx.y;
					h_rst01 = s21.x * cbufx.x + s22.x * cbufx.y;
				}
				d0.x = h_rst00 * cbufy.x + h_rst01 * cbufy.y;
				// Channel 1.
				if (dy > xmax - 1)
				{
					h_rst00 = s11.y;
					h_rst01 = s21.y;
				}
				else
				{
					h_rst00 = s11.y * cbufx.x + s12.y * cbufx.y;
					h_rst01 = s21.y * cbufx.x + s22.y * cbufx.y;
				}
				d0.y = h_rst00 * cbufy.x + h_rst01 * cbufy.y;
				// Channel 2.
				if (dy > xmax - 1)
				{
					h_rst00 = s11.z;
					h_rst01 = s21.z;
				}
				else
				{
					h_rst00 = s11.z * cbufx.x + s12.z * cbufx.y;
					h_rst01 = s21.z * cbufx.x + s22.z * cbufx.y;
				}
				d0.z = h_rst00 * cbufy.x + h_rst01 * cbufy.y;
				// Interleaved float output for this pixel of this batch image.
				output[outputSize * (batchSize)+(dy*outputWidth + dx) * 3 + 0] = (d0.x);
				output[outputSize * (batchSize)+(dy*outputWidth + dx) * 3 + 1] = (d0.y);
				output[outputSize * (batchSize)+(dy*outputWidth + dx) * 3 + 2] = (d0.z);
			}
			else {
				// other channel counts intentionally unimplemented
			}
		}
	}
}
// Interleaved (HWC) -> planar (CHW) transpose for every image in the batch.
// Element tid of an image maps to channel plane (tid % 3) at pixel (tid / 3);
// 'dims' is the per-channel pixel count of one image.
__global__ void transformD2Kernel(float* resizedInput, float* transform, int batchSize, int dims)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= (dims * 3))
	{
		return;
	}
	int channel = tid % 3;
	int pixel = tid / 3;
	for (int img = 0; img < batchSize; img++)
	{
		int base = img * 3 * dims;
		transform[base + channel * dims + pixel] = resizedInput[base + tid];
	}
}
// Per-channel mean subtraction on planar (CHW) batched data.
// Note the swapped channel order: plane 0 subtracts mean3 and plane 2
// subtracts mean1 (BGR<->RGB convention).  The std* parameters are accepted
// but intentionally unused here (this model applies no scaling).
__global__ void normD2Kernel(float* transform, float* norm, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= (dims * batchSize))
	{
		return;
	}
	int img = tid / dims;
	int px = tid % dims;
	int base = img * 3 * dims;
	norm[base + px] = transform[base + px] - mean3;
	norm[base + dims + px] = transform[base + dims + px] - mean2;
	norm[base + 2 * dims + px] = transform[base + 2 * dims + px] - mean1;
}
// HWC->CHW transpose followed by mean subtraction for the D2 pipeline.
// 'dims' is the per-channel pixel count of one image.
extern "C" void transformD2(void* resizedInput, void* transform, void* normGpu, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
	const int threads = 1024;
	const int gridTranspose = (dims * 3 + threads - 1) / threads;  // one thread per element of one image
	const int gridNorm = (dims * batchSize + threads - 1) / threads; // one thread per pixel of the batch
	transformD2Kernel << <gridTranspose, threads >> > ((float*)resizedInput, (float*)transform, batchSize, dims);
	normD2Kernel << <gridNorm, threads >> > ((float*)transform, (float*)normGpu, batchSize, dims, mean1, mean2, mean3, std1, std2, std3);
}
// Per-channel (x - mean) / std normalization on planar (CHW) batched data.
// Channel order is swapped relative to the argument order: plane 0 uses
// (mean3, std3) and plane 2 uses (mean1, std1).
__global__ void normYolov3Kernel(float* transform, float* norm, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= (dims * batchSize))
	{
		return;
	}
	int img = tid / dims;
	int px = tid % dims;
	int base = img * 3 * dims;
	norm[base + px] = (transform[base + px] - mean3) / std3;
	norm[base + dims + px] = (transform[base + dims + px] - mean2) / std2;
	norm[base + 2 * dims + px] = (transform[base + 2 * dims + px] - mean1) / std1;
}
// HWC->CHW transpose followed by (x - mean) / std normalization (YOLOv3).
extern "C" void transformYolov3(void* resizedInput, void* transform, void* normGpu, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
	const int threads = 1024;
	const int gridTranspose = (dims * 3 + threads - 1) / threads;
	const int gridNorm = (dims * batchSize + threads - 1) / threads;
	transformD2Kernel << <gridTranspose, threads >> > ((float*)resizedInput, (float*)transform, batchSize, dims);
	normYolov3Kernel << <gridNorm, threads >> > ((float*)transform, (float*)normGpu, batchSize, dims, mean1, mean2, mean3, std1, std2, std3);
}
// Launch the batched OpenCV-style bilinear resize: 16x16 thread tiles
// covering the destination image, one output pixel per thread per image.
extern "C" void resizeAndNormD2(uint8_t* inputGpu, float* resizedOutputGpu, int dstW, int dstH, int srcW, int srcH, int batchSize)
{
	const dim3 block(16, 16, 1);
	const dim3 grid((dstW + block.x - 1) / block.x, (dstH + block.y - 1) / block.y, 1);
	const float scaleY = float(srcH) / float(dstH);
	const float scaleX = float(srcW) / float(dstW);
	resizeD2Kernel << <grid, block >> > (inputGpu, resizedOutputGpu, dstW, dstH, srcW, srcH, scaleX, scaleY, 3, batchSize);
} | 1a2eed9b1bb3237b4b2e80a1aafcfdc114aacf79.cu | #include "device_launch_parameters.h"
#include "device_functions.h"
#include "stdio.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <iostream>
#include <cstdio>
// Clamp `in` to the closed interval [low, high]. Device helper used by the
// resize kernels below.
__forceinline__ __device__ float clipp(float in, float low, float high)
{
    return (in < low) ? low : (in > high ? high : in);
}
// Copies one 3-plane image (3*size bytes, planar layout) from `input` into
// slot `index` of a batched output buffer laid out as [index][plane][size].
// One thread per per-plane element; each thread copies all three planes.
__global__ void copyKernel(unsigned char* input, unsigned char* output, int index, int size)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
    {
        return;
    }
    unsigned char* dst = output + index * size * 3;
    dst[tid] = input[tid];
    dst[tid + size] = input[tid + size];
    dst[tid + 2 * size] = input[tid + 2 * size];
}
// Copies one row-aligned image into slot `index` of a tightly packed batch
// buffer. Source rows carry `dw` bytes of alignment padding (row stride is
// srcW*3 + dw); the destination uses a tight srcW*3 row stride. One thread
// per source byte, including the padding bytes, which are skipped.
__global__ void copyKernelAlign(unsigned char* input, unsigned char* output, int index, int srcW, int srcH, int dw)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // reject threads past the padded source image
    if (tid >= (srcW * 3 + dw) * srcH)
    {
        return;
    }
    int row = tid / (srcW * 3 + dw);
    int col = tid % (srcW * 3 + dw);
    // this thread sits on row-alignment padding: nothing to copy
    if (col >= (srcW * 3))
    {
        return;
    }
    output[row * (srcW * 3) + col + index * srcW * srcH * 3] = input[row * (srcW * 3 + dw) + col];
}
// Copies one 3-plane image (3*size bytes, planar layout) into slot `index`
// of a batched output buffer laid out as [index][plane][size].
// Behaviorally identical to copyKernel; kept as a separate entry point for
// the D2 pipeline. Cleanup: removed the leftover commented-out printf/debug
// code that cluttered the body (no behavior change).
__global__ void copyKernelD2(unsigned char* input, unsigned char* output, int index, int size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
    {
        return;
    }
    output[i + index * size * 3] = input[i];
    output[i + size + index * size * 3] = input[i + size];
    output[i + 2 * size + index * size * 3] = input[i + 2 * size];
}
// Bilinear resize of batched interleaved HWC uchar images into interleaved
// HWC float output, plus mean subtraction into a planar CHW `normGpu`
// buffer. One thread per (batch, dst-pixel); each thread handles all three
// channels. Note plane 0 of normGpu gets mean1, plane 1 mean2, plane 2
// mean3 (no channel swap here, unlike normYolov3Kernel).
__global__ void resizKernel(unsigned char *inputGpu, float *outputGpu, float* normGpu, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int k = i % (dstW*dstH);   // pixel index within one destination image
    int l = i / (dstW*dstH);   // batch index
    const int x = k % dstW;
    const int y = k / dstW;
    // NOTE(review): threads with i beyond the last image are not rejected
    // here (x/y are computed modulo one image), so the launcher must size
    // the grid exactly or `l` can run past the batch -- TODO confirm the
    // launch configuration in resizeAndNorm.
    if (x >= dstW || y >= dstH)
        return;
    float ratio_h = float(srcH) / float(dstH);
    float ratio_w = float(srcW) / float(dstW);
    float x0 = float(x) * ratio_w;
    float y0 = float(y) * ratio_h;
    // Bug fix: neighbor indices were clamped to [0, srcW]/[0, srcH], letting
    // ceil(x0)/ceil(y0) index one element past the last column/row and read
    // out of bounds at the image border. Clamp to the last valid index;
    // interior pixels are unaffected (floor/ceil never exceeded srcW-1
    // there), border pixels now replicate the edge sample.
    int left = int(clipp((float)floor(x0), 0.0f, float(srcW - 1)));
    int top = int(clipp((float)floor(y0), 0.0f, float(srcH - 1)));
    int right = int(clipp((float)ceil(x0), 0.0f, float(srcW - 1)));
    int bottom = int(clipp((float)ceil(y0), 0.0f, float(srcH - 1)));
    for (int c = 0; c < 3; ++c)
    {
        // four neighbors of the source sample point for channel c
        unsigned char left_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + left * (3) + c];
        unsigned char right_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + right * (3) + c];
        unsigned char left_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + left * (3) + c];
        unsigned char right_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + right * (3) + c];
        // horizontal lerp on both rows, then vertical lerp between them
        float top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left);
        float bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left);
        float lerp = clipp((top_lerp + (bottom_lerp - top_lerp) * (y0 - top)), 0.0f, 255.0f);
        outputGpu[i * 3 + c] = lerp;
        if (c == 0)
        {
            normGpu[l*dstW*dstH * 3 + k] = float(outputGpu[i * 3 + c]) - mean1;
        }
        if (c == 1)
        {
            normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean2;
        }
        if (c == 2)
        {
            normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean3;
        }
    }
}
// Variant of resizKernel without floor/ceil on the source coordinate: left
// and right (and top and bottom) are computed from the same truncated value,
// so they are always equal and the lerp terms vanish -- effectively
// nearest-neighbor (floor) sampling. Presumably intended to match a torch
// preprocessing path -- TODO confirm against the caller's expectations.
// Mean subtraction writes a planar CHW normGpu buffer, plane 0 <- mean1.
__global__ void resizKernel_torch(unsigned char *inputGpu, float *outputGpu, float* normGpu, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int k = i % (dstW*dstH);   // pixel index within one destination image
    int l = i / (dstW*dstH);   // batch index
    const int x = k % dstW;
    const int y = k / dstW;
    // NOTE(review): like resizKernel, surplus threads are not rejected by
    // this guard (x/y wrap modulo one image) -- the launcher must size the
    // grid exactly; TODO confirm.
    if (x >= dstW || y >= dstH)
        return;
    float ratio_h = float(srcH) / float(dstH);
    float ratio_w = float(srcW) / float(dstW);
    float x0 = float(x) * ratio_w;
    float y0 = float(y) * ratio_h;
    // left == right and top == bottom here (same truncation), so the
    // interpolation below degenerates to the (left, top) sample.
    int left = int(clipp((float)(x0), 0.0f, float(srcW)));
    int top = int(clipp((float)(y0), 0.0f, float(srcH)));
    int right = int(clipp((float)(x0), 0.0f, float(srcW)));
    int bottom = int(clipp((float)(y0), 0.0f, float(srcH)));
    for (int c = 0; c < 3; ++c)
    {
        unsigned char left_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + left * (3) + c];
        unsigned char right_top_val = inputGpu[l*srcW*srcH * 3 + top * (srcW * 3) + right * (3) + c];
        unsigned char left_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + left * (3) + c];
        unsigned char right_bottom_val = inputGpu[l*srcW*srcH * 3 + bottom * (srcW * 3) + right * (3) + c];
        // with equal neighbor indices these lerps reduce to left_top_val
        float top_lerp = left_top_val + (right_top_val - left_top_val) * (x0 - left);
        float bottom_lerp = left_bottom_val + (right_bottom_val - left_bottom_val) * (x0 - left);
        float lerp = clipp((top_lerp + (bottom_lerp - top_lerp) * (y0 - top)), 0.0f, 255.0f);
        outputGpu[i * 3 + c] = lerp;
        //float pixelMean[3]{ 123.68, 116.779, 103.939 };
        if (c == 0)
        {
            normGpu[l*dstW*dstH * 3 + k] = float(outputGpu[i * 3 + c]) - mean1;
        }
        if (c == 1)
        {
            normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean2;
        }
        if (c == 2)
        {
            normGpu[l*dstW*dstH * 3 + c * dstW*dstH + k] = float(outputGpu[i * 3 + c]) - mean3;
        }
    }
}
// Zero-pads a batch of planar (CHW) float images on one side.
// isGreaterWidth == true: `offset` extra zero columns are appended on the
// right (padded row stride resizedW + offset). Otherwise `offset` extra zero
// rows are appended at the bottom. One thread per (batch, row, col) position
// of the padded image; each thread writes all 3 channel planes.
// NOTE(review): batchIndex is not bounds-checked, so surplus threads from a
// rounded-up grid would write past the last image -- the `padding` launcher
// below sizes dims to the exact element count; grids still round up by up to
// BS-1 threads, TODO confirm output buffer has slack or adapt the guard.
__global__ void paddingKernel(const float* input, float* output, const int resizedW, const int resizedH, const int offset, bool isGreaterWidth)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (isGreaterWidth)
    {
        int batchIndex = tid / (resizedH * (resizedW + offset));
        int indexInPerImg = tid % (resizedH * (resizedW + offset));
        int x = indexInPerImg % (resizedW + offset); // column index
        int y = indexInPerImg / (resizedW + offset); // row index
        if (x >= (resizedW + offset) || y >= resizedH)
            return;
        if (x >= resizedW)
        {
            // inside the right-hand padding band: zero-fill all 3 planes
            for (int c = 0; c < 3; c++)
            {
                output[batchIndex * resizedH * (resizedW + offset) * 3 + (y * (resizedW + offset) + x) + c * ((resizedW + offset) *resizedH)] = 0.0f;
            }
        }
        else
        {
            // copy the original pixel into the wider padded layout
            for (int c = 0; c < 3; c++)
            {
                output[batchIndex * resizedH * (resizedW + offset) * 3 + (y * (resizedW + offset) + x) + c * ((resizedW + offset) *resizedH)] = input[batchIndex * resizedH * resizedW * 3 + (y * resizedW + x) + c * (resizedW *
                    resizedH)];
            }
        }
    }
    else
    {
        int batchIndex = tid / (resizedW * (resizedH + offset));
        int indexInPerImg = tid % (resizedW * (resizedH + offset));
        int x = indexInPerImg % (resizedW); // column index
        int y = indexInPerImg / (resizedW); // row index
        if (x >= (resizedW) || y >= (resizedH + offset))
            return;
        if (y < resizedH)
        {
            // original rows: copy into the taller padded layout
            for (int c = 0; c < 3; c++)
            {
                output[batchIndex * (resizedH + offset) * resizedW * 3 + (y * resizedW + x) + c * resizedW * (resizedH + offset)] =
                    input[batchIndex * resizedH * resizedW * 3 + (y * resizedW + x) + c * resizedW *resizedH];
            }
        }
        else
        {
            // bottom padding rows: zero-fill all 3 planes
            for (int c = 0; c < 3; c++)
            {
                output[batchIndex * (resizedH + offset) * resizedW * 3 + (y * resizedW + x) + c * resizedW * (resizedH + offset)] = 0.0f;
            }
        }
    }
}
// Element-wise copy of `dims` floats from `input` to `output`.
__global__ void copyArray(float* input, float* output, int dims)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < dims)
    {
        output[idx] = input[idx];
    }
}
// Host launcher for copyKernel: inserts one planar 3-plane image of `k`
// elements per plane into slot `index` of the batched output buffer.
extern "C" void copyImg(void* input, void* output, int index, int k)
{
    const int threads = 512;
    const int blocks = (k + threads - 1) / threads;
    copyKernel << <blocks, threads >> > ((unsigned char *)input, (unsigned char *)output, index, k);
}
// Host launcher for copyKernelD2: D2-pipeline variant of copyImg, inserting
// one planar 3-plane image of `k` elements per plane into slot `index`.
extern "C" void copyImgD2(uint8_t* input, uint8_t* output, int index, int k)
{
    const int threads = 512;
    const int blocks = (k + threads - 1) / threads;
    copyKernelD2 << <blocks, threads >> > ((unsigned char *)input, (unsigned char *)output, index, k);
}
// Host launcher for copyKernelAlign: strips the per-row alignment padding
// (`dw` bytes per row) while packing one image into slot `index`.
extern "C" void copyImgAlign(uint8_t* input, uint8_t* output, int index, int srcW, int srcH, int dw)
{
    const int threads = 512;
    const int totalBytes = (srcW * 3 + dw) * srcH;
    const int blocks = (totalBytes + threads - 1) / threads;
    copyKernelAlign << <blocks, threads >> > (input, output, index, srcW, srcH, dw);
}
// Host launcher for resizKernel: bilinear resize plus mean subtraction.
// `size` must be batch * dstW * dstH (one thread per destination pixel).
extern "C" void resizeAndNorm(void* inputGpu, void* resizedOutputGpu, void* normGpu, int size, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
    const int threads = 128;
    const int blocks = (size + threads - 1) / threads;
    resizKernel << <blocks, threads >> > ((unsigned char *)inputGpu, (float *)resizedOutputGpu, (float*)normGpu, dstW, dstH, srcW, srcH, mean1, mean2, mean3);
}
// Host launcher for resizKernel_torch (truncating/nearest sampling variant).
// `size` must be batch * dstW * dstH (one thread per destination pixel).
extern "C" void resizeAndNorm_torch(void* inputGpu, void* resizedOutputGpu, void* normGpu, int size, int dstW, int dstH, int srcW, int srcH, float mean1, float mean2, float mean3)
{
    const int threads = 128;
    const int blocks = (size + threads - 1) / threads;
    resizKernel_torch << <blocks, threads >> > ((unsigned char *)inputGpu, (float *)resizedOutputGpu, (float*)normGpu, dstW, dstH, srcW, srcH, mean1, mean2, mean3);
}
// Host launcher: zero-pads the shorter dimension of a batch of planar CHW
// images up to a multiple of 32 (columns when W > H, rows when W < H), or
// plain-copies when the image is square.
// NOTE(review): (x / 32 + 1) * 32 appends a full extra 32 even when the side
// is already a multiple of 32 -- presumably intentional so offset > 0;
// confirm output-buffer sizing at the callers.
extern "C" void padding(void* input, void* output, int resizedW, int resizedH, int batchSize)
{	// width greater than height
    bool isGreaterWidth;
    int afterPaddingW, afterPaddingH, offset;
    if (resizedW > resizedH)
    {
        isGreaterWidth = true;
        afterPaddingW = (resizedW / 32 + 1) * 32;
        offset = afterPaddingW - resizedW;
        // one thread per (batch, row, padded-col); kernel writes 3 planes
        int dims = batchSize * afterPaddingW * resizedH;
        int BS = 128;
        int GS = (dims + BS - 1) / BS;
        paddingKernel << <GS, BS >> > ((float*)input, (float*)output, resizedW, resizedH, offset, isGreaterWidth);
    }
    else if(resizedW < resizedH)
    {
        isGreaterWidth = false;
        afterPaddingH = (resizedH / 32 + 1) * 32;
        offset = afterPaddingH - resizedH;
        int dims = batchSize * afterPaddingH * resizedW;
        int BS = 128;
        int GS = (dims + BS - 1) / BS;
        paddingKernel << <GS, BS >> > ((float*)input, (float*)output, resizedW, resizedH, offset, isGreaterWidth);
    }
    else
    {
        // square image: nothing to pad, copy every element (3 channels)
        int dims = batchSize * resizedH * resizedW * 3;
        int BS = 128;
        int GS = (dims + BS - 1) / BS;
        copyArray << <GS, BS >> > ((float*)input, (float*)output, dims);
    }
}
// Reports a failed CUDA call (caller message, file, line, and the
// cudaGetErrorString reason) to stderr, waits for a keypress so a console
// window stays visible, then terminates the process. Invoked through the
// SAFE_CALL macro below.
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
        std::cin.get();
        exit(EXIT_FAILURE);
    }
}
//__forceinline__ __device__ float clip(float in, float low, float high)
//{
//	return (in < low) ? low : (in > high ? high-1 : in);
//}
// Clamp x into [a, b-1] (upper bound exclusive, mirroring the commented-out
// device function above). Fix: the original macro left its arguments and the
// whole expression unparenthesized and carried a trailing ';', which breaks
// when the macro is used inside a larger expression; all existing call sites
// in this file expand identically with the hygienic form.
#define clip(x, a, b) ((x) >= (a) ? ((x) < (b) ? (x) : (b)-1) : (a))
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
// Batched bilinear (OpenCV-style) resize of interleaved HWC uchar images to
// interleaved HWC float images. Launch with a 2-D grid covering
// outputWidth x outputHeight; each thread resizes one destination pixel for
// every image in the batch. Only inputChannels == 3 is implemented; the
// 1-channel branch is an intentional no-op.
// NOTE(review): pixelGroupSizeX/pixelGroupSizeY are accepted but unused (the
// scale is recomputed from the width/height arguments); pitchInput and
// pitchOutput are computed but unused.
__global__ void resizeD2Kernel(uint8_t* input,
    float* output,
    const int outputWidth,
    const int outputHeight,
    const int inputWidth,
    const int inputHeight,
    const float pixelGroupSizeX,
    const float pixelGroupSizeY,
    const int inputChannels,
    const int batchSizes
)
{
    const int dx = blockIdx.x * blockDim.x + threadIdx.x;
    const int dy = blockIdx.y * blockDim.y + threadIdx.y;
    const int pitchInput = inputWidth * inputChannels;
    const int pitchOutput = outputWidth * inputChannels;
    const int inputSize = inputWidth * inputHeight * inputChannels;
    const int outputSize = outputHeight * outputWidth * inputChannels;
    if ((dx < outputWidth) && (dy < outputHeight))
    {
        for (int batchSize = 0; batchSize < batchSizes; batchSize++)
        {
            if (inputChannels == 1) {
            }
            else if (inputChannels == 3) {
                // scale factor: input size / output size
                double scale_x = (double)inputWidth / outputWidth;
                double scale_y = (double)inputHeight / outputHeight;
                int xmax = outputWidth;
                // source x with half-pixel-center alignment
                float fx = (float)((dx + 0.5) * scale_x - 0.5);
                // floor: round down (original note: sx is at most 255)
                int sx = floor(fx);
                // fx becomes the fractional part (distance from the floor)
                fx = fx - sx;
                int isx1 = sx;
                if (isx1 < 0) {
                    fx = 0.0;
                    isx1 = 0;
                }
                if (isx1 >= (inputWidth - 1)) {
                    // NOTE(review): this folds the ROW index dy into a COLUMN
                    // limit (xmax); dx looks intended -- TODO confirm against
                    // the OpenCV resize kernel this was adapted from.
                    xmax = ::min(xmax, dy);
                    fx = 0;
                    isx1 = (inputWidth - 1);
                }
                float2 cbufx;
                cbufx.x = (1.f - fx);
                cbufx.y = fx;
                float fy = (float)((dy + 0.5) * scale_y - 0.5);
                int sy = floor(fy);
                fy = fy - sy;
                int isy1 = clip(sy - 1 + 1 + 0, 0, inputHeight);
                int isy2 = clip(sy - 1 + 1 + 1, 0, inputHeight);
                float2 cbufy;
                cbufy.x = (1.f - fy);
                cbufy.y = fy;
                int isx2 = isx1 + 1;
                if (isx2 >= inputWidth)
                {
                    isx2 = isx2 - 1;
                }
                float3 d0;
                // the four neighboring source pixels (RGB each)
                float3 s11 = make_float3(input[inputSize * (batchSize)+(isy1 * inputWidth + isx1) * inputChannels + 0], input[inputSize * (batchSize)+(isy1 * inputWidth + isx1) * inputChannels + 1], input[inputSize * (batchSize)+(isy1 * inputWidth + isx1) * inputChannels + 2]);
                float3 s12 = make_float3(input[inputSize * (batchSize)+(isy1 * inputWidth + isx2) * inputChannels + 0], input[inputSize * (batchSize)+(isy1 * inputWidth + isx2) * inputChannels + 1], input[inputSize * (batchSize)+(isy1 * inputWidth + isx2) * inputChannels + 2]);
                float3 s21 = make_float3(input[inputSize * (batchSize)+(isy2 * inputWidth + isx1) * inputChannels + 0], input[inputSize * (batchSize)+(isy2 * inputWidth + isx1) * inputChannels + 1], input[inputSize * (batchSize)+(isy2 * inputWidth + isx1) * inputChannels + 2]);
                float3 s22 = make_float3(input[inputSize * (batchSize)+(isy2 * inputWidth + isx2) * inputChannels + 0], input[inputSize * (batchSize)+(isy2 * inputWidth + isx2) * inputChannels + 1], input[inputSize * (batchSize)+(isy2 * inputWidth + isx2) * inputChannels + 2]);
                float h_rst00, h_rst01;
                // per channel: horizontal lerp on both rows, then vertical
                if (dy > xmax - 1)
                {
                    h_rst00 = s11.x;
                    h_rst01 = s21.x;
                }
                else
                {
                    h_rst00 = s11.x * cbufx.x + s12.x * cbufx.y;
                    h_rst01 = s21.x * cbufx.x + s22.x * cbufx.y;
                }
                d0.x = h_rst00 * cbufy.x + h_rst01 * cbufy.y;
                if (dy > xmax - 1)
                {
                    h_rst00 = s11.y;
                    h_rst01 = s21.y;
                }
                else
                {
                    h_rst00 = s11.y * cbufx.x + s12.y * cbufx.y;
                    h_rst01 = s21.y * cbufx.x + s22.y * cbufx.y;
                }
                d0.y = h_rst00 * cbufy.x + h_rst01 * cbufy.y;
                if (dy > xmax - 1)
                {
                    h_rst00 = s11.z;
                    h_rst01 = s21.z;
                }
                else
                {
                    h_rst00 = s11.z * cbufx.x + s12.z * cbufx.y;
                    h_rst01 = s21.z * cbufx.x + s22.z * cbufx.y;
                }
                d0.z = h_rst00 * cbufy.x + h_rst01 * cbufy.y;
                output[outputSize * (batchSize)+(dy*outputWidth + dx) * 3 + 0] = (d0.x);
                output[outputSize * (batchSize)+(dy*outputWidth + dx) * 3 + 1] = (d0.y);
                output[outputSize * (batchSize)+(dy*outputWidth + dx) * 3 + 2] = (d0.z);
            }
            else {
            }
        }
    }
}
// De-interleaves batched HWC float pixels into planar CHW layout.
// `dims` is the per-channel pixel count (H*W); resizedInput holds batchSize
// images of 3*dims interleaved floats; transform receives the same data with
// the 3 channels stored as contiguous planes, channel order preserved.
// Launch: 1-D grid covering dims*3 threads; each thread handles one
// interleaved element index and loops over the batch.
// Cleanup: the three `if (i % 3 == k)` branches collapse to a single
// channel*dims offset, and the commented-out debug printf blocks were
// removed (no behavior change).
__global__ void transformD2Kernel(float* resizedInput, float* transform, int batchSize, int dims)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= (dims * 3))
    {
        return;
    }
    int channel = i % 3;     // interleaved channel of this element
    int pixel = i / 3;       // pixel index within one image
    for (int k = 0; k < batchSize; k++)
    {
        transform[k * 3 * dims + channel * dims + pixel] = resizedInput[k * 3 * dims + i];
    }
}
// Per-channel mean subtraction on planar CHW data: plane 0 gets mean3,
// plane 1 gets mean2, plane 2 gets mean1 (reverse-order channel swap kept
// from the original). std1/std2/std3 are accepted for signature parity with
// normYolov3Kernel but are unused -- no division is performed here.
// Launch: 1-D grid covering dims*batchSize threads.
__global__ void normD2Kernel(float* transform, float* norm, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= (dims*batchSize))
    {
        return;
    }
    int batchIndex = i / (dims);
    int indexPerImg = i % (dims);
    // Fix: the original looped c = 0..2 with one `if (c == k)` branch firing
    // per iteration; the loop was redundant, so the three channel updates are
    // written straight-line (behavior unchanged).
    int base = batchIndex * 3 * (dims);
    norm[base + indexPerImg] = ((transform[base + indexPerImg]) - mean3);
    norm[base + (dims) + indexPerImg] = (float(transform[base + (dims) + indexPerImg]) - mean2);
    norm[base + (dims) * 2 + indexPerImg] = (float(transform[base + (dims) * 2 + indexPerImg]) - mean1);
}
// Host launcher for the D2 pipeline: de-interleaves the resized HWC image
// into planar CHW (transformD2Kernel), then subtracts per-channel means
// (normD2Kernel). `dims` is the per-channel pixel count (H*W); the std
// arguments are forwarded but unused by normD2Kernel.
extern "C" void transformD2(void* resizedInput, void* transform, void* normGpu, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
    const int threadsPerBlock = 1024;
    const int transformBlocks = (dims * 3 + threadsPerBlock - 1) / threadsPerBlock;
    const int normBlocks = (dims*batchSize + threadsPerBlock - 1) / threadsPerBlock;
    transformD2Kernel << <transformBlocks, threadsPerBlock >> > ((float*)resizedInput, (float*)transform, batchSize, dims);
    normD2Kernel << <normBlocks, threadsPerBlock >> > ((float*)transform, (float*)normGpu, batchSize, dims, mean1, mean2, mean3, std1, std2, std3);
}
// Per-channel normalization for YOLOv3 preprocessing.
// Buffers are planar CHW per batch image: [batch][3][dims], where `dims` is
// the per-channel pixel count (H*W). Each thread normalizes one pixel
// position in all three channel planes. Note the mean/std triplets are
// applied in reverse channel order (plane 0 uses mean3/std3, plane 2 uses
// mean1/std1), matching the original channel swap.
// Launch: 1-D grid covering dims*batchSize threads.
__global__ void normYolov3Kernel(float* transform, float* norm, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // guard the partial last block
    if (i >= (dims*batchSize))
    {
        return;
    }
    int batchIndex = i / (dims);
    int indexPerImg = i % (dims);
    // Fix: the original looped c = 0..2 with exactly one `if (c == k)` branch
    // firing per iteration; the loop was redundant, so the three channel
    // updates are written straight-line (behavior unchanged).
    int base = batchIndex * 3 * (dims);
    norm[base + indexPerImg] = (float(transform[base + indexPerImg]) - mean3) / std3;
    norm[base + (dims) + indexPerImg] = (float(transform[base + (dims) + indexPerImg]) - mean2) / std2;
    norm[base + (dims) * 2 + indexPerImg] = (float(transform[base + (dims) * 2 + indexPerImg]) - mean1) / std1;
}
// Host launcher for YOLOv3 preprocessing: de-interleaves the resized HWC
// image into planar CHW (transformD2Kernel), then applies per-channel
// mean/std normalization (normYolov3Kernel). `dims` is the per-channel
// pixel count (H*W).
extern "C" void transformYolov3(void* resizedInput, void* transform, void* normGpu, int batchSize, int dims, float mean1, float mean2, float mean3, float std1, float std2, float std3)
{
    const int threadsPerBlock = 1024;
    const int transformBlocks = (dims * 3 + threadsPerBlock - 1) / threadsPerBlock;
    const int normBlocks = (dims*batchSize + threadsPerBlock - 1) / threadsPerBlock;
    transformD2Kernel << <transformBlocks, threadsPerBlock >> > ((float*)resizedInput, (float*)transform, batchSize, dims);
    normYolov3Kernel << <normBlocks, threadsPerBlock >> > ((float*)transform, (float*)normGpu, batchSize, dims, mean1, mean2, mean3, std1, std2, std3);
}
extern "C" void resizeAndNormD2(uint8_t* inputGpu, float* resizedOutputGpu, int dstW, int dstH, int srcW, int srcH, int batchSize)
{
const dim3 block(16, 16,1);
//Calculate grid size to cover the whole image
const dim3 grid((dstW + block.x - 1) / block.x, (dstH + block.y - 1) / block.y,1);
const dim3 grid1(40, 40, 1);
const float pixelGroupSizeY = float(srcH) / float(dstH);
const float pixelGroupSizeX = float(srcW) / float(dstW);
resizeD2Kernel << <grid, block >> > ((uint8_t*)inputGpu, (float*)resizedOutputGpu, dstW, dstH, srcW, srcH, pixelGroupSizeX, pixelGroupSizeY, 3, batchSize);
} |
35624b069df27b7acfe75aa58f449dae81c8b40a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/extrema.h>
// Device-side wrapper: runs thrust::minmax_element with the given execution
// policy from inside a kernel and stores the resulting iterator pair through
// `result`. Intended to be launched with a single thread.
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__
void minmax_element_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
*result = thrust::minmax_element(exec, first, last);
}
// Overload taking a custom comparison predicate; otherwise identical to the
// wrapper above (single-thread kernel writing the iterator pair to *result).
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename BinaryPredicate>
__global__
void minmax_element_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result)
{
*result = thrust::minmax_element(exec, first, last, pred);
}
// Checks that thrust::minmax_element executed inside a kernel (with the
// given execution policy) finds the same min/max positions as the host-side
// call, both with the default comparison and with thrust::greater (which
// swaps the roles of .first and .second).
template<typename ExecutionPolicy>
void TestMinMaxElementDevice(ExecutionPolicy exec)
{
size_t n = 1000;
thrust::host_vector<int> h_data = unittest::random_samples<int>(n);
thrust::device_vector<int> d_data = h_data;
typename thrust::host_vector<int>::iterator h_min;
typename thrust::host_vector<int>::iterator h_max;
typename thrust::device_vector<int>::iterator d_min;
typename thrust::device_vector<int>::iterator d_max;
typedef thrust::pair<
typename thrust::device_vector<int>::iterator,
typename thrust::device_vector<int>::iterator
> pair_type;
// one-element device buffer receiving the iterator pair from the kernel
thrust::device_vector<pair_type> d_result(1);
h_min = thrust::minmax_element(h_data.begin(), h_data.end()).first;
h_max = thrust::minmax_element(h_data.begin(), h_data.end()).second;
d_min = thrust::minmax_element(d_data.begin(), d_data.end()).first;
d_max = thrust::minmax_element(d_data.begin(), d_data.end()).second;
// run the same algorithm from device code via a single-thread kernel
hipLaunchKernelGGL(( minmax_element_kernel), dim3(1),dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), d_result.begin());
d_min = ((pair_type)d_result[0]).first;
d_max = ((pair_type)d_result[0]).second;
// compare positions (offsets), not values, against the host reference
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
// with thrust::greater the pair is (largest, smallest), hence the swap
h_max = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<int>()).first;
h_min = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<int>()).second;
hipLaunchKernelGGL(( minmax_element_kernel), dim3(1),dim3(1), 0, 0, exec, d_data.begin(), d_data.end(), thrust::greater<int>(), d_result.begin());
d_max = ((pair_type)d_result[0]).first;
d_min = ((pair_type)d_result[0]).second;
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
}
// Run the in-kernel minmax_element test with the sequential policy.
void TestMinMaxElementDeviceSeq()
{
TestMinMaxElementDevice(thrust::seq);
}
DECLARE_UNITTEST(TestMinMaxElementDeviceSeq);
// Run the in-kernel minmax_element test with the device (parallel) policy.
void TestMinMaxElementDeviceDevice()
{
TestMinMaxElementDevice(thrust::device);
}
DECLARE_UNITTEST(TestMinMaxElementDeviceDevice);
// Checks minmax_element on an explicit (non-default) stream: min value 1 at
// index 2, max value 5 at index 1 -- i.e. the first occurrence of each
// extremum in {3, 5, 1, 2, 5, 1}.
void TestMinMaxElementCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
Vector data(6);
data[0] = 3;
data[1] = 5;
data[2] = 1;
data[3] = 2;
data[4] = 5;
data[5] = 1;
hipStream_t s;
hipStreamCreate(&s);
ASSERT_EQUAL( *thrust::minmax_element(thrust::hip::par.on(s), data.begin(), data.end()).first, 1);
ASSERT_EQUAL( *thrust::minmax_element(thrust::hip::par.on(s), data.begin(), data.end()).second, 5);
ASSERT_EQUAL( thrust::minmax_element(thrust::hip::par.on(s), data.begin(), data.end()).first - data.begin(), 2);
ASSERT_EQUAL( thrust::minmax_element(thrust::hip::par.on(s), data.begin(), data.end()).second - data.begin(), 1);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestMinMaxElementCudaStreams);
| 35624b069df27b7acfe75aa58f449dae81c8b40a.cu | #include <unittest/unittest.h>
#include <thrust/extrema.h>
// Device-side wrapper: runs thrust::minmax_element with the given execution
// policy from inside a kernel and stores the resulting iterator pair through
// `result`. Intended to be launched <<<1,1>>>.
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__
void minmax_element_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
*result = thrust::minmax_element(exec, first, last);
}
// Overload taking a custom comparison predicate; otherwise identical to the
// wrapper above (single-thread kernel writing the iterator pair to *result).
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename BinaryPredicate>
__global__
void minmax_element_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result)
{
*result = thrust::minmax_element(exec, first, last, pred);
}
// Checks that thrust::minmax_element executed inside a kernel (with the
// given execution policy) finds the same min/max positions as the host-side
// call, both with the default comparison and with thrust::greater (which
// swaps the roles of .first and .second).
template<typename ExecutionPolicy>
void TestMinMaxElementDevice(ExecutionPolicy exec)
{
size_t n = 1000;
thrust::host_vector<int> h_data = unittest::random_samples<int>(n);
thrust::device_vector<int> d_data = h_data;
typename thrust::host_vector<int>::iterator h_min;
typename thrust::host_vector<int>::iterator h_max;
typename thrust::device_vector<int>::iterator d_min;
typename thrust::device_vector<int>::iterator d_max;
typedef thrust::pair<
typename thrust::device_vector<int>::iterator,
typename thrust::device_vector<int>::iterator
> pair_type;
// one-element device buffer receiving the iterator pair from the kernel
thrust::device_vector<pair_type> d_result(1);
h_min = thrust::minmax_element(h_data.begin(), h_data.end()).first;
h_max = thrust::minmax_element(h_data.begin(), h_data.end()).second;
d_min = thrust::minmax_element(d_data.begin(), d_data.end()).first;
d_max = thrust::minmax_element(d_data.begin(), d_data.end()).second;
// run the same algorithm from device code via a single-thread kernel
minmax_element_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), d_result.begin());
d_min = ((pair_type)d_result[0]).first;
d_max = ((pair_type)d_result[0]).second;
// compare positions (offsets), not values, against the host reference
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
// with thrust::greater the pair is (largest, smallest), hence the swap
h_max = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<int>()).first;
h_min = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<int>()).second;
minmax_element_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), thrust::greater<int>(), d_result.begin());
d_max = ((pair_type)d_result[0]).first;
d_min = ((pair_type)d_result[0]).second;
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
}
// Run the in-kernel minmax_element test with the sequential policy.
void TestMinMaxElementDeviceSeq()
{
TestMinMaxElementDevice(thrust::seq);
}
DECLARE_UNITTEST(TestMinMaxElementDeviceSeq);
// Run the in-kernel minmax_element test with the device (parallel) policy.
void TestMinMaxElementDeviceDevice()
{
TestMinMaxElementDevice(thrust::device);
}
DECLARE_UNITTEST(TestMinMaxElementDeviceDevice);
// Checks minmax_element on an explicit (non-default) stream: min value 1 at
// index 2, max value 5 at index 1 -- i.e. the first occurrence of each
// extremum in {3, 5, 1, 2, 5, 1}.
void TestMinMaxElementCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
Vector data(6);
data[0] = 3;
data[1] = 5;
data[2] = 1;
data[3] = 2;
data[4] = 5;
data[5] = 1;
cudaStream_t s;
cudaStreamCreate(&s);
ASSERT_EQUAL( *thrust::minmax_element(thrust::cuda::par.on(s), data.begin(), data.end()).first, 1);
ASSERT_EQUAL( *thrust::minmax_element(thrust::cuda::par.on(s), data.begin(), data.end()).second, 5);
ASSERT_EQUAL( thrust::minmax_element(thrust::cuda::par.on(s), data.begin(), data.end()).first - data.begin(), 2);
ASSERT_EQUAL( thrust::minmax_element(thrust::cuda::par.on(s), data.begin(), data.end()).second - data.begin(), 1);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestMinMaxElementCudaStreams);
|
71e42af1d94f3e1aea2f37d1dc9d37d1bfa9f85d.hip | // !!! This is a file automatically generated by hipify!!!
/*
<one line to give the library's name and an idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <fstream>
#include <iostream>
#include <hip/hip_runtime.h>
// #include <boost/format.hpp>
#include "game_display.h"
#include "game_scene/gssim_view.h"
#include "../game_core/twgame.cu"
// #include "game_core/physics.h"
// #include "game_core/tankbullet.h"
// #include "game_core/basictank.h"
#define SCALE_FACTOR 12
#define CUDA_BLOCKS 1
#define CUDA_THREADS MAX_ARRAY_SIZE
using namespace std;
// using namespace boost;
// helper function
// Maps a physics-space coordinate to screen space: scales by SCALE_FACTOR
// (negating Y, since screen Y grows downward) and recenters on the middle
// of the graphics context.
void apply_transform(CL_GraphicContext* gc, Physics::vec2& c){
	c.x = c.x * SCALE_FACTOR + gc->get_width()/2;
	c.y = c.y * -SCALE_FACTOR + gc->get_height()/2;
}
// Builds the simulation-replay view: loads fonts and sprites from the
// resource manager, initializes the physics runner, bullets, tanks and AI,
// then overwrites the freshly initialized tank/bullet/physics state with the
// recorded simulation data in `sd` and re-links the internal pointers.
GSSimView::GSSimView(CL_GraphicContext& gc, CL_ResourceManager& resources,
					 sim_data& sd)
: m_physrunner(new Physics::PhysRunner::RunnerCore()),
m_simd(sd){
	// setup the debug text
	CL_FontDescription desc;
	desc.set_typeface_name("monospace");
	desc.set_height(12);
	m_debugfont.reset(new CL_Font_System(gc, desc));
	m_background.reset(new CL_Sprite(gc,
									 "game_assets/background",
									 &resources));
	m_testbullet.reset(new CL_Sprite(gc,
									 "game_assets/bullet",
									 &resources));
	m_testtank.reset(new CL_Sprite(gc,
								   "game_assets/tank_blu",
								   &resources));
	m_testtank2.reset(new CL_Sprite(gc,
									"game_assets/tank_red",
									&resources));
	Physics::PhysRunner::initialize(m_physrunner.get());
	TankBullet::initialize(&m_bullets, m_physrunner.get());
	BasicTank::initialize(&m_tanks, m_physrunner.get(), &m_bullets);
	AI::initialize(&m_ai, &m_tanks, &m_bullets);
	m_playertank = 0;
	m_player2tank = 1;
	// get the simulation data (replaces the state initialized above)
	m_tanks = m_simd.tc;
	m_bullets = m_simd.bc;
	m_physrunner->bodies = m_simd.bodies[0];
	// reset the pointers (the copied structs still point at the source's
	// runner/bullets, so re-aim them at our own instances)
	BasicTank::reset_pointers(&m_tanks, m_physrunner.get(), &m_bullets);
	TankBullet::reset_phys_pointer(&m_bullets, m_physrunner.get());
	m_frames_elapsed = 0;
}
// Releases tank and bullet resources; m_physrunner is freed by its owning
// smart pointer.
GSSimView::~GSSimView(){
	BasicTank::destroy(&m_tanks);
	TankBullet::destroy(&m_bullets);
}
// Nothing to tear down when this scene is deactivated.
void GSSimView::onSceneDeactivate(){
}
// Restart the frame timer each time the scene becomes active.
void GSSimView::onSceneActivate(){
	m_timer.restart();
}
#include <iostream>
using namespace std;
// Draws one frame: background, bullets, both tanks, and a debug status line
// (AI state vectors, player position, frames elapsed).
void GSSimView::onFrameRender(CL_GraphicContext* gc){
	// draw the background
	Physics::vec2 pos;
	apply_transform(gc, pos);
	m_background->draw(*gc, (f32)pos.x, (f32)pos.y);
	// draw the bullets, yes, we're cheating the numbers
	// OOP can wait another day
	// NOTE(review): hard-coded 3 here -- presumably should track MAX_BULLETS;
	// confirm against the bullet container's capacity.
	for(int i = 0; i < 3; ++i){
		pos = TankBullet::get_bullet_pos(&m_bullets, i);
		apply_transform(gc, pos);
		// if(m_bullets.state[i] != BULLET_STATE_INACTIVE){
		m_testbullet->draw(*gc, (f32)pos.x, (f32)pos.y);
		// }
	}
	// draw the tanks (activity checks are intentionally commented out so
	// inactive tanks still render during replay)
	// if(m_tanks.state[m_playertank] != TANK_STATE_INACTIVE){
	pos = BasicTank::get_tank_pos(&m_tanks, m_playertank);
	apply_transform(gc, pos);
	f32 rot = BasicTank::get_tank_rot(&m_tanks, m_playertank);
	m_testtank->set_angle(CL_Angle(-rot, cl_degrees));
	m_testtank->draw(*gc, (f32)pos.x, (f32)pos.y);
	// }
	// if(m_tanks.state[m_player2tank] != TANK_STATE_INACTIVE){
	pos = BasicTank::get_tank_pos(&m_tanks, m_player2tank);
	apply_transform(gc, pos);
	rot = BasicTank::get_tank_rot(&m_tanks, m_player2tank);
	m_testtank2->set_angle(CL_Angle(-rot, cl_degrees));
	m_testtank2->draw(*gc, (f32)pos.x, (f32)pos.y);
	// }
	// Debug info
	CL_StringFormat fmt("States: %1 %2 %3 %4 | Player pos: %5 %6 | Time elapsed: %7");
	fmt.set_arg(1, m_ai.bullet_vector[0]);
	fmt.set_arg(2, m_ai.tank_vector[0]);
	fmt.set_arg(3, m_ai.direction_state[0]);
	fmt.set_arg(4, m_ai.distance_state[0]);
	pos = Physics::PhysRunner::get_cur_pos(m_physrunner.get(),
										   m_tanks.phys_id[m_playertank]);
	fmt.set_arg(5, pos.x);
	fmt.set_arg(6, pos.y);
	fmt.set_arg(7, m_frames_elapsed);
	m_dbgmsg = fmt.get_result();
	m_debugfont->draw_text(*gc, 1, 12, m_dbgmsg, CL_Colorf::red);
}
// Advances the replay by one frame: while recorded frames remain, loads the
// recorded physics bodies for this frame (AI/physics timesteps are disabled
// -- the replay drives positions directly), updates bullets/tanks, and runs
// bullet-tank and tank-tank collision checks. Sprites are animated and the
// frame counter advances every call, even past the end of the recording.
void GSSimView::onFrameUpdate(double dt,
							  CL_InputDevice* keyboard,
							  CL_InputDevice* mouse){
	if(m_frames_elapsed < MAX_BODY_RECORD){
		// perform all the update
		//AI::timestep(&m_ai, dt);
		//Physics::PhysRunner::timestep(m_physrunner.get(), dt);
		// copy over the timestep data
		m_physrunner->bodies = m_simd.bodies[m_frames_elapsed];
		// pos/rot below are fetched for the (disabled) debug prints only
		Physics::vec2 pos = m_physrunner->bodies.cur_pos.get_vec2(m_tanks.phys_id[0]);
		f32 rot = m_physrunner->bodies.rotation[m_tanks.phys_id[0]];
		// cout << format("E.x: %1 E.y: %2 E.r: %3") % pos.x % pos.y % rot << endl;
		// printf("E.x: %.12f E.y: %.12f E.r: %.12f\n", pos.x, pos.y, rot);
		TankBullet::update(&m_bullets, dt);
		BasicTank::update(&m_tanks, dt);
		// perform collision detection for the bullets
		for(int i = 0; i < MAX_BULLETS; ++i){
			Collision::bullet_tank_check(&m_bullets, &m_tanks, i);
		}
		// perform collision detection for tanks
		for(int i = 0; i < MAX_TANKS; ++i){
			Collision::tank_tank_check(&m_tanks, i);
		}
	}
	// update the sprites
	m_background->update();
	m_testbullet->update();
	m_testtank->update();
	// if(m_tanks.state[0] != TANK_STATE_INACTIVE){
	Physics::vec2 pos = BasicTank::get_tank_pos(&m_tanks, m_playertank);
	// if(pos.x != OFFSCREEN_X && pos.y != OFFSCREEN_Y){
	++m_frames_elapsed;
	// }
}
| 71e42af1d94f3e1aea2f37d1dc9d37d1bfa9f85d.cu | /*
<one line to give the library's name and an idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <fstream>
#include <iostream>
#include <cuda.h>
// #include <boost/format.hpp>
#include "game_display.h"
#include "game_scene/gssim_view.h"
#include "../game_core/twgame.cu"
// #include "game_core/physics.h"
// #include "game_core/tankbullet.h"
// #include "game_core/basictank.h"
#define SCALE_FACTOR 12
#define CUDA_BLOCKS 1
#define CUDA_THREADS MAX_ARRAY_SIZE
using namespace std;
// using namespace boost;
// helper function
// Maps a point from simulation space to screen space in place:
// scale up by SCALE_FACTOR, mirror the y axis (screen y grows downward),
// then translate the origin to the centre of the graphic context.
void apply_transform(CL_GraphicContext* gc, Physics::vec2& c){
    c.x = c.x * SCALE_FACTOR + gc->get_width()/2;
    c.y = c.y * -SCALE_FACTOR + gc->get_height()/2;
}
// Builds the simulation-replay view: loads the debug font and sprites from
// the resource manager, initialises the physics/bullet/tank/AI subsystems,
// then overwrites their state with the recorded simulation data in `sd`
// so this scene can play the recording back from frame 0.
// Note: the initialisation order matters — subsystems are initialised
// first, then replaced by recorded state, then their internal pointers are
// re-seated to this view's own physics runner / bullet collection.
GSSimView::GSSimView(CL_GraphicContext& gc, CL_ResourceManager& resources,
                     sim_data& sd)
    : m_physrunner(new Physics::PhysRunner::RunnerCore()),
      m_simd(sd){
    // setup the debug text
    CL_FontDescription desc;
    desc.set_typeface_name("monospace");
    desc.set_height(12);
    m_debugfont.reset(new CL_Font_System(gc, desc));
    // Load the drawable assets.
    m_background.reset(new CL_Sprite(gc,
                                     "game_assets/background",
                                     &resources));
    m_testbullet.reset(new CL_Sprite(gc,
                                     "game_assets/bullet",
                                     &resources));
    m_testtank.reset(new CL_Sprite(gc,
                                   "game_assets/tank_blu",
                                   &resources));
    m_testtank2.reset(new CL_Sprite(gc,
                                    "game_assets/tank_red",
                                    &resources));
    // Bring the gameplay subsystems up before overwriting them below.
    Physics::PhysRunner::initialize(m_physrunner.get());
    TankBullet::initialize(&m_bullets, m_physrunner.get());
    BasicTank::initialize(&m_tanks, m_physrunner.get(), &m_bullets);
    AI::initialize(&m_ai, &m_tanks, &m_bullets);
    m_playertank = 0;
    m_player2tank = 1;
    // get the simulation data
    m_tanks = m_simd.tc;
    m_bullets = m_simd.bc;
    m_physrunner->bodies = m_simd.bodies[0];
    // reset the pointers (the copied-in state still references the
    // recorder's runner/bullets; point it at ours instead)
    BasicTank::reset_pointers(&m_tanks, m_physrunner.get(), &m_bullets);
    TankBullet::reset_phys_pointer(&m_bullets, m_physrunner.get());
    m_frames_elapsed = 0;
}
// Tears down the tank and bullet subsystems. m_physrunner appears to be a
// smart-pointer member (reset()/get() usage elsewhere) and releases itself.
GSSimView::~GSSimView(){
    BasicTank::destroy(&m_tanks);
    TankBullet::destroy(&m_bullets);
}
// Scene is being left: nothing to tear down for the replay view.
void GSSimView::onSceneDeactivate(){
}
// Scene became active: restart the timer used by this view.
void GSSimView::onSceneActivate(){
    m_timer.restart();
}
#include <iostream>
using namespace std;
// Renders one frame: background, bullets, both tanks, then a debug HUD line
// with AI state and the player's physics position.
// NOTE(review): `pos` relies on Physics::vec2 default-constructing to the
// origin for the background draw — confirm vec2's default ctor zeroes it.
void GSSimView::onFrameRender(CL_GraphicContext* gc){
    // draw the background
    Physics::vec2 pos;
    apply_transform(gc, pos);
    m_background->draw(*gc, (f32)pos.x, (f32)pos.y);
    // draw the bullets, yes, we're cheating the numbers
    // OOP can wait another day
    // NOTE(review): hard-coded 3 presumably mirrors the bullet count
    // (MAX_BULLETS?) — confirm and replace with the constant.
    for(int i = 0; i < 3; ++i){
        pos = TankBullet::get_bullet_pos(&m_bullets, i);
        apply_transform(gc, pos);
        // if(m_bullets.state[i] != BULLET_STATE_INACTIVE){
        m_testbullet->draw(*gc, (f32)pos.x, (f32)pos.y);
        // }
    }
    // draw the tanks
    // if(m_tanks.state[m_playertank] != TANK_STATE_INACTIVE){
    pos = BasicTank::get_tank_pos(&m_tanks, m_playertank);
    apply_transform(gc, pos);
    // Sprite angle is negated: sim rotation and screen rotation wind
    // in opposite directions (y axis is mirrored by apply_transform).
    f32 rot = BasicTank::get_tank_rot(&m_tanks, m_playertank);
    m_testtank->set_angle(CL_Angle(-rot, cl_degrees));
    m_testtank->draw(*gc, (f32)pos.x, (f32)pos.y);
    // }
    // if(m_tanks.state[m_player2tank] != TANK_STATE_INACTIVE){
    pos = BasicTank::get_tank_pos(&m_tanks, m_player2tank);
    apply_transform(gc, pos);
    rot = BasicTank::get_tank_rot(&m_tanks, m_player2tank);
    m_testtank2->set_angle(CL_Angle(-rot, cl_degrees));
    m_testtank2->draw(*gc, (f32)pos.x, (f32)pos.y);
    // }
    // Debug info: AI state words for slot 0, player position (in sim
    // coordinates, untransformed), and the replay frame counter.
    CL_StringFormat fmt("States: %1 %2 %3 %4 | Player pos: %5 %6 | Time elapsed: %7");
    fmt.set_arg(1, m_ai.bullet_vector[0]);
    fmt.set_arg(2, m_ai.tank_vector[0]);
    fmt.set_arg(3, m_ai.direction_state[0]);
    fmt.set_arg(4, m_ai.distance_state[0]);
    pos = Physics::PhysRunner::get_cur_pos(m_physrunner.get(),
                                           m_tanks.phys_id[m_playertank]);
    fmt.set_arg(5, pos.x);
    fmt.set_arg(6, pos.y);
    fmt.set_arg(7, m_frames_elapsed);
    m_dbgmsg = fmt.get_result();
    m_debugfont->draw_text(*gc, 1, 12, m_dbgmsg, CL_Colorf::red);
}
// Advances the replay by one frame: while recorded frames remain, load the
// physics snapshot for this frame index and run bullet/tank updates plus
// collision checks against it; always advance the sprite animations and the
// frame counter. `dt` is forwarded to the subsystem updates; the input
// devices are unused in this view.
void GSSimView::onFrameUpdate(double dt,
                              CL_InputDevice* keyboard,
                              CL_InputDevice* mouse){
    if(m_frames_elapsed < MAX_BODY_RECORD){
        // perform all the update
        //AI::timestep(&m_ai, dt);
        //Physics::PhysRunner::timestep(m_physrunner.get(), dt);
        // copy over the timestep data
        m_physrunner->bodies = m_simd.bodies[m_frames_elapsed];
        // pos/rot feed only the disabled debug prints below.
        Physics::vec2 pos = m_physrunner->bodies.cur_pos.get_vec2(m_tanks.phys_id[0]);
        f32 rot = m_physrunner->bodies.rotation[m_tanks.phys_id[0]];
        // cout << format("E.x: %1 E.y: %2 E.r: %3") % pos.x % pos.y % rot << endl;
        // printf("E.x: %.12f E.y: %.12f E.r: %.12f\n", pos.x, pos.y, rot);
        TankBullet::update(&m_bullets, dt);
        BasicTank::update(&m_tanks, dt);
        // perform collision detection for the bullets
        for(int i = 0; i < MAX_BULLETS; ++i){
            Collision::bullet_tank_check(&m_bullets, &m_tanks, i);
        }
        // perform collision detection for tanks
        for(int i = 0; i < MAX_TANKS; ++i){
            Collision::tank_tank_check(&m_tanks, i);
        }
    }
    // update the sprites
    // NOTE(review): m_testtank2 is drawn in onFrameRender but its update()
    // is not called here — confirm whether its animation should advance.
    m_background->update();
    m_testbullet->update();
    m_testtank->update();
    // if(m_tanks.state[0] != TANK_STATE_INACTIVE){
    Physics::vec2 pos = BasicTank::get_tank_pos(&m_tanks, m_playertank);
    // if(pos.x != OFFSCREEN_X && pos.y != OFFSCREEN_Y){
    ++m_frames_elapsed;
    // }
}
|
c46eb34e966622391c0a094d613f3e0306253f69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/WarpPackedBits.cuh>
#include <limits>
namespace faiss {
namespace gpu {
// A basic implementation that works for the interleaved by vector layout for
// any number of sub-quantizers
// Scans one (query, probe) IVF list in the interleaved-by-vector code layout
// and writes one partial distance per vector into `distance` at the offset
// given by prefixSumOffsets.
// Grid mapping: blockIdx.y = query id, blockIdx.x = probe id. Each warp of
// the block strides over the list's 32-vector groups; each lane accumulates
// the distance for one vector of its group.
// Distance = term1(query,probe) + sum over sub-quantizers of
//            term2(list, sq, code) + term3(query, sq, code).
template <typename EncodeT, int EncodeBits, typename CodeDistanceT>
__global__ void pqScanPrecomputedInterleaved(
        Tensor<float, 2, true> queries,
        // (query id)(probe id)
        Tensor<float, 2, true> precompTerm1,
        // (centroid id)(sub q)(code id)
        Tensor<CodeDistanceT, 3, true> precompTerm2,
        // (query id)(sub q)(code id)
        Tensor<CodeDistanceT, 3, true> precompTerm3,
        Tensor<int, 2, true> topQueryToCentroid,
        void** listCodes,
        int* listLengths,
        Tensor<int, 2, true> prefixSumOffsets,
        Tensor<float, 1, true> distance) {
    // Each block handles a single query versus single list
    auto queryId = blockIdx.y;
    auto probeId = blockIdx.x;
    auto listId = topQueryToCentroid[queryId][probeId];
    // Safety guard in case NaNs in input cause no list ID to be generated
    if (listId == -1) {
        return;
    }
    int numWarps = blockDim.x / kWarpSize;
    // FIXME: some issue with getLaneId() and CUDA 10.1 and P4 GPUs?
    int laneId = threadIdx.x % kWarpSize;
    int warpId = threadIdx.x / kWarpSize;
    auto numSubQuantizers = precompTerm2.getSize(1);
    auto codesPerSubQuantizer = precompTerm2.getSize(2);
    // This is where we start writing out data
    // We ensure that before the array (at offset -1), there is a 0 value
    int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
    float* distanceOut = distance[outBase].data();
    auto vecsBase = (EncodeT*)listCodes[listId];
    int numVecs = listLengths[listId];
    // How many vector blocks of 32 are in this list?
    int numBlocks = utils::divUp(numVecs, 32);
    // Number of EncodeT words per each dimension of block of 32 vecs
    constexpr int bytesPerVectorBlockDim = EncodeBits * 32 / 8;
    constexpr int wordsPerVectorBlockDim =
            bytesPerVectorBlockDim / sizeof(EncodeT);
    int wordsPerVectorBlock = wordsPerVectorBlockDim * numSubQuantizers;
    // This is constant for the (query, probe)
    float term1 = precompTerm1[queryId][probeId];
    for (int block = warpId; block < numBlocks; block += numWarps) {
        float dist = term1;
        // This is the vector a given lane/thread handles
        int vec = block * kWarpSize + laneId;
        // Tail group of the list may be partially filled; out-of-range
        // lanes still execute the loop (the warp-wide reads are cheap)
        // but contribute 0 and skip the final store.
        bool valid = vec < numVecs;
        EncodeT* data = vecsBase + block * wordsPerVectorBlock;
        auto term2Base = precompTerm2[listId].data();
        auto term3Base = precompTerm3[queryId].data();
        for (int sq = 0; sq < numSubQuantizers; ++sq) {
            EncodeT enc =
                    WarpPackedBits<EncodeT, EncodeBits>::read(laneId, data);
            EncodeT code =
                    WarpPackedBits<EncodeT, EncodeBits>::postRead(laneId, enc);
            dist += valid ? (ConvertTo<float>::to(term2Base[code]) +
                             ConvertTo<float>::to(term3Base[code]))
                          : 0;
            data += wordsPerVectorBlockDim;
            term2Base += codesPerSubQuantizer;
            term3Base += codesPerSubQuantizer;
        }
        if (valid) {
            distanceOut[vec] = dist;
        }
    }
}
// For precomputed codes, this calculates and loads code distances
// into smem
// For precomputed codes, this calculates and loads code distances
// into smem: smem[i] = term2Start[i] + term3Start[i] for i in [0, numCodes).
// All threads of the block cooperate; the caller must __syncthreads()
// afterwards before reading `smem`.
// Takes a vectorized (LookupVecT-wide) path when numCodes is divisible by
// the vector width, otherwise falls back to scalar element loads.
template <typename LookupT, typename LookupVecT>
inline __device__ void loadPrecomputedTerm(
        LookupT* smem,
        LookupT* term2Start,
        LookupT* term3Start,
        int numCodes) {
    constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
    // We can only use vector loads if the data is guaranteed to be
    // aligned. The codes are innermost, so if it is evenly divisible,
    // then any slice will be aligned.
    if (numCodes % kWordSize == 0) {
        constexpr int kUnroll = 2;
        // Load the data by float4 for efficiency, and then handle any remainder
        // limitVec is the number of whole vec words we can load, in terms
        // of whole blocks performing the load
        int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
        limitVec *= kUnroll * blockDim.x;
        LookupVecT* smemV = (LookupVecT*)smem;
        LookupVecT* term2StartV = (LookupVecT*)term2Start;
        LookupVecT* term3StartV = (LookupVecT*)term3Start;
        for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
            LookupVecT vals[kUnroll];
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                vals[j] = LoadStore<LookupVecT>::load(
                        &term2StartV[i + j * blockDim.x]);
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                LookupVecT q = LoadStore<LookupVecT>::load(
                        &term3StartV[i + j * blockDim.x]);
                vals[j] = Math<LookupVecT>::add(vals[j], q);
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                LoadStore<LookupVecT>::store(
                        &smemV[i + j * blockDim.x], vals[j]);
            }
        }
        // This is where we start loading the remainder that does not evenly
        // fit into kUnroll x blockDim.x
        int remainder = limitVec * kWordSize;
        for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
            smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
        }
    } else {
        // Potential unaligned load: scalar path with a manually unrolled
        // main loop and a scalar tail.
        constexpr int kUnroll = 4;
        int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
        int i = threadIdx.x;
        for (; i < limit; i += kUnroll * blockDim.x) {
            LookupT vals[kUnroll];
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                vals[j] = term2Start[i + j * blockDim.x];
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                vals[j] = Math<LookupT>::add(
                        vals[j], term3Start[i + j * blockDim.x]);
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                smem[i + j * blockDim.x] = vals[j];
            }
        }
        for (; i < numCodes; i += blockDim.x) {
            smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
        }
    }
}
// Scans one (query, probe) IVF list in the classic (non-interleaved) PQ code
// layout. Grid mapping: blockIdx.y = query, blockIdx.x = probe. Terms 2+3
// are pre-summed into dynamic shared memory (size = numSubQuantizers *
// codesPerSubQuantizer * sizeof(LookupT), passed at launch), then each
// thread strides over the list's codes, double-buffering the 32-bit code
// loads to overlap memory latency with the lookup arithmetic.
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void pqScanPrecomputedMultiPass(
        Tensor<float, 2, true> queries,
        Tensor<float, 2, true> precompTerm1,
        Tensor<LookupT, 3, true> precompTerm2,
        Tensor<LookupT, 3, true> precompTerm3,
        Tensor<int, 2, true> topQueryToCentroid,
        void** listCodes,
        int* listLengths,
        Tensor<int, 2, true> prefixSumOffsets,
        Tensor<float, 1, true> distance) {
    // precomputed term 2 + 3 storage
    // (sub q)(code id)
    extern __shared__ char smemTerm23[];
    LookupT* term23 = (LookupT*)smemTerm23;
    // Each block handles a single query
    auto queryId = blockIdx.y;
    auto probeId = blockIdx.x;
    auto codesPerSubQuantizer = precompTerm2.getSize(2);
    auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;
    // This is where we start writing out data
    // We ensure that before the array (at offset -1), there is a 0 value
    int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
    float* distanceOut = distance[outBase].data();
    auto listId = topQueryToCentroid[queryId][probeId];
    // Safety guard in case NaNs in input cause no list ID to be generated
    if (listId == -1) {
        return;
    }
    uint8_t* codeList = (uint8_t*)listCodes[listId];
    int limit = listLengths[listId];
    // Codes are consumed 32 bits (up to 4 sub-quantizer bytes) at a time.
    constexpr int kNumCode32 =
            NumSubQuantizers <= 4 ? 1 : (NumSubQuantizers / 4);
    unsigned int code32[kNumCode32];
    unsigned int nextCode32[kNumCode32];
    // We double-buffer the code loading, which improves memory utilization
    if (threadIdx.x < limit) {
        LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
    }
    // Load precomputed terms 1, 2, 3
    float term1 = precompTerm1[queryId][probeId];
    loadPrecomputedTerm<LookupT, LookupVecT>(
            term23,
            precompTerm2[listId].data(),
            precompTerm3[queryId].data(),
            precompTermSize);
    // Prevent WAR dependencies
    __syncthreads();
    // Each thread handles one code element in the list, with a
    // block-wide stride
    for (int codeIndex = threadIdx.x; codeIndex < limit;
         codeIndex += blockDim.x) {
        // Prefetch next codes
        if (codeIndex + blockDim.x < limit) {
            LoadCode32<NumSubQuantizers>::load(
                    nextCode32, codeList, codeIndex + blockDim.x);
        }
        float dist = term1;
#pragma unroll
        for (int word = 0; word < kNumCode32; ++word) {
            constexpr int kBytesPerCode32 =
                    NumSubQuantizers < 4 ? NumSubQuantizers : 4;
            if (kBytesPerCode32 == 1) {
                auto code = code32[0];
                // NOTE(review): this overwrites the accumulated term1
                // rather than adding to it (cf. `dist +=` in the
                // multi-byte branch below) — confirm this is intended
                // for the 1-sub-quantizer case.
                dist = ConvertTo<float>::to(term23[code]);
            } else {
#pragma unroll
                for (int byte = 0; byte < kBytesPerCode32; ++byte) {
                    auto code = getByte(code32[word], byte * 8, 8);
                    auto offset = codesPerSubQuantizer *
                            (word * kBytesPerCode32 + byte);
                    dist += ConvertTo<float>::to(term23[offset + code]);
                }
            }
        }
        // Write out intermediate distance result
        // We do not maintain indices here, in order to reduce global
        // memory traffic. Those are recovered in the final selection step.
        distanceOut[codeIndex] = dist;
        // Rotate buffers
#pragma unroll
        for (int word = 0; word < kNumCode32; ++word) {
            code32[word] = nextCode32[word];
        }
    }
}
// Runs one tile of queries end-to-end on `stream`:
//   1) compute per-(query, probe) output offsets into allDistances,
//   2) launch the appropriate scan kernel (interleaved layout dispatched on
//      bitsPerSubQuantizer; classic layout dispatched on numSubQuantizers,
//      with term2+3 staged in dynamic shared memory),
//   3) two-pass k-selection of the results into outDistances/outIndices.
// All tensors are sized for this tile by the caller; `listCodes`,
// `listIndices` and `listLengths` are device vectors for the whole index.
void runMultiPassTile(
        GpuResources* res,
        Tensor<float, 2, true>& queries,
        Tensor<float, 2, true>& precompTerm1,
        NoTypeTensor<3, true>& precompTerm2,
        NoTypeTensor<3, true>& precompTerm3,
        Tensor<int, 2, true>& topQueryToCentroid,
        bool useFloat16Lookup,
        bool interleavedCodeLayout,
        int bitsPerSubQuantizer,
        int numSubQuantizers,
        int numSubQuantizerCodes,
        thrust::device_vector<void*>& listCodes,
        thrust::device_vector<void*>& listIndices,
        IndicesOptions indicesOptions,
        thrust::device_vector<int>& listLengths,
        Tensor<char, 1, true>& thrustMem,
        Tensor<int, 2, true>& prefixSumOffsets,
        Tensor<float, 1, true>& allDistances,
        Tensor<float, 3, true>& heapDistances,
        Tensor<int, 3, true>& heapIndices,
        int k,
        Tensor<float, 2, true>& outDistances,
        Tensor<Index::idx_t, 2, true>& outIndices,
        hipStream_t stream) {
    // Calculate offset lengths, so we know where to write out
    // intermediate results
    runCalcListOffsets(
            res,
            topQueryToCentroid,
            listLengths,
            prefixSumOffsets,
            thrustMem,
            stream);
    // The vector interleaved layout implementation
    if (interleavedCodeLayout) {
        auto kThreadsPerBlock = 256;
        // One block per (probe, query) pair.
        auto grid = dim3(
                topQueryToCentroid.getSize(1), topQueryToCentroid.getSize(0));
        auto block = dim3(kThreadsPerBlock);
// Launch helper; expects precompTerm2T / precompTerm3T typed tensors
// to be in scope at the expansion site.
#define RUN_INTERLEAVED(BITS_PER_CODE, CODE_DIST_T)                       \
    do {                                                                  \
        hipLaunchKernelGGL(( pqScanPrecomputedInterleaved<uint8_t, BITS_PER_CODE, CODE_DIST_T>) \
                , dim3(grid), dim3(block), 0, stream,                     \
                queries,                                                  \
                precompTerm1,                                             \
                precompTerm2T,                                            \
                precompTerm3T,                                            \
                topQueryToCentroid,                                       \
                listCodes.data().get(),                                   \
                listLengths.data().get(),                                 \
                prefixSumOffsets,                                         \
                allDistances);                                            \
    } while (0)
        if (useFloat16Lookup) {
            auto precompTerm2T = precompTerm2.toTensor<half>();
            auto precompTerm3T = precompTerm3.toTensor<half>();
            switch (bitsPerSubQuantizer) {
                case 4: {
                    RUN_INTERLEAVED(4, half);
                } break;
                case 5: {
                    RUN_INTERLEAVED(5, half);
                } break;
                case 6: {
                    RUN_INTERLEAVED(6, half);
                } break;
                case 8: {
                    RUN_INTERLEAVED(8, half);
                } break;
                default:
                    FAISS_ASSERT(false);
                    break;
            }
        } else {
            auto precompTerm2T = precompTerm2.toTensor<float>();
            auto precompTerm3T = precompTerm3.toTensor<float>();
            switch (bitsPerSubQuantizer) {
                case 4: {
                    RUN_INTERLEAVED(4, float);
                } break;
                case 5: {
                    RUN_INTERLEAVED(5, float);
                } break;
                case 6: {
                    RUN_INTERLEAVED(6, float);
                } break;
                case 8: {
                    RUN_INTERLEAVED(8, float);
                } break;
                default:
                    FAISS_ASSERT(false);
                    break;
            }
        }
    } else {
        // Convert all codes to a distance, and write out (distance,
        // index) values for all intermediate results
        auto kThreadsPerBlock = 256;
        auto grid = dim3(
                topQueryToCentroid.getSize(1), topQueryToCentroid.getSize(0));
        auto block = dim3(kThreadsPerBlock);
        // pq precomputed terms (2 + 3)
        auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
        smem *= numSubQuantizers * numSubQuantizerCodes;
        FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T)                  \
    do {                                                               \
        auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>();        \
        auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>();        \
                                                                       \
        hipLaunchKernelGGL(( pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>) \
                , dim3(grid), dim3(block), smem, stream,               \
                queries,                                               \
                precompTerm1,                                          \
                precompTerm2T,                                         \
                precompTerm3T,                                         \
                topQueryToCentroid,                                    \
                listCodes.data().get(),                                \
                listLengths.data().get(),                              \
                prefixSumOffsets,                                      \
                allDistances);                                         \
    } while (0)
#define RUN_PQ(NUM_SUB_Q)                           \
    do {                                            \
        if (useFloat16Lookup) {                     \
            RUN_PQ_OPT(NUM_SUB_Q, half, Half8);     \
        } else {                                    \
            RUN_PQ_OPT(NUM_SUB_Q, float, float4);   \
        }                                           \
    } while (0)
        switch (numSubQuantizers) {
            case 1:
                RUN_PQ(1);
                break;
            case 2:
                RUN_PQ(2);
                break;
            case 3:
                RUN_PQ(3);
                break;
            case 4:
                RUN_PQ(4);
                break;
            case 8:
                RUN_PQ(8);
                break;
            case 12:
                RUN_PQ(12);
                break;
            case 16:
                RUN_PQ(16);
                break;
            case 20:
                RUN_PQ(20);
                break;
            case 24:
                RUN_PQ(24);
                break;
            case 28:
                RUN_PQ(28);
                break;
            case 32:
                RUN_PQ(32);
                break;
            case 40:
                RUN_PQ(40);
                break;
            case 48:
                RUN_PQ(48);
                break;
            case 56:
                RUN_PQ(56);
                break;
            case 64:
                RUN_PQ(64);
                break;
            case 96:
                RUN_PQ(96);
                break;
            default:
                FAISS_ASSERT(false);
                break;
        }
        CUDA_TEST_ERROR();
#undef RUN_PQ
#undef RUN_PQ_OPT
#undef RUN_INTERLEAVED
    }
    // k-select the output in chunks, to increase parallelism
    runPass1SelectLists(
            prefixSumOffsets,
            allDistances,
            topQueryToCentroid.getSize(1),
            k,
            false, // L2 distance chooses smallest
            heapDistances,
            heapIndices,
            stream);
    // k-select final output
    auto flatHeapDistances = heapDistances.downcastInner<2>();
    auto flatHeapIndices = heapIndices.downcastInner<2>();
    runPass2SelectLists(
            flatHeapDistances,
            flatHeapIndices,
            listIndices,
            indicesOptions,
            prefixSumOffsets,
            topQueryToCentroid,
            k,
            false, // L2 distance chooses smallest
            outDistances,
            outIndices,
            stream);
    CUDA_TEST_ERROR();
}
// Entry point for the precomputed-table PQ scan. Sizes a per-tile working
// set against the available temporary GPU memory, double-buffers the
// temporaries across two alternate streams so consecutive query tiles
// overlap, and calls runMultiPassTile() per tile. Results for all queries
// land in outDistances/outIndices.
void runPQScanMultiPassPrecomputed(
        Tensor<float, 2, true>& queries,
        // (query id)(probe id)
        Tensor<float, 2, true>& precompTerm1,
        // (centroid id)(sub q)(code id)
        NoTypeTensor<3, true>& precompTerm2,
        // (query id)(sub q)(code id)
        NoTypeTensor<3, true>& precompTerm3,
        Tensor<int, 2, true>& topQueryToCentroid,
        bool useFloat16Lookup,
        bool interleavedCodeLayout,
        int bitsPerSubQuantizer,
        int numSubQuantizers,
        int numSubQuantizerCodes,
        thrust::device_vector<void*>& listCodes,
        thrust::device_vector<void*>& listIndices,
        IndicesOptions indicesOptions,
        thrust::device_vector<int>& listLengths,
        int maxListLength,
        int k,
        // output
        Tensor<float, 2, true>& outDistances,
        // output
        Tensor<Index::idx_t, 2, true>& outIndices,
        GpuResources* res) {
    constexpr int kMinQueryTileSize = 8;
    constexpr int kMaxQueryTileSize = 128;
    constexpr int kThrustMemSize = 16384;
    int nprobe = topQueryToCentroid.getSize(1);
    auto stream = res->getDefaultStreamCurrentDevice();
    // Make a reservation for Thrust to do its dirty work (global memory
    // cross-block reduction space); hopefully this is large enough.
    DeviceTensor<char, 1, true> thrustMem1(
            res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
    DeviceTensor<char, 1, true> thrustMem2(
            res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
    DeviceTensor<char, 1, true>* thrustMem[2] = {&thrustMem1, &thrustMem2};
    // How much temporary storage is available?
    // If possible, we'd like to fit within the space available.
    size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();
    // We run two passes of heap selection
    // This is the size of the first-level heap passes
    constexpr int kNProbeSplit = 8;
    int pass2Chunks = std::min(nprobe, kNProbeSplit);
    size_t sizeForFirstSelectPass =
            pass2Chunks * k * (sizeof(float) + sizeof(int));
    // How much temporary storage we need per each query
    size_t sizePerQuery = 2 * // # streams
            ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
             nprobe * maxListLength * sizeof(float) + // allDistances
             sizeForFirstSelectPass);
    // Pick a tile size that fits, clamped to [kMinQueryTileSize,
    // kMaxQueryTileSize] (the minimum may exceed the available temp memory,
    // in which case the allocator falls back to other memory).
    int queryTileSize = (int)(sizeAvailable / sizePerQuery);
    if (queryTileSize < kMinQueryTileSize) {
        queryTileSize = kMinQueryTileSize;
    } else if (queryTileSize > kMaxQueryTileSize) {
        queryTileSize = kMaxQueryTileSize;
    }
    // FIXME: we should adjust queryTileSize to deal with this, since
    // indexing is in int32
    FAISS_ASSERT(
            queryTileSize * nprobe * maxListLength <=
            std::numeric_limits<int>::max());
    // Temporary memory buffers
    // Make sure there is space prior to the start which will be 0, and
    // will handle the boundary condition without branches
    DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe + 1});
    DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe + 1});
    // Views that start at element 1, so that element -1 (the sentinel 0)
    // is always readable.
    DeviceTensor<int, 2, true> prefixSumOffsets1(
            prefixSumOffsetSpace1[1].data(), {queryTileSize, nprobe});
    DeviceTensor<int, 2, true> prefixSumOffsets2(
            prefixSumOffsetSpace2[1].data(), {queryTileSize, nprobe});
    DeviceTensor<int, 2, true>* prefixSumOffsets[2] = {
            &prefixSumOffsets1, &prefixSumOffsets2};
    // Make sure the element before prefixSumOffsets is 0, since we
    // depend upon simple, boundary-less indexing to get proper results
    CUDA_VERIFY(hipMemsetAsync(
            prefixSumOffsetSpace1.data(), 0, sizeof(int), stream));
    CUDA_VERIFY(hipMemsetAsync(
            prefixSumOffsetSpace2.data(), 0, sizeof(int), stream));
    DeviceTensor<float, 1, true> allDistances1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe * maxListLength});
    DeviceTensor<float, 1, true> allDistances2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe * maxListLength});
    DeviceTensor<float, 1, true>* allDistances[2] = {
            &allDistances1, &allDistances2};
    DeviceTensor<float, 3, true> heapDistances1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<float, 3, true> heapDistances2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<float, 3, true>* heapDistances[2] = {
            &heapDistances1, &heapDistances2};
    DeviceTensor<int, 3, true> heapIndices1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<int, 3, true> heapIndices2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<int, 3, true>* heapIndices[2] = {&heapIndices1, &heapIndices2};
    auto streams = res->getAlternateStreamsCurrentDevice();
    streamWait(streams, {stream});
    int curStream = 0;
    // Process query tiles, alternating between the two working sets /
    // streams so tile i+1 can start while tile i is still in flight.
    for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
        int numQueriesInTile =
                std::min(queryTileSize, queries.getSize(0) - query);
        auto prefixSumOffsetsView =
                prefixSumOffsets[curStream]->narrowOutermost(
                        0, numQueriesInTile);
        auto coarseIndicesView =
                topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
        auto queryView = queries.narrowOutermost(query, numQueriesInTile);
        auto term1View = precompTerm1.narrowOutermost(query, numQueriesInTile);
        auto term3View = precompTerm3.narrowOutermost(query, numQueriesInTile);
        auto heapDistancesView =
                heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
        auto heapIndicesView =
                heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
        auto outDistanceView =
                outDistances.narrowOutermost(query, numQueriesInTile);
        auto outIndicesView =
                outIndices.narrowOutermost(query, numQueriesInTile);
        runMultiPassTile(
                res,
                queryView,
                term1View,
                precompTerm2,
                term3View,
                coarseIndicesView,
                useFloat16Lookup,
                interleavedCodeLayout,
                bitsPerSubQuantizer,
                numSubQuantizers,
                numSubQuantizerCodes,
                listCodes,
                listIndices,
                indicesOptions,
                listLengths,
                *thrustMem[curStream],
                prefixSumOffsetsView,
                *allDistances[curStream],
                heapDistancesView,
                heapIndicesView,
                k,
                outDistanceView,
                outIndicesView,
                streams[curStream]);
        curStream = (curStream + 1) % 2;
    }
    // Have the default stream wait for both alternate streams to finish.
    streamWait({stream}, streams);
}
} // namespace gpu
} // namespace faiss
| c46eb34e966622391c0a094d613f3e0306253f69.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/PQScanMultiPassPrecomputed.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/WarpPackedBits.cuh>
#include <limits>
namespace faiss {
namespace gpu {
// A basic implementation that works for the interleaved by vector layout for
// any number of sub-quantizers
// Scans one (query, probe) IVF list in the interleaved-by-vector code layout
// and writes one partial distance per vector into `distance` at the offset
// given by prefixSumOffsets.
// Grid mapping: blockIdx.y = query id, blockIdx.x = probe id. Each warp of
// the block strides over the list's 32-vector groups; each lane accumulates
// the distance for one vector of its group.
// Distance = term1(query,probe) + sum over sub-quantizers of
//            term2(list, sq, code) + term3(query, sq, code).
template <typename EncodeT, int EncodeBits, typename CodeDistanceT>
__global__ void pqScanPrecomputedInterleaved(
        Tensor<float, 2, true> queries,
        // (query id)(probe id)
        Tensor<float, 2, true> precompTerm1,
        // (centroid id)(sub q)(code id)
        Tensor<CodeDistanceT, 3, true> precompTerm2,
        // (query id)(sub q)(code id)
        Tensor<CodeDistanceT, 3, true> precompTerm3,
        Tensor<int, 2, true> topQueryToCentroid,
        void** listCodes,
        int* listLengths,
        Tensor<int, 2, true> prefixSumOffsets,
        Tensor<float, 1, true> distance) {
    // Each block handles a single query versus single list
    auto queryId = blockIdx.y;
    auto probeId = blockIdx.x;
    auto listId = topQueryToCentroid[queryId][probeId];
    // Safety guard in case NaNs in input cause no list ID to be generated
    if (listId == -1) {
        return;
    }
    int numWarps = blockDim.x / kWarpSize;
    // FIXME: some issue with getLaneId() and CUDA 10.1 and P4 GPUs?
    int laneId = threadIdx.x % kWarpSize;
    int warpId = threadIdx.x / kWarpSize;
    auto numSubQuantizers = precompTerm2.getSize(1);
    auto codesPerSubQuantizer = precompTerm2.getSize(2);
    // This is where we start writing out data
    // We ensure that before the array (at offset -1), there is a 0 value
    int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
    float* distanceOut = distance[outBase].data();
    auto vecsBase = (EncodeT*)listCodes[listId];
    int numVecs = listLengths[listId];
    // How many vector blocks of 32 are in this list?
    int numBlocks = utils::divUp(numVecs, 32);
    // Number of EncodeT words per each dimension of block of 32 vecs
    constexpr int bytesPerVectorBlockDim = EncodeBits * 32 / 8;
    constexpr int wordsPerVectorBlockDim =
            bytesPerVectorBlockDim / sizeof(EncodeT);
    int wordsPerVectorBlock = wordsPerVectorBlockDim * numSubQuantizers;
    // This is constant for the (query, probe)
    float term1 = precompTerm1[queryId][probeId];
    for (int block = warpId; block < numBlocks; block += numWarps) {
        float dist = term1;
        // This is the vector a given lane/thread handles
        int vec = block * kWarpSize + laneId;
        // Tail group of the list may be partially filled; out-of-range
        // lanes still execute the loop (the warp-wide reads are cheap)
        // but contribute 0 and skip the final store.
        bool valid = vec < numVecs;
        EncodeT* data = vecsBase + block * wordsPerVectorBlock;
        auto term2Base = precompTerm2[listId].data();
        auto term3Base = precompTerm3[queryId].data();
        for (int sq = 0; sq < numSubQuantizers; ++sq) {
            EncodeT enc =
                    WarpPackedBits<EncodeT, EncodeBits>::read(laneId, data);
            EncodeT code =
                    WarpPackedBits<EncodeT, EncodeBits>::postRead(laneId, enc);
            dist += valid ? (ConvertTo<float>::to(term2Base[code]) +
                             ConvertTo<float>::to(term3Base[code]))
                          : 0;
            data += wordsPerVectorBlockDim;
            term2Base += codesPerSubQuantizer;
            term3Base += codesPerSubQuantizer;
        }
        if (valid) {
            distanceOut[vec] = dist;
        }
    }
}
// For precomputed codes, this calculates and loads code distances
// into smem
// For precomputed codes, this calculates and loads code distances
// into smem: smem[i] = term2Start[i] + term3Start[i] for i in [0, numCodes).
// All threads of the block cooperate; the caller must __syncthreads()
// afterwards before reading `smem`.
// Takes a vectorized (LookupVecT-wide) path when numCodes is divisible by
// the vector width, otherwise falls back to scalar element loads.
template <typename LookupT, typename LookupVecT>
inline __device__ void loadPrecomputedTerm(
        LookupT* smem,
        LookupT* term2Start,
        LookupT* term3Start,
        int numCodes) {
    constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
    // We can only use vector loads if the data is guaranteed to be
    // aligned. The codes are innermost, so if it is evenly divisible,
    // then any slice will be aligned.
    if (numCodes % kWordSize == 0) {
        constexpr int kUnroll = 2;
        // Load the data by float4 for efficiency, and then handle any remainder
        // limitVec is the number of whole vec words we can load, in terms
        // of whole blocks performing the load
        int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
        limitVec *= kUnroll * blockDim.x;
        LookupVecT* smemV = (LookupVecT*)smem;
        LookupVecT* term2StartV = (LookupVecT*)term2Start;
        LookupVecT* term3StartV = (LookupVecT*)term3Start;
        for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
            LookupVecT vals[kUnroll];
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                vals[j] = LoadStore<LookupVecT>::load(
                        &term2StartV[i + j * blockDim.x]);
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                LookupVecT q = LoadStore<LookupVecT>::load(
                        &term3StartV[i + j * blockDim.x]);
                vals[j] = Math<LookupVecT>::add(vals[j], q);
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                LoadStore<LookupVecT>::store(
                        &smemV[i + j * blockDim.x], vals[j]);
            }
        }
        // This is where we start loading the remainder that does not evenly
        // fit into kUnroll x blockDim.x
        int remainder = limitVec * kWordSize;
        for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
            smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
        }
    } else {
        // Potential unaligned load: scalar path with a manually unrolled
        // main loop and a scalar tail.
        constexpr int kUnroll = 4;
        int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
        int i = threadIdx.x;
        for (; i < limit; i += kUnroll * blockDim.x) {
            LookupT vals[kUnroll];
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                vals[j] = term2Start[i + j * blockDim.x];
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                vals[j] = Math<LookupT>::add(
                        vals[j], term3Start[i + j * blockDim.x]);
            }
#pragma unroll
            for (int j = 0; j < kUnroll; ++j) {
                smem[i + j * blockDim.x] = vals[j];
            }
        }
        for (; i < numCodes; i += blockDim.x) {
            smem[i] = Math<LookupT>::add(term2Start[i], term3Start[i]);
        }
    }
}
// Second (scan) pass of precomputed-table IVFPQ distance computation.
//
// Launch layout: gridDim.x = probe id, gridDim.y = query id; one block
// scans one (query, probe) inverted list. Dynamic shared memory must be
// sized to hold NumSubQuantizers * codesPerSubQuantizer LookupT values
// (the precomputed term2 + term3 table loaded by loadPrecomputedTerm).
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void pqScanPrecomputedMultiPass(
        Tensor<float, 2, true> queries,
        Tensor<float, 2, true> precompTerm1,
        Tensor<LookupT, 3, true> precompTerm2,
        Tensor<LookupT, 3, true> precompTerm3,
        Tensor<int, 2, true> topQueryToCentroid,
        void** listCodes,
        int* listLengths,
        Tensor<int, 2, true> prefixSumOffsets,
        Tensor<float, 1, true> distance) {
    // precomputed term 2 + 3 storage
    // (sub q)(code id)
    extern __shared__ char smemTerm23[];
    LookupT* term23 = (LookupT*)smemTerm23;

    // Each block handles a single query
    auto queryId = blockIdx.y;
    auto probeId = blockIdx.x;
    auto codesPerSubQuantizer = precompTerm2.getSize(2);
    auto precompTermSize = precompTerm2.getSize(1) * codesPerSubQuantizer;

    // This is where we start writing out data
    // We ensure that before the array (at offset -1), there is a 0 value
    int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
    float* distanceOut = distance[outBase].data();

    auto listId = topQueryToCentroid[queryId][probeId];
    // Safety guard in case NaNs in input cause no list ID to be generated
    if (listId == -1) {
        return;
    }

    uint8_t* codeList = (uint8_t*)listCodes[listId];
    int limit = listLengths[listId];

    // Number of 32-bit words needed to hold one PQ code (4 sub-quantizer
    // bytes per word)
    constexpr int kNumCode32 =
            NumSubQuantizers <= 4 ? 1 : (NumSubQuantizers / 4);
    unsigned int code32[kNumCode32];
    unsigned int nextCode32[kNumCode32];

    // We double-buffer the code loading, which improves memory utilization
    if (threadIdx.x < limit) {
        LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
    }

    // Load precomputed terms 1, 2, 3
    float term1 = precompTerm1[queryId][probeId];
    loadPrecomputedTerm<LookupT, LookupVecT>(
            term23,
            precompTerm2[listId].data(),
            precompTerm3[queryId].data(),
            precompTermSize);

    // Prevent WAR dependencies
    __syncthreads();

    // Each thread handles one code element in the list, with a
    // block-wide stride
    for (int codeIndex = threadIdx.x; codeIndex < limit;
         codeIndex += blockDim.x) {
        // Prefetch next codes
        if (codeIndex + blockDim.x < limit) {
            LoadCode32<NumSubQuantizers>::load(
                    nextCode32, codeList, codeIndex + blockDim.x);
        }

        float dist = term1;

#pragma unroll
        for (int word = 0; word < kNumCode32; ++word) {
            constexpr int kBytesPerCode32 =
                    NumSubQuantizers < 4 ? NumSubQuantizers : 4;

            if (kBytesPerCode32 == 1) {
                auto code = code32[0];
                // NOTE(review): this assigns rather than accumulates, so
                // term1 is dropped in the single-sub-quantizer path --
                // verify against the general path below, which adds into
                // dist.
                dist = ConvertTo<float>::to(term23[code]);

            } else {
#pragma unroll
                for (int byte = 0; byte < kBytesPerCode32; ++byte) {
                    auto code = getByte(code32[word], byte * 8, 8);

                    auto offset = codesPerSubQuantizer *
                            (word * kBytesPerCode32 + byte);

                    dist += ConvertTo<float>::to(term23[offset + code]);
                }
            }
        }

        // Write out intermediate distance result
        // We do not maintain indices here, in order to reduce global
        // memory traffic. Those are recovered in the final selection step.
        distanceOut[codeIndex] = dist;

        // Rotate buffers
#pragma unroll
        for (int word = 0; word < kNumCode32; ++word) {
            code32[word] = nextCode32[word];
        }
    }
}
// Runs one query tile of the multi-pass precomputed-table IVFPQ scan:
//   1) prefix-sums list lengths into prefixSumOffsets so each
//      (query, probe) pair knows where to write its distances,
//   2) launches the code->distance kernel for the configured layout
//      (interleaved or multi-pass) and lookup type (half or float),
//   3) two-pass k-selects the final results into outDistances/outIndices.
// All work is enqueued asynchronously on `stream`; the caller owns
// synchronization.
void runMultiPassTile(
        GpuResources* res,
        Tensor<float, 2, true>& queries,
        Tensor<float, 2, true>& precompTerm1,
        NoTypeTensor<3, true>& precompTerm2,
        NoTypeTensor<3, true>& precompTerm3,
        Tensor<int, 2, true>& topQueryToCentroid,
        bool useFloat16Lookup,
        bool interleavedCodeLayout,
        int bitsPerSubQuantizer,
        int numSubQuantizers,
        int numSubQuantizerCodes,
        thrust::device_vector<void*>& listCodes,
        thrust::device_vector<void*>& listIndices,
        IndicesOptions indicesOptions,
        thrust::device_vector<int>& listLengths,
        Tensor<char, 1, true>& thrustMem,
        Tensor<int, 2, true>& prefixSumOffsets,
        Tensor<float, 1, true>& allDistances,
        Tensor<float, 3, true>& heapDistances,
        Tensor<int, 3, true>& heapIndices,
        int k,
        Tensor<float, 2, true>& outDistances,
        Tensor<Index::idx_t, 2, true>& outIndices,
        cudaStream_t stream) {
    // Calculate offset lengths, so we know where to write out
    // intermediate results
    runCalcListOffsets(
            res,
            topQueryToCentroid,
            listLengths,
            prefixSumOffsets,
            thrustMem,
            stream);

    // The vector interleaved layout implementation
    if (interleavedCodeLayout) {
        auto kThreadsPerBlock = 256;

        // one block per (probe, query) pair
        auto grid = dim3(
                topQueryToCentroid.getSize(1), topQueryToCentroid.getSize(0));
        auto block = dim3(kThreadsPerBlock);

        // expects typed views precompTerm2T / precompTerm3T in scope
#define RUN_INTERLEAVED(BITS_PER_CODE, CODE_DIST_T)                       \
    do {                                                                  \
        pqScanPrecomputedInterleaved<uint8_t, BITS_PER_CODE, CODE_DIST_T> \
                <<<grid, block, 0, stream>>>(                             \
                        queries,                                          \
                        precompTerm1,                                     \
                        precompTerm2T,                                    \
                        precompTerm3T,                                    \
                        topQueryToCentroid,                               \
                        listCodes.data().get(),                           \
                        listLengths.data().get(),                         \
                        prefixSumOffsets,                                 \
                        allDistances);                                    \
    } while (0)

        if (useFloat16Lookup) {
            auto precompTerm2T = precompTerm2.toTensor<half>();
            auto precompTerm3T = precompTerm3.toTensor<half>();

            switch (bitsPerSubQuantizer) {
                case 4: {
                    RUN_INTERLEAVED(4, half);
                } break;
                case 5: {
                    RUN_INTERLEAVED(5, half);
                } break;
                case 6: {
                    RUN_INTERLEAVED(6, half);
                } break;
                case 8: {
                    RUN_INTERLEAVED(8, half);
                } break;
                default:
                    FAISS_ASSERT(false);
                    break;
            }
        } else {
            auto precompTerm2T = precompTerm2.toTensor<float>();
            auto precompTerm3T = precompTerm3.toTensor<float>();

            switch (bitsPerSubQuantizer) {
                case 4: {
                    RUN_INTERLEAVED(4, float);
                } break;
                case 5: {
                    RUN_INTERLEAVED(5, float);
                } break;
                case 6: {
                    RUN_INTERLEAVED(6, float);
                } break;
                case 8: {
                    RUN_INTERLEAVED(8, float);
                } break;
                default:
                    FAISS_ASSERT(false);
                    break;
            }
        }
    } else {
        // Convert all codes to a distance, and write out (distance,
        // index) values for all intermediate results
        auto kThreadsPerBlock = 256;

        auto grid = dim3(
                topQueryToCentroid.getSize(1), topQueryToCentroid.getSize(0));
        auto block = dim3(kThreadsPerBlock);

        // pq precomputed terms (2 + 3)
        // dynamic shared memory for the per-list lookup table; must fit
        // within the device limit
        auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);

        smem *= numSubQuantizers * numSubQuantizerCodes;
        FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());

#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T)                   \
    do {                                                                \
        auto precompTerm2T = precompTerm2.toTensor<LOOKUP_T>();         \
        auto precompTerm3T = precompTerm3.toTensor<LOOKUP_T>();         \
                                                                        \
        pqScanPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>   \
                <<<grid, block, smem, stream>>>(                        \
                        queries,                                        \
                        precompTerm1,                                   \
                        precompTerm2T,                                  \
                        precompTerm3T,                                  \
                        topQueryToCentroid,                             \
                        listCodes.data().get(),                         \
                        listLengths.data().get(),                       \
                        prefixSumOffsets,                               \
                        allDistances);                                  \
    } while (0)

#define RUN_PQ(NUM_SUB_Q)                             \
    do {                                              \
        if (useFloat16Lookup) {                       \
            RUN_PQ_OPT(NUM_SUB_Q, half, Half8);       \
        } else {                                      \
            RUN_PQ_OPT(NUM_SUB_Q, float, float4);     \
        }                                             \
    } while (0)

        // NumSubQuantizers is a template parameter of the kernel, hence
        // the enumeration of supported code sizes
        switch (numSubQuantizers) {
            case 1:
                RUN_PQ(1);
                break;
            case 2:
                RUN_PQ(2);
                break;
            case 3:
                RUN_PQ(3);
                break;
            case 4:
                RUN_PQ(4);
                break;
            case 8:
                RUN_PQ(8);
                break;
            case 12:
                RUN_PQ(12);
                break;
            case 16:
                RUN_PQ(16);
                break;
            case 20:
                RUN_PQ(20);
                break;
            case 24:
                RUN_PQ(24);
                break;
            case 28:
                RUN_PQ(28);
                break;
            case 32:
                RUN_PQ(32);
                break;
            case 40:
                RUN_PQ(40);
                break;
            case 48:
                RUN_PQ(48);
                break;
            case 56:
                RUN_PQ(56);
                break;
            case 64:
                RUN_PQ(64);
                break;
            case 96:
                RUN_PQ(96);
                break;
            default:
                FAISS_ASSERT(false);
                break;
        }

        CUDA_TEST_ERROR();

#undef RUN_PQ
#undef RUN_PQ_OPT
#undef RUN_INTERLEAVED
    }

    // k-select the output in chunks, to increase parallelism
    runPass1SelectLists(
            prefixSumOffsets,
            allDistances,
            topQueryToCentroid.getSize(1),
            k,
            false, // L2 distance chooses smallest
            heapDistances,
            heapIndices,
            stream);

    // k-select final output
    auto flatHeapDistances = heapDistances.downcastInner<2>();
    auto flatHeapIndices = heapIndices.downcastInner<2>();

    runPass2SelectLists(
            flatHeapDistances,
            flatHeapIndices,
            listIndices,
            indicesOptions,
            prefixSumOffsets,
            topQueryToCentroid,
            k,
            false, // L2 distance chooses smallest
            outDistances,
            outIndices,
            stream);

    CUDA_TEST_ERROR();
}
// Top-level entry for precomputed-table IVFPQ scanning.
//
// Sizes a query tile to fit the available temporary GPU memory, allocates
// every scratch tensor twice (double-buffered across the two alternate
// streams), then processes the query set tile by tile via
// runMultiPassTile(), alternating streams to overlap successive tiles.
void runPQScanMultiPassPrecomputed(
        Tensor<float, 2, true>& queries,
        // (query id)(probe id)
        Tensor<float, 2, true>& precompTerm1,
        // (centroid id)(sub q)(code id)
        NoTypeTensor<3, true>& precompTerm2,
        // (query id)(sub q)(code id)
        NoTypeTensor<3, true>& precompTerm3,
        Tensor<int, 2, true>& topQueryToCentroid,
        bool useFloat16Lookup,
        bool interleavedCodeLayout,
        int bitsPerSubQuantizer,
        int numSubQuantizers,
        int numSubQuantizerCodes,
        thrust::device_vector<void*>& listCodes,
        thrust::device_vector<void*>& listIndices,
        IndicesOptions indicesOptions,
        thrust::device_vector<int>& listLengths,
        int maxListLength,
        int k,
        // output
        Tensor<float, 2, true>& outDistances,
        // output
        Tensor<Index::idx_t, 2, true>& outIndices,
        GpuResources* res) {
    constexpr int kMinQueryTileSize = 8;
    constexpr int kMaxQueryTileSize = 128;
    constexpr int kThrustMemSize = 16384;

    int nprobe = topQueryToCentroid.getSize(1);

    auto stream = res->getDefaultStreamCurrentDevice();

    // Make a reservation for Thrust to do its dirty work (global memory
    // cross-block reduction space); hopefully this is large enough.
    DeviceTensor<char, 1, true> thrustMem1(
            res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
    DeviceTensor<char, 1, true> thrustMem2(
            res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
    DeviceTensor<char, 1, true>* thrustMem[2] = {&thrustMem1, &thrustMem2};

    // How much temporary storage is available?
    // If possible, we'd like to fit within the space available.
    size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();

    // We run two passes of heap selection
    // This is the size of the first-level heap passes
    constexpr int kNProbeSplit = 8;
    int pass2Chunks = std::min(nprobe, kNProbeSplit);

    size_t sizeForFirstSelectPass =
            pass2Chunks * k * (sizeof(float) + sizeof(int));

    // How much temporary storage we need per each query
    size_t sizePerQuery = 2 * // # streams
            ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
             nprobe * maxListLength * sizeof(float) + // allDistances
             sizeForFirstSelectPass);

    // clamp the tile size into [kMinQueryTileSize, kMaxQueryTileSize]
    int queryTileSize = (int)(sizeAvailable / sizePerQuery);

    if (queryTileSize < kMinQueryTileSize) {
        queryTileSize = kMinQueryTileSize;
    } else if (queryTileSize > kMaxQueryTileSize) {
        queryTileSize = kMaxQueryTileSize;
    }

    // FIXME: we should adjust queryTileSize to deal with this, since
    // indexing is in int32
    FAISS_ASSERT(
            queryTileSize * nprobe * maxListLength <=
            std::numeric_limits<int>::max());

    // Temporary memory buffers
    // Make sure there is space prior to the start which will be 0, and
    // will handle the boundary condition without branches
    DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe + 1});
    DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe + 1});

    // views starting at element 1, so element -1 (the guard 0) exists
    DeviceTensor<int, 2, true> prefixSumOffsets1(
            prefixSumOffsetSpace1[1].data(), {queryTileSize, nprobe});
    DeviceTensor<int, 2, true> prefixSumOffsets2(
            prefixSumOffsetSpace2[1].data(), {queryTileSize, nprobe});
    DeviceTensor<int, 2, true>* prefixSumOffsets[2] = {
            &prefixSumOffsets1, &prefixSumOffsets2};

    // Make sure the element before prefixSumOffsets is 0, since we
    // depend upon simple, boundary-less indexing to get proper results
    CUDA_VERIFY(cudaMemsetAsync(
            prefixSumOffsetSpace1.data(), 0, sizeof(int), stream));
    CUDA_VERIFY(cudaMemsetAsync(
            prefixSumOffsetSpace2.data(), 0, sizeof(int), stream));

    DeviceTensor<float, 1, true> allDistances1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe * maxListLength});
    DeviceTensor<float, 1, true> allDistances2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize * nprobe * maxListLength});
    DeviceTensor<float, 1, true>* allDistances[2] = {
            &allDistances1, &allDistances2};

    DeviceTensor<float, 3, true> heapDistances1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<float, 3, true> heapDistances2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<float, 3, true>* heapDistances[2] = {
            &heapDistances1, &heapDistances2};

    DeviceTensor<int, 3, true> heapIndices1(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<int, 3, true> heapIndices2(
            res,
            makeTempAlloc(AllocType::Other, stream),
            {queryTileSize, pass2Chunks, k});
    DeviceTensor<int, 3, true>* heapIndices[2] = {&heapIndices1, &heapIndices2};

    auto streams = res->getAlternateStreamsCurrentDevice();
    streamWait(streams, {stream});

    int curStream = 0;

    for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
        // the last tile may be smaller than queryTileSize
        int numQueriesInTile =
                std::min(queryTileSize, queries.getSize(0) - query);

        auto prefixSumOffsetsView =
                prefixSumOffsets[curStream]->narrowOutermost(
                        0, numQueriesInTile);

        auto coarseIndicesView =
                topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
        auto queryView = queries.narrowOutermost(query, numQueriesInTile);
        auto term1View = precompTerm1.narrowOutermost(query, numQueriesInTile);
        auto term3View = precompTerm3.narrowOutermost(query, numQueriesInTile);

        auto heapDistancesView =
                heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
        auto heapIndicesView =
                heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);

        auto outDistanceView =
                outDistances.narrowOutermost(query, numQueriesInTile);
        auto outIndicesView =
                outIndices.narrowOutermost(query, numQueriesInTile);

        runMultiPassTile(
                res,
                queryView,
                term1View,
                precompTerm2,
                term3View,
                coarseIndicesView,
                useFloat16Lookup,
                interleavedCodeLayout,
                bitsPerSubQuantizer,
                numSubQuantizers,
                numSubQuantizerCodes,
                listCodes,
                listIndices,
                indicesOptions,
                listLengths,
                *thrustMem[curStream],
                prefixSumOffsetsView,
                *allDistances[curStream],
                heapDistancesView,
                heapIndicesView,
                k,
                outDistanceView,
                outIndicesView,
                streams[curStream]);

        curStream = (curStream + 1) % 2;
    }

    // re-join the alternate streams with the default stream before return
    streamWait({stream}, streams);
}
} // namespace gpu
} // namespace faiss
|
cc8fd690fac775421f37231d48be852262c83b97.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define WARP 8
#define WARP2 16
#define WARP3 24
// GPU memory-coherence litmus-test kernel.
//
// Threads are grouped into WARP-sized logical warps (warp = idx / WARP);
// each group of four consecutive warps plays fixed roles selected by
// (warp % 4), all targeting the x[] slots of warp 0 in the group:
//   role 0: stores x[idx] = 1
//   role 2: stores x[idx - WARP2] = 2 (the same locations as role 0)
//   role 1: stores y[idx], then loads the contended x location twice and
//           records the observations in ra[idx-WARP] / rb[idx-WARP]
//   role 3: loads the same x location twice (via idx - WARP3) and records
//           them in ra[idx-WARP2] / rb[idx-WARP2]
// r0/r1 start at -2 so writer-role threads leave no read record. The host
// classifies the recorded (r0, r1) pairs with final_cond(). N is unused
// inside the kernel; index validity is implied by the launch shape.
__global__ void incrementArrayOnDevice(int *x, int*y, int*ra, int*rb, int N)
{
  int idx = blockIdx.x*blockDim.x + threadIdx.x;
  int r0 = -2;
  int r1 = -2;
  int warp = idx / WARP;
  int div = warp % 4;
  if (div == 0) {x[idx] = 1;}
  else if (div == 2) {x[idx - WARP2] = 2;}
  else if (div == 1) {
    y [idx] = 1;
    r0 = x[idx - WARP];
    ra[idx-WARP] = r0;
    r1 = x[idx - WARP];
    rb[idx-WARP] = r1;}
  else
  {
    r0 = x[idx - WARP3];
    ra[idx-WARP2] = r0; // notice the warp2
    r1 = x[idx - WARP3];
    rb[idx-WARP2] = r1;}
  /*
  switch (warp % 4)
  {
  case 0:
  y[idx + WARP] = 1;
  x[idx] = 1; break;
  case 2:
  x[idx - WARP2] = 2; y [idx] = 1; break;
  case 1:
  r0 = x[idx - WARP];
  r1 = x[idx - WARP];
  ra[idx-WARP] = r0;
  rb[idx-WARP] = r1;
  break;
  case 3:
  r0 = x[idx - WARP3];
  r1 = x[idx - WARP3];
  ra[idx-WARP2] = r0; // notice the warp2
  rb[idx-WARP2] = r1;
  break;
  };*/
}
// Classifies one observed litmus-test outcome.
//
// (_out_1_r0, _out_1_r1) are the two reads recorded by the warp-1 reader,
// (_out_3_r0, _out_3_r1) the two reads recorded by the warp-3 reader of
// the same group. Returns 1 when the combination belongs to the set of
// permitted results, and 0 for any forbidden (or never-written, e.g.
// -1 / -2 sentinel) combination -- i.e. a witness for the host loop.
inline static int final_cond(int _out_1_r0,int _out_1_r1,int _out_3_r0,int _out_3_r1) {
  return (((_out_1_r0 == 2) && (((_out_1_r1 == 2) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))) || ((_out_1_r1 == 1) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && (_out_3_r1 == 1))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))))) || ((_out_1_r0 == 1) && (((_out_1_r1 == 2) && ((((_out_3_r0 == 2) && (_out_3_r1 == 2)) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))) || ((_out_1_r1 == 1) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0)))))))) || ((_out_1_r0 == 0) && ((((_out_1_r1 == 2) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))) || ((_out_1_r1 == 1) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0)))))) || ((_out_1_r1 == 0) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0)))))));
}
// Host driver: repeatedly re-initializes the arrays, runs the litmus-test
// kernel (one WARP-sized block per logical warp), and scans the recorded
// read pairs with final_cond() until a disallowed outcome (a "witness")
// is found.
// NOTE(review): no error checking on any hip* call or kernel launch; an
// API failure would surface only as unchanged host buffers.
int main(void)
{
  int *a_h, *b_h, *ra_h, *rb_h; // pointers to host memory
  int *a_d, *b_d, *ra_d, *rb_d; // pointers to device memory
  int N = WARP * 1000 ;
  int i;
  size_t size = N*sizeof(int);
  // allocate arrays on host
  a_h = (int *)malloc(size);
  b_h = (int *)malloc(size);
  ra_h = (int *)malloc(size);
  rb_h = (int *)malloc(size);
  // allocate arrays on device
  hipMalloc((void **) &a_d, size);
  hipMalloc((void **) &b_d, size);
  hipMalloc((void **) &ra_d, size);
  hipMalloc((void **) &rb_d, size);
  int finished = 0;
  int iteration = 0;
  while (!finished)
  {
    if (iteration % 1000 ==0) {printf("iteration:%i\n", iteration);};
    // initialize host data
    // (x/y zeroed; ra/rb set to -1, meaning "no read recorded")
    for (i=0; i<N; i++) {
      a_h[i] = 0;
      b_h[i] = 0;
      ra_h[i] = -1;
      rb_h[i] = -1;
    }
    // send data from host to device: a_h to a_d
    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
    hipMemcpy(ra_d, ra_h, size, hipMemcpyHostToDevice);
    hipMemcpy(rb_d, rb_h, size, hipMemcpyHostToDevice);
    // do calculation on device:
    // Part 1 of 2. Compute execution configuration
    int blockSize = WARP;
    int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
    // Part 2 of 2. Call incrementArrayOnDevice kernel
    hipLaunchKernelGGL(( incrementArrayOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, a_d, b_d, ra_d, rb_d, N);
    // copy results back (blocking default-stream copies also order the
    // reads after kernel completion)
    hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost);
    hipMemcpy(b_h, b_d, size, hipMemcpyDeviceToHost);
    hipMemcpy(ra_h, ra_d, size, hipMemcpyDeviceToHost);
    hipMemcpy(rb_h, rb_d, size, hipMemcpyDeviceToHost);
    // check result
    // i walks each 4-warp group; k indexes warp 0 of the group, so
    // (ra[k], rb[k]) are warp-1 observations and (ra[k+WARP], rb[k+WARP])
    // are warp-3 observations of the same contended location.
    for (i=0; i< N/(WARP * 4); i++)
    {
      for (int j = 0; j < WARP; j ++)
      {
        int k = i * WARP * 4 + j;
        if ((ra_h[k] != rb_h[k]) || ra_h[k+WARP] != rb_h[k+ WARP])
          printf ("%i: %i %i %i %i \n", k, ra_h[k],rb_h[k], ra_h[k + WARP],rb_h[k+WARP]);
        if (!(final_cond(ra_h[k],rb_h[k], ra_h[k + WARP],rb_h[k+WARP]))) {finished = 1;}
      }
    };
    iteration ++;
  }
  printf("found witness after %i iterations\n",iteration);
  // cleanup
  free(a_h); free(b_h); free(ra_h); free(rb_h);
  hipFree(a_d); hipFree(b_d); hipFree(ra_d); hipFree(rb_d);
}
| cc8fd690fac775421f37231d48be852262c83b97.cu |
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define WARP 8
#define WARP2 16
#define WARP3 24
// GPU memory-coherence litmus-test kernel.
//
// Threads are grouped into WARP-sized logical warps (warp = idx / WARP);
// each group of four consecutive warps plays fixed roles selected by
// (warp % 4), all targeting the x[] slots of warp 0 in the group:
//   role 0: stores x[idx] = 1
//   role 2: stores x[idx - WARP2] = 2 (the same locations as role 0)
//   role 1: stores y[idx], then loads the contended x location twice and
//           records the observations in ra[idx-WARP] / rb[idx-WARP]
//   role 3: loads the same x location twice (via idx - WARP3) and records
//           them in ra[idx-WARP2] / rb[idx-WARP2]
// r0/r1 start at -2 so writer-role threads leave no read record. The host
// classifies the recorded (r0, r1) pairs with final_cond(). N is unused
// inside the kernel; index validity is implied by the launch shape.
__global__ void incrementArrayOnDevice(int *x, int*y, int*ra, int*rb, int N)
{
  int idx = blockIdx.x*blockDim.x + threadIdx.x;
  int r0 = -2;
  int r1 = -2;
  int warp = idx / WARP;
  int div = warp % 4;
  if (div == 0) {x[idx] = 1;}
  else if (div == 2) {x[idx - WARP2] = 2;}
  else if (div == 1) {
    y [idx] = 1;
    r0 = x[idx - WARP];
    ra[idx-WARP] = r0;
    r1 = x[idx - WARP];
    rb[idx-WARP] = r1;}
  else
  {
    r0 = x[idx - WARP3];
    ra[idx-WARP2] = r0; // notice the warp2
    r1 = x[idx - WARP3];
    rb[idx-WARP2] = r1;}
  /*
  switch (warp % 4)
  {
  case 0:
  y[idx + WARP] = 1;
  x[idx] = 1; break;
  case 2:
  x[idx - WARP2] = 2; y [idx] = 1; break;
  case 1:
  r0 = x[idx - WARP];
  r1 = x[idx - WARP];
  ra[idx-WARP] = r0;
  rb[idx-WARP] = r1;
  break;
  case 3:
  r0 = x[idx - WARP3];
  r1 = x[idx - WARP3];
  ra[idx-WARP2] = r0; // notice the warp2
  rb[idx-WARP2] = r1;
  break;
  };*/
}
// Classifies one observed litmus-test outcome.
//
// (_out_1_r0, _out_1_r1) are the two reads recorded by the warp-1 reader,
// (_out_3_r0, _out_3_r1) the two reads recorded by the warp-3 reader of
// the same group. Returns 1 when the combination belongs to the set of
// permitted results, and 0 for any forbidden (or never-written, e.g.
// -1 / -2 sentinel) combination -- i.e. a witness for the host loop.
inline static int final_cond(int _out_1_r0,int _out_1_r1,int _out_3_r0,int _out_3_r1) {
  return (((_out_1_r0 == 2) && (((_out_1_r1 == 2) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))) || ((_out_1_r1 == 1) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && (_out_3_r1 == 1))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))))) || ((_out_1_r0 == 1) && (((_out_1_r1 == 2) && ((((_out_3_r0 == 2) && (_out_3_r1 == 2)) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))) || ((_out_1_r1 == 1) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0)))))))) || ((_out_1_r0 == 0) && ((((_out_1_r1 == 2) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0))))) || ((_out_1_r1 == 1) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0)))))) || ((_out_1_r1 == 0) && ((((_out_3_r0 == 2) && ((_out_3_r1 == 2) || (_out_3_r1 == 1))) || ((_out_3_r0 == 1) && ((_out_3_r1 == 2) || (_out_3_r1 == 1)))) || ((_out_3_r0 == 0) && (((_out_3_r1 == 2) || (_out_3_r1 == 1)) || (_out_3_r1 == 0)))))));
}
// Host driver: repeatedly re-initializes the arrays, runs the litmus-test
// kernel (one WARP-sized block per logical warp), and scans the recorded
// read pairs with final_cond() until a disallowed outcome (a "witness")
// is found.
// NOTE(review): no error checking on any cuda* call or kernel launch; an
// API failure would surface only as unchanged host buffers.
int main(void)
{
  int *a_h, *b_h, *ra_h, *rb_h; // pointers to host memory
  int *a_d, *b_d, *ra_d, *rb_d; // pointers to device memory
  int N = WARP * 1000 ;
  int i;
  size_t size = N*sizeof(int);
  // allocate arrays on host
  a_h = (int *)malloc(size);
  b_h = (int *)malloc(size);
  ra_h = (int *)malloc(size);
  rb_h = (int *)malloc(size);
  // allocate arrays on device
  cudaMalloc((void **) &a_d, size);
  cudaMalloc((void **) &b_d, size);
  cudaMalloc((void **) &ra_d, size);
  cudaMalloc((void **) &rb_d, size);
  int finished = 0;
  int iteration = 0;
  while (!finished)
  {
    if (iteration % 1000 ==0) {printf("iteration:%i\n", iteration);};
    // initialize host data
    // (x/y zeroed; ra/rb set to -1, meaning "no read recorded")
    for (i=0; i<N; i++) {
      a_h[i] = 0;
      b_h[i] = 0;
      ra_h[i] = -1;
      rb_h[i] = -1;
    }
    // send data from host to device: a_h to a_d
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(ra_d, ra_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(rb_d, rb_h, size, cudaMemcpyHostToDevice);
    // do calculation on device:
    // Part 1 of 2. Compute execution configuration
    int blockSize = WARP;
    int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
    // Part 2 of 2. Call incrementArrayOnDevice kernel
    incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, b_d, ra_d, rb_d, N);
    // copy results back (blocking default-stream copies also order the
    // reads after kernel completion)
    cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(b_h, b_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(ra_h, ra_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(rb_h, rb_d, size, cudaMemcpyDeviceToHost);
    // check result
    // i walks each 4-warp group; k indexes warp 0 of the group, so
    // (ra[k], rb[k]) are warp-1 observations and (ra[k+WARP], rb[k+WARP])
    // are warp-3 observations of the same contended location.
    for (i=0; i< N/(WARP * 4); i++)
    {
      for (int j = 0; j < WARP; j ++)
      {
        int k = i * WARP * 4 + j;
        if ((ra_h[k] != rb_h[k]) || ra_h[k+WARP] != rb_h[k+ WARP])
          printf ("%i: %i %i %i %i \n", k, ra_h[k],rb_h[k], ra_h[k + WARP],rb_h[k+WARP]);
        if (!(final_cond(ra_h[k],rb_h[k], ra_h[k + WARP],rb_h[k+WARP]))) {finished = 1;}
      }
    };
    iteration ++;
  }
  printf("found witness after %i iterations\n",iteration);
  // cleanup
  free(a_h); free(b_h); free(ra_h); free(rb_h);
  cudaFree(a_d); cudaFree(b_d); cudaFree(ra_d); cudaFree(rb_d);
}
|
03deef8eaae4f76db3d739908e1496822db1f81e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../kernels.hpp"
#include "sph.cuh"
#include "utils.cuh"
namespace sphexa
{
namespace sph
{
namespace cuda
{
namespace kernels
{
// Per-particle IAD (Integral Approach to Derivatives) correction terms.
//
// One thread per entry of `clist` (flat 1D launch); `neighbors` stores
// `ngmax` neighbor slots per clist entry. For particle i the kernel
// accumulates the symmetric 3x3 tensor tau over its nn neighbors and
// writes the six unique components of tau's inverse to c11..c33
// (indexed by tid, i.e. by position within clist).
template <typename T>
__global__ void computeIAD(const int n, const T sincIndex, const T K, const int ngmax, const BBox<T> *bbox, const int *clist,
                           const int *neighbors, const int *neighborsCount, const T *x, const T *y, const T *z, const T *h, const T *m,
                           const T *ro, T *c11, T *c12, T *c13, T *c22, T *c23, T *c33)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) return;

    const int i = clist[tid];
    const int nn = neighborsCount[tid];

    T tau11 = 0.0, tau12 = 0.0, tau13 = 0.0, tau22 = 0.0, tau23 = 0.0, tau33 = 0.0;

    for (int pj = 0; pj < nn; ++pj)
    {
        const int j = neighbors[tid * ngmax + pj];

        const T dist = distancePBC(*bbox, h[i], x[i], y[i], z[i], x[j], y[j], z[j]); // store the distance from each neighbor

        // smoothing-kernel value w, normalized by h^3 into W
        // NOTE(review): sincIndex is truncated to int for pow() -- confirm
        // this matches the reference implementation.
        const T vloc = dist / h[i];
        const T w = K * math_namespace::pow(wharmonic(vloc), (int)sincIndex);
        const T W = w / (h[i] * h[i] * h[i]);

        T r_ijx = (x[i] - x[j]);
        T r_ijy = (y[i] - y[j]);
        T r_ijz = (z[i] - z[j]);

        // wrap the separation vector for periodic boundaries
        applyPBC(*bbox, 2.0 * h[i], r_ijx, r_ijy, r_ijz);

        tau11 += r_ijx * r_ijx * m[j] / ro[j] * W;
        tau12 += r_ijx * r_ijy * m[j] / ro[j] * W;
        tau13 += r_ijx * r_ijz * m[j] / ro[j] * W;
        tau22 += r_ijy * r_ijy * m[j] / ro[j] * W;
        tau23 += r_ijy * r_ijz * m[j] / ro[j] * W;
        tau33 += r_ijz * r_ijz * m[j] / ro[j] * W;
    }

    // determinant of the symmetric tau tensor
    // NOTE(review): no guard against det == 0 (singular tau) -- presumably
    // excluded by the neighbor counts; confirm.
    const T det =
        tau11 * tau22 * tau33 + 2.0 * tau12 * tau23 * tau13 - tau11 * tau23 * tau23 - tau22 * tau13 * tau13 - tau33 * tau12 * tau12;

    // adjugate / det == inverse of the symmetric 3x3 matrix
    c11[tid] = (tau22 * tau33 - tau23 * tau23) / det;
    c12[tid] = (tau13 * tau23 - tau33 * tau12) / det;
    c13[tid] = (tau12 * tau23 - tau22 * tau13) / det;
    c22[tid] = (tau11 * tau33 - tau13 * tau13) / det;
    c23[tid] = (tau13 * tau12 - tau11 * tau23) / det;
    c33[tid] = (tau11 * tau22 - tau12 * tau12) / det;
}
} // namespace kernels
// Explicit instantiation for the double-precision SqPatch test case.
// NOTE(review): this instantiation precedes the template definition below;
// confirm the ordering is accepted by all supported toolchains.
template void computeIAD<double, SqPatch<double>>(const std::vector<int> &clist, SqPatch<double> &d);

// Host wrapper: allocates device buffers, stages particle data and
// neighbor lists, launches computeIAD over `clist`, copies the six
// correction components back into `d`, and frees the device memory.
// All transfers use the default stream, so the blocking device-to-host
// copies also order the result reads after kernel completion.
// NOTE(review): buffers are (re)allocated on every call; reuse across
// calls would help if this runs once per timestep.
template <typename T, class Dataset>
void computeIAD(const std::vector<int> &clist, Dataset &d)
{
    const size_t n = clist.size();
    const size_t np = d.x.size();
    const size_t allNeighbors = n * d.ngmax;

    const size_t size_bbox = sizeof(BBox<T>);
    const size_t size_np_T = np * sizeof(T);
    const size_t size_n_int = n * sizeof(int);
    const size_t size_n_T = n * sizeof(T);
    const size_t size_allNeighbors = allNeighbors * sizeof(int);

    int *d_clist, *d_neighbors, *d_neighborsCount;
    T *d_x, *d_y, *d_z, *d_m, *d_h, *d_ro;
    BBox<T> *d_bbox;
    T *d_c11, *d_c12, *d_c13, *d_c22, *d_c23, *d_c33;

    // input data
    CHECK_CUDA_ERR(utils::hipMalloc(size_n_int, d_clist, d_neighborsCount));
    CHECK_CUDA_ERR(utils::hipMalloc(size_allNeighbors, d_neighbors));
    CHECK_CUDA_ERR(utils::hipMalloc(size_np_T, d_x, d_y, d_z, d_h, d_m, d_ro));
    CHECK_CUDA_ERR(utils::hipMalloc(size_bbox, d_bbox));

    // output data
    CHECK_CUDA_ERR(utils::hipMalloc(size_n_T, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33));

    CHECK_CUDA_ERR(hipMemcpy(d_x, d.x.data(), size_np_T, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_y, d.y.data(), size_np_T, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_z, d.z.data(), size_np_T, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_h, d.h.data(), size_np_T, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_m, d.m.data(), size_np_T, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_ro, d.ro.data(), size_np_T, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_bbox, &d.bbox, size_bbox, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_clist, clist.data(), size_n_int, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_neighbors, d.neighbors.data(), size_allNeighbors, hipMemcpyHostToDevice));
    CHECK_CUDA_ERR(hipMemcpy(d_neighborsCount, d.neighborsCount.data(), size_n_int, hipMemcpyHostToDevice));

    // 1D launch, one thread per clist entry
    const int threadsPerBlock = 256;
    const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;

    hipLaunchKernelGGL(( kernels::computeIAD), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, d.sincIndex, d.K, d.ngmax, d_bbox, d_clist, d_neighbors, d_neighborsCount,
                                                          d_x, d_y, d_z, d_h, d_m, d_ro, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33);
    CHECK_CUDA_ERR(hipGetLastError());

    CHECK_CUDA_ERR(hipMemcpy(d.c11.data(), d_c11, size_n_T, hipMemcpyDeviceToHost));
    CHECK_CUDA_ERR(hipMemcpy(d.c12.data(), d_c12, size_n_T, hipMemcpyDeviceToHost));
    CHECK_CUDA_ERR(hipMemcpy(d.c13.data(), d_c13, size_n_T, hipMemcpyDeviceToHost));
    CHECK_CUDA_ERR(hipMemcpy(d.c22.data(), d_c22, size_n_T, hipMemcpyDeviceToHost));
    CHECK_CUDA_ERR(hipMemcpy(d.c23.data(), d_c23, size_n_T, hipMemcpyDeviceToHost));
    CHECK_CUDA_ERR(hipMemcpy(d.c33.data(), d_c33, size_n_T, hipMemcpyDeviceToHost));

    CHECK_CUDA_ERR(utils::hipFree(d_bbox, d_clist, d_neighbors, d_neighborsCount, d_x, d_y, d_z, d_h, d_m, d_ro, d_c11, d_c12, d_c13,
                                  d_c22, d_c23, d_c33));
}
} // namespace cuda
} // namespace sph
} // namespace sphexa
| 03deef8eaae4f76db3d739908e1496822db1f81e.cu | #include <cuda.h>
#include "../kernels.hpp"
#include "sph.cuh"
#include "utils.cuh"
namespace sphexa
{
namespace sph
{
namespace cuda
{
namespace kernels
{
// Per-particle IAD (Integral Approach to Derivatives) correction terms.
//
// One thread per entry of `clist` (flat 1D launch); `neighbors` stores
// `ngmax` neighbor slots per clist entry. For particle i the kernel
// accumulates the symmetric 3x3 tensor tau over its nn neighbors and
// writes the six unique components of tau's inverse to c11..c33
// (indexed by tid, i.e. by position within clist).
template <typename T>
__global__ void computeIAD(const int n, const T sincIndex, const T K, const int ngmax, const BBox<T> *bbox, const int *clist,
                           const int *neighbors, const int *neighborsCount, const T *x, const T *y, const T *z, const T *h, const T *m,
                           const T *ro, T *c11, T *c12, T *c13, T *c22, T *c23, T *c33)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) return;

    const int i = clist[tid];
    const int nn = neighborsCount[tid];

    T tau11 = 0.0, tau12 = 0.0, tau13 = 0.0, tau22 = 0.0, tau23 = 0.0, tau33 = 0.0;

    for (int pj = 0; pj < nn; ++pj)
    {
        const int j = neighbors[tid * ngmax + pj];

        const T dist = distancePBC(*bbox, h[i], x[i], y[i], z[i], x[j], y[j], z[j]); // store the distance from each neighbor

        // smoothing-kernel value w, normalized by h^3 into W
        // NOTE(review): sincIndex is truncated to int for pow() -- confirm
        // this matches the reference implementation.
        const T vloc = dist / h[i];
        const T w = K * math_namespace::pow(wharmonic(vloc), (int)sincIndex);
        const T W = w / (h[i] * h[i] * h[i]);

        T r_ijx = (x[i] - x[j]);
        T r_ijy = (y[i] - y[j]);
        T r_ijz = (z[i] - z[j]);

        // wrap the separation vector for periodic boundaries
        applyPBC(*bbox, 2.0 * h[i], r_ijx, r_ijy, r_ijz);

        tau11 += r_ijx * r_ijx * m[j] / ro[j] * W;
        tau12 += r_ijx * r_ijy * m[j] / ro[j] * W;
        tau13 += r_ijx * r_ijz * m[j] / ro[j] * W;
        tau22 += r_ijy * r_ijy * m[j] / ro[j] * W;
        tau23 += r_ijy * r_ijz * m[j] / ro[j] * W;
        tau33 += r_ijz * r_ijz * m[j] / ro[j] * W;
    }

    // determinant of the symmetric tau tensor
    // NOTE(review): no guard against det == 0 (singular tau) -- presumably
    // excluded by the neighbor counts; confirm.
    const T det =
        tau11 * tau22 * tau33 + 2.0 * tau12 * tau23 * tau13 - tau11 * tau23 * tau23 - tau22 * tau13 * tau13 - tau33 * tau12 * tau12;

    // adjugate / det == inverse of the symmetric 3x3 matrix
    c11[tid] = (tau22 * tau33 - tau23 * tau23) / det;
    c12[tid] = (tau13 * tau23 - tau33 * tau12) / det;
    c13[tid] = (tau12 * tau23 - tau22 * tau13) / det;
    c22[tid] = (tau11 * tau33 - tau13 * tau13) / det;
    c23[tid] = (tau13 * tau12 - tau11 * tau23) / det;
    c33[tid] = (tau11 * tau22 - tau12 * tau12) / det;
}
} // namespace kernels
// Explicit instantiation for the double-precision SqPatch test case.
// NOTE(review): this instantiation precedes the template definition below;
// confirm the ordering is accepted by all supported toolchains.
template void computeIAD<double, SqPatch<double>>(const std::vector<int> &clist, SqPatch<double> &d);

// Host wrapper: allocates device buffers, stages particle data and
// neighbor lists, launches computeIAD over `clist`, copies the six
// correction components back into `d`, and frees the device memory.
// All transfers use the default stream, so the blocking device-to-host
// copies also order the result reads after kernel completion.
// NOTE(review): buffers are (re)allocated on every call; reuse across
// calls would help if this runs once per timestep.
template <typename T, class Dataset>
void computeIAD(const std::vector<int> &clist, Dataset &d)
{
    const size_t n = clist.size();
    const size_t np = d.x.size();
    const size_t allNeighbors = n * d.ngmax;

    const size_t size_bbox = sizeof(BBox<T>);
    const size_t size_np_T = np * sizeof(T);
    const size_t size_n_int = n * sizeof(int);
    const size_t size_n_T = n * sizeof(T);
    const size_t size_allNeighbors = allNeighbors * sizeof(int);

    int *d_clist, *d_neighbors, *d_neighborsCount;
    T *d_x, *d_y, *d_z, *d_m, *d_h, *d_ro;
    BBox<T> *d_bbox;
    T *d_c11, *d_c12, *d_c13, *d_c22, *d_c23, *d_c33;

    // input data
    CHECK_CUDA_ERR(utils::cudaMalloc(size_n_int, d_clist, d_neighborsCount));
    CHECK_CUDA_ERR(utils::cudaMalloc(size_allNeighbors, d_neighbors));
    CHECK_CUDA_ERR(utils::cudaMalloc(size_np_T, d_x, d_y, d_z, d_h, d_m, d_ro));
    CHECK_CUDA_ERR(utils::cudaMalloc(size_bbox, d_bbox));

    // output data
    CHECK_CUDA_ERR(utils::cudaMalloc(size_n_T, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33));

    CHECK_CUDA_ERR(cudaMemcpy(d_x, d.x.data(), size_np_T, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_y, d.y.data(), size_np_T, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_z, d.z.data(), size_np_T, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_h, d.h.data(), size_np_T, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_m, d.m.data(), size_np_T, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_ro, d.ro.data(), size_np_T, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_bbox, &d.bbox, size_bbox, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_clist, clist.data(), size_n_int, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_neighbors, d.neighbors.data(), size_allNeighbors, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERR(cudaMemcpy(d_neighborsCount, d.neighborsCount.data(), size_n_int, cudaMemcpyHostToDevice));

    // 1D launch, one thread per clist entry
    const int threadsPerBlock = 256;
    const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;

    kernels::computeIAD<<<blocksPerGrid, threadsPerBlock>>>(n, d.sincIndex, d.K, d.ngmax, d_bbox, d_clist, d_neighbors, d_neighborsCount,
                                                            d_x, d_y, d_z, d_h, d_m, d_ro, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33);
    CHECK_CUDA_ERR(cudaGetLastError());

    CHECK_CUDA_ERR(cudaMemcpy(d.c11.data(), d_c11, size_n_T, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaMemcpy(d.c12.data(), d_c12, size_n_T, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaMemcpy(d.c13.data(), d_c13, size_n_T, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaMemcpy(d.c22.data(), d_c22, size_n_T, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaMemcpy(d.c23.data(), d_c23, size_n_T, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERR(cudaMemcpy(d.c33.data(), d_c33, size_n_T, cudaMemcpyDeviceToHost));

    CHECK_CUDA_ERR(utils::cudaFree(d_bbox, d_clist, d_neighbors, d_neighborsCount, d_x, d_y, d_z, d_h, d_m, d_ro, d_c11, d_c12, d_c13,
                                   d_c22, d_c23, d_c33));
}
} // namespace sph
} // namespace sphexa
|
1e9477a06048c8f2feee8be526de899128df5dcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void hello_from_gpu()
{
const int bid = blockIdx.x;
const int tid = threadIdx.x;
printf("Hello World from block %d and thread %d!\n", bid, tid);
} | 1e9477a06048c8f2feee8be526de899128df5dcd.cu | #include "includes.h"
__global__ void hello_from_gpu()
{
const int bid = blockIdx.x;
const int tid = threadIdx.x;
printf("Hello World from block %d and thread %d!\n", bid, tid);
} |
927b3665d3d6c4edf33c6b17f30d008d0f93c936.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_a;
int xdim0_update_halo_kernel5_plus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_a;
int ydim0_update_halo_kernel5_plus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_a;
int xdim1_update_halo_kernel5_plus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_a;
int ydim1_update_halo_kernel5_plus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_2_a * (y) + \
xdim0_update_halo_kernel5_plus_2_a * ydim0_update_halo_kernel5_plus_2_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_2_a * (y) + \
xdim1_update_halo_kernel5_plus_2_a * ydim1_update_halo_kernel5_plus_2_a * \
(z))
// user function
__device__
inline void
update_halo_kernel5_plus_2_a_gpu(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 2, 0)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = mass_flux_z[OPS_ACC1(0, 2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_2_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_2_a *
ydim0_update_halo_kernel5_plus_2_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_2_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_2_a *
ydim1_update_halo_kernel5_plus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_a_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 85))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(85, "update_halo_kernel5_plus_2_a");
OPS_kernels[85].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_2_a_h ||
ydim0 != ydim0_update_halo_kernel5_plus_2_a_h ||
xdim1 != xdim1_update_halo_kernel5_plus_2_a_h ||
ydim1 != ydim1_update_halo_kernel5_plus_2_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_2_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel5_plus_2_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_2_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel5_plus_2_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_2_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel5_plus_2_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_2_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel5_plus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[85].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[85].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[85].mpi_time += t2 - t1;
OPS_kernels[85].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[85].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 85;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 85;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(85, "update_halo_kernel5_plus_2_a");
}
ops_enqueue_kernel(desc);
}
#endif
| 927b3665d3d6c4edf33c6b17f30d008d0f93c936.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_a;
int xdim0_update_halo_kernel5_plus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_a;
int ydim0_update_halo_kernel5_plus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_a;
int xdim1_update_halo_kernel5_plus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_a;
int ydim1_update_halo_kernel5_plus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_2_a * (y) + \
xdim0_update_halo_kernel5_plus_2_a * ydim0_update_halo_kernel5_plus_2_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_2_a * (y) + \
xdim1_update_halo_kernel5_plus_2_a * ydim1_update_halo_kernel5_plus_2_a * \
(z))
// user function
__device__
inline void
update_halo_kernel5_plus_2_a_gpu(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 2, 0)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = mass_flux_z[OPS_ACC1(0, 2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_2_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_2_a *
ydim0_update_halo_kernel5_plus_2_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_2_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_2_a *
ydim1_update_halo_kernel5_plus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_a_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 85))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(85, "update_halo_kernel5_plus_2_a");
OPS_kernels[85].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_2_a_h ||
ydim0 != ydim0_update_halo_kernel5_plus_2_a_h ||
xdim1 != xdim1_update_halo_kernel5_plus_2_a_h ||
ydim1 != ydim1_update_halo_kernel5_plus_2_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_2_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel5_plus_2_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_2_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel5_plus_2_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_2_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel5_plus_2_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_2_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel5_plus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[85].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_plus_2_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[85].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[85].mpi_time += t2 - t1;
OPS_kernels[85].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[85].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 85;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 85;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(85, "update_halo_kernel5_plus_2_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
f7a056f29a9224b263890aaafdc4fc149508a9a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cudaUtils.h"
#include "random.h"
#include <cassert>
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int BLOCK_SIZE = 256;
}
/******************************************************************************
* CUDA KERNELS ***************************************************************
*****************************************************************************/
__global__ void initRandStateKernel(hiprandState_t* const states, const int numStates, const uint32_t seed)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < numStates)
{
hiprand_init(seed, index, 0, states + index);
}
}
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
int roundUpBlocks(const int num, const int blockSize)
{
return ((num - 1) / blockSize) + 1;
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Random::Random(const int numStates, const unsigned int seed)
: mNumStates(numStates)
, mRandStateDevice(nullptr)
{
CudaUtils::alloc(&mRandStateDevice, mNumStates);
setSeed(seed);
}
Random::~Random()
{
CudaUtils::free(&mRandStateDevice);
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void Random::setSeed(unsigned int seed, hipStream_t stream)
{
assert(mNumStates == 0 || mRandStateDevice);
const dim3 grid(roundUpBlocks(mNumStates, BLOCK_SIZE));
const dim3 block(BLOCK_SIZE);
hipLaunchKernelGGL(( initRandStateKernel), dim3(grid), dim3(block), 0, stream, mRandStateDevice, mNumStates, seed);
}
hiprandState_t* Random::getRandomStates()
{
assert(mNumStates == 0 || mRandStateDevice);
return mRandStateDevice;
}
} // namespace tts
| f7a056f29a9224b263890aaafdc4fc149508a9a0.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cudaUtils.h"
#include "random.h"
#include <cassert>
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int BLOCK_SIZE = 256;
}
/******************************************************************************
* CUDA KERNELS ***************************************************************
*****************************************************************************/
__global__ void initRandStateKernel(curandState_t* const states, const int numStates, const uint32_t seed)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < numStates)
{
curand_init(seed, index, 0, states + index);
}
}
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
int roundUpBlocks(const int num, const int blockSize)
{
return ((num - 1) / blockSize) + 1;
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Random::Random(const int numStates, const unsigned int seed)
: mNumStates(numStates)
, mRandStateDevice(nullptr)
{
CudaUtils::alloc(&mRandStateDevice, mNumStates);
setSeed(seed);
}
Random::~Random()
{
CudaUtils::free(&mRandStateDevice);
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void Random::setSeed(unsigned int seed, cudaStream_t stream)
{
assert(mNumStates == 0 || mRandStateDevice);
const dim3 grid(roundUpBlocks(mNumStates, BLOCK_SIZE));
const dim3 block(BLOCK_SIZE);
initRandStateKernel<<<grid, block, 0, stream>>>(mRandStateDevice, mNumStates, seed);
}
curandState_t* Random::getRandomStates()
{
assert(mNumStates == 0 || mRandStateDevice);
return mRandStateDevice;
}
} // namespace tts
|
1c90e6b04391444ca6b6eb3c0c2b270c6673ac46.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cmath>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <vector>
#include <queue>
#include <limits.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define FW_MAX 50000
#define BLOCK_SIZE 16
int nodes; //number of nodes
int* matrix; //Input matrix
int* FWDistanceMatrix; // Distance matrix for Floyd-Warshall
int* FWPathMatrix; // Path Matrix for Floyd-Warshall
//measuring function
double get_wall_time() {
struct timeval time;
if (gettimeofday(&time, NULL)) {
// Handle error
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
double get_cpu_time() {
return (double)clock() / CLOCKS_PER_SEC;
}
void TESTPrintMatrix() {
for (int i = 0; i < nodes; i++) {
for (int j = 0; j < nodes; j++) {
cout << FWDistanceMatrix[i * nodes + j] << " ";
}
cout << endl;
}
}
__global__ void GPU_FloydWarshall(int i, int * deviceDistanceMatrix, int * devicePathMatrix, int nodes) {
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idy < nodes && idx < nodes)
{
int actuatPosition = idy * nodes + idx;
int newDistance = deviceDistanceMatrix[idy * nodes + i] + deviceDistanceMatrix[nodes * i + idx];
int oldDistance = deviceDistanceMatrix[actuatPosition];
if (newDistance < oldDistance)
{
deviceDistanceMatrix[actuatPosition] = newDistance;
devicePathMatrix[actuatPosition] = devicePathMatrix[i * nodes + idx];
}
}
}
void FloydWarshall() {
int *deviceDistanceMatrix;
int *devicePathMatrix;
hipError_t err;
err = hipSetDevice(0);
if (err != hipSuccess) cout << "CHYBA!" << endl;
err = hipMalloc((int**)&deviceDistanceMatrix, nodes * nodes * sizeof(int));
if (err != hipSuccess) cout << "chyba" << endl;
err = hipMalloc((int**)&devicePathMatrix, nodes * nodes * sizeof(int));
if (err != hipSuccess) cout << "chyba" << endl;
err = hipMemcpy(deviceDistanceMatrix, FWDistanceMatrix, nodes * nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) cout << "chyba" << endl;
err = hipMemcpy(devicePathMatrix, FWPathMatrix, nodes * nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) cout << "chyba" << endl;
dim3 dimGrid((nodes - 1) / BLOCK_SIZE + 1, (nodes - 1) / BLOCK_SIZE + 1, 1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
for ( int i = 0; i < nodes; i++)
{
hipLaunchKernelGGL(( GPU_FloydWarshall) , dim3(dimGrid), dim3(dimBlock), 0, 0, i, deviceDistanceMatrix, devicePathMatrix, nodes);
err = hipDeviceSynchronize();
if (err != hipSuccess) cout << "Error" << endl;
}
hipMemcpy(FWDistanceMatrix, deviceDistanceMatrix, nodes * nodes * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(FWPathMatrix, devicePathMatrix, nodes * nodes * sizeof(int), hipMemcpyDeviceToHost);
hipFree(devicePathMatrix);
hipFree(deviceDistanceMatrix);
}
bool loadMatrix(const char * matrixPath) {
string line;
ifstream inFile (matrixPath);
if (!inFile.is_open()) {
cout << "Wrong path to file" << endl;
return false;
}
getline(inFile, line);
nodes = atoi(line.c_str());
FWDistanceMatrix = new int [nodes * nodes];
FWPathMatrix = new int [nodes * nodes];
for (int j = 0; j < nodes; j++)
{
getline(inFile, line);
istringstream is(line);
for (int i = 0; i < nodes; i++)
{
is >> FWDistanceMatrix[j * nodes + i];
if (i != j && FWDistanceMatrix[j * nodes + i] == 0)
FWDistanceMatrix[j * nodes + i] = FW_MAX;
}
}
//initialize predecessors
for ( int i = 0; i < nodes; i++)
for ( int j = 0; j < nodes; j++)
FWPathMatrix[i * nodes + j] = ( FWDistanceMatrix[i * nodes + j] == FW_MAX ? -1 : i);
return true;
}
int main( int argc, const char* argv[] )
{
if ( argc != 2 ) {
cout << "Bad Input.. 1st parameter: Path to file." << endl;
return 1;
}
loadMatrix(argv[1]);
//start of measuring
double wall0 = get_wall_time();
double cpu0 = get_cpu_time();
// Initialize CUDA Event
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
FloydWarshall();
// Finish recording
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Calculate elasped time
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 1000;
printf ("Timee : %f s\n", elapsedTime);
//end of measuring
double wall1 = get_wall_time();
double cpu1 = get_cpu_time();
//TESTPrintMatrix();
//prints results
/* for (int i = 0; i < ncg.getNodes(); i++) {
ncg.FWShortestPathFrom(i);
}*/
cout << "Wall Time = " << wall1 - wall0 << endl;
cout << "CPU Time = " << cpu1 - cpu0 << endl;
return 0;
}
| 1c90e6b04391444ca6b6eb3c0c2b270c6673ac46.cu | #include <stdio.h>
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cmath>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <vector>
#include <queue>
#include <limits.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define FW_MAX 50000
#define BLOCK_SIZE 16
int nodes; //number of nodes
int* matrix; //Input matrix
int* FWDistanceMatrix; // Distance matrix for Floyd-Warshall
int* FWPathMatrix; // Path Matrix for Floyd-Warshall
//measuring function
double get_wall_time() {
struct timeval time;
if (gettimeofday(&time, NULL)) {
// Handle error
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
double get_cpu_time() {
return (double)clock() / CLOCKS_PER_SEC;
}
void TESTPrintMatrix() {
for (int i = 0; i < nodes; i++) {
for (int j = 0; j < nodes; j++) {
cout << FWDistanceMatrix[i * nodes + j] << " ";
}
cout << endl;
}
}
__global__ void GPU_FloydWarshall(int i, int * deviceDistanceMatrix, int * devicePathMatrix, int nodes) {
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idy < nodes && idx < nodes)
{
int actuatPosition = idy * nodes + idx;
int newDistance = deviceDistanceMatrix[idy * nodes + i] + deviceDistanceMatrix[nodes * i + idx];
int oldDistance = deviceDistanceMatrix[actuatPosition];
if (newDistance < oldDistance)
{
deviceDistanceMatrix[actuatPosition] = newDistance;
devicePathMatrix[actuatPosition] = devicePathMatrix[i * nodes + idx];
}
}
}
void FloydWarshall() {
int *deviceDistanceMatrix;
int *devicePathMatrix;
cudaError_t err;
err = cudaSetDevice(0);
if (err != cudaSuccess) cout << "CHYBA!" << endl;
err = cudaMalloc((int**)&deviceDistanceMatrix, nodes * nodes * sizeof(int));
if (err != cudaSuccess) cout << "chyba" << endl;
err = cudaMalloc((int**)&devicePathMatrix, nodes * nodes * sizeof(int));
if (err != cudaSuccess) cout << "chyba" << endl;
err = cudaMemcpy(deviceDistanceMatrix, FWDistanceMatrix, nodes * nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) cout << "chyba" << endl;
err = cudaMemcpy(devicePathMatrix, FWPathMatrix, nodes * nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) cout << "chyba" << endl;
dim3 dimGrid((nodes - 1) / BLOCK_SIZE + 1, (nodes - 1) / BLOCK_SIZE + 1, 1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
for ( int i = 0; i < nodes; i++)
{
GPU_FloydWarshall <<< dimGrid, dimBlock>>>(i, deviceDistanceMatrix, devicePathMatrix, nodes);
err = cudaThreadSynchronize();
if (err != cudaSuccess) cout << "Error" << endl;
}
cudaMemcpy(FWDistanceMatrix, deviceDistanceMatrix, nodes * nodes * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(FWPathMatrix, devicePathMatrix, nodes * nodes * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(devicePathMatrix);
cudaFree(deviceDistanceMatrix);
}
bool loadMatrix(const char * matrixPath) {
    // Reads the graph from a text file: first line is the node count, then
    // one whitespace-separated row of the adjacency matrix per line.
    // Allocates and fills the global FWDistanceMatrix and FWPathMatrix.
    ifstream inFile (matrixPath);
    if (!inFile.is_open()) {
        cout << "Wrong path to file" << endl;
        return false;
    }
    string line;
    getline(inFile, line);
    nodes = atoi(line.c_str());
    FWDistanceMatrix = new int [nodes * nodes];
    FWPathMatrix = new int [nodes * nodes];
    for (int row = 0; row < nodes; row++)
    {
        getline(inFile, line);
        istringstream tokens(line);
        for (int col = 0; col < nodes; col++)
        {
            int &cell = FWDistanceMatrix[row * nodes + col];
            tokens >> cell;
            // A zero off the diagonal means "no edge": treat as infinity.
            if (col != row && cell == 0)
                cell = FW_MAX;
        }
    }
    // Initialize predecessors: for a direct edge i->j the predecessor of j
    // is i; unreachable pairs get -1.
    for ( int i = 0; i < nodes; i++)
        for ( int j = 0; j < nodes; j++)
            FWPathMatrix[i * nodes + j] = ( FWDistanceMatrix[i * nodes + j] == FW_MAX ? -1 : i);
    return true;
}
int main( int argc, const char* argv[] )
{
    // Entry point: loads the adjacency matrix from the file given as the
    // only argument, runs the GPU Floyd-Warshall, and reports GPU-event,
    // wall-clock and CPU timings.
    if ( argc != 2 ) {
        cout << "Bad Input.. 1st parameter: Path to file." << endl;
        return 1;
    }
    // FIX: the return value was previously ignored, so a bad path ran the
    // GPU code on uninitialized matrices.
    if (!loadMatrix(argv[1])) return 1;
    //start of measuring
    double wall0 = get_wall_time();
    double cpu0 = get_cpu_time();
    // Initialize CUDA events for device-side timing
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    FloydWarshall();
    // Finish recording
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Calculate elapsed time (cudaEventElapsedTime reports milliseconds)
    cudaEventElapsedTime(&elapsedTime, start, stop);
    elapsedTime /= 1000;
    printf ("Time : %f s\n", elapsedTime); // FIX: "Timee" typo in output
    //end of measuring
    double wall1 = get_wall_time();
    double cpu1 = get_cpu_time();
    //TESTPrintMatrix();
    //prints results
    /* for (int i = 0; i < ncg.getNodes(); i++) {
    ncg.FWShortestPathFrom(i);
    }*/
    cout << "Wall Time = " << wall1 - wall0 << endl;
    cout << "CPU Time = " << cpu1 - cpu0 << endl;
    // FIX: release timing events and host matrices (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    delete [] FWDistanceMatrix;
    delete [] FWPathMatrix;
    return 0;
}
|
de4938dd9157055def20141d7c1c90b7c9c288b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/edit_distance_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void FillFirstRow(T* dist, const int N) {
  // Base case of the edit-distance DP: dist[0][j] = j for j in 0..N.
  const int col = blockDim.x * blockIdx.x + threadIdx.x;
  if (col <= N) {
    dist[col] = col;
  }
}
template <typename T>
__global__ void FillFirstColumn(T* dist, const int M, const int N) {
  // Base case of the edit-distance DP: dist[i][0] = i for i in 0..M.
  // The flattened table has a row stride of N + 1.
  const int row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row <= M) {
    dist[row * (N + 1)] = row;
  }
}
// One anti-diagonal step of the Levenshtein DP on the flattened
// (M+1) x (N+1) table `dist`.  Each thread updates one cell of the current
// anti-diagonal: stepping by N in the flat array moves one row down and one
// column left, so `start + idx * N` walks along the diagonal.  Callers must
// have filled the first row/column and completed all earlier diagonals.
template <typename T>
__global__ void Levenshtein(T* dist, const int64_t* x1, const int64_t* x2,
const int M, const int N, const int start) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = N; // flat stride of one anti-diagonal step (+1 row, -1 col)
int index = start + idx * offset;
int row = index / (N + 1);
int col = index % (N + 1);
if (row > 0 && col > 0 && row < M + 1 && col < N + 1) {
int cost = x1[row - 1] == x2[col - 1] ? 0 : 1; // substitution cost
int dels = dist[(row - 1) * (N + 1) + col] + 1; // deletion
int ins = dist[row * (N + 1) + col - 1] + 1; // insertion
int subs = dist[(row - 1) * (N + 1) + (col - 1)] + cost; // substitute/match
dist[index] = min(dels, min(ins, subs));
}
}
template <typename T>
__global__ void SetOutput(T* out, const T* dist, const int M, const int N,
                          bool normalized) {
  // Single-thread copy of the DP result dist[M][N] into out[0]; divides by
  // the reference length N when the normalized distance is requested.
  if (blockDim.x * blockIdx.x + threadIdx.x == 0) {
    const T d = dist[M * (N + 1) + N];
    out[0] = normalized ? d / N : d;
  }
}
// GPU implementation of the edit_distance operator.  For every
// hypothesis/reference sequence pair it fills an (m+1) x (n+1) Levenshtein
// DP table on the device — first-row/first-column kernels, then one launch
// per anti-diagonal (cells on the same anti-diagonal are independent) —
// and stores the resulting distance, optionally normalized by the
// reference length.
template <typename Place, typename T>
class EditDistanceGPUKernel : public framework::OpKernel<T> {
 public:
 void Compute(const framework::ExecutionContext& ctx) const {
 auto* out_t = ctx.Output<framework::Tensor>("Out");
 auto* x1_t = ctx.Input<framework::LoDTensor>("Hyps");
 auto* x2_t = ctx.Input<framework::LoDTensor>("Refs");
 auto* sequence_num = ctx.Output<framework::Tensor>("SequenceNum");
 sequence_num->mutable_data<int64_t>(ctx.GetPlace());
 auto batch_size = x1_t->dims()[0];
 auto normalized = ctx.Attr<bool>("normalized");
 auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
 ctx.device_context())
 .stream();
 framework::Vector<size_t> hyp_lod(batch_size + 1);
 framework::Vector<size_t> ref_lod(batch_size + 1);
 bool use_length = ctx.HasInput("HypsLength");
 if (use_length) {
 // build lod when using padding: per-sequence lengths are copied to the
 // host and turned into cumulative offsets.
 auto* hyp_length = ctx.Input<framework::Tensor>("HypsLength");
 auto* ref_length = ctx.Input<framework::Tensor>("RefsLength");
 framework::Tensor hyp_length_cpu;
 framework::Tensor ref_length_cpu;
 framework::TensorCopy(*hyp_length, platform::CPUPlace(), &hyp_length_cpu);
 framework::TensorCopy(*ref_length, platform::CPUPlace(), &ref_length_cpu);
 for (auto i = 0; i < batch_size; i++) {
 hyp_lod[i + 1] = hyp_lod[i] + hyp_length_cpu.data<int64_t>()[i];
 ref_lod[i + 1] = ref_lod[i] + ref_length_cpu.data<int64_t>()[i];
 }
 } else {
 hyp_lod = x1_t->lod()[0];
 ref_lod = x2_t->lod()[0];
 }
 if (normalized) {
 // Normalization divides by the reference length, so empty references
 // must be rejected up front.
 for (size_t i = 1; i < ref_lod.size(); ++i) {
 PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
 "Reference string %d is empty.", i);
 }
 }
 const size_t num_strs = hyp_lod.size() - 1;
 math::SetConstant<platform::CUDADeviceContext, int64_t> set_constant;
 set_constant(ctx.template device_context<platform::CUDADeviceContext>(),
 sequence_num, static_cast<int64_t>(num_strs));
 out_t->Resize({static_cast<int64_t>(num_strs), 1});
 out_t->mutable_data<T>(ctx.GetPlace());
 auto out = out_t->data<T>();
 T distance = 0.0;
 for (size_t num = 0; num < num_strs; num++) {
 auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]);
 auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]);
 if (m == 0 || n == 0) {
 // Degenerate pair: the distance is the length of the non-empty string;
 // computed on the host and copied straight into the output slot.
 // NOTE(review): hipify rewrote std::max to ::max here — confirm this
 // resolves in the HIP build (the CUDA source uses std::max).
 distance = ::max(m, n);
 if (normalized) {
 distance = distance / n;
 }
 memory::Copy(BOOST_GET_CONST(Place, ctx.GetPlace()), out + num,
 platform::CPUPlace(), &distance, sizeof(T), stream);
 } else {
 // Per-pair scratch DP table on the device.
 framework::Tensor dist_t;
 dist_t.Resize({m + 1, n + 1});
 dist_t.mutable_data<T>(ctx.GetPlace());
 auto dist = dist_t.data<T>();
 auto hyp_offset = use_length ? num * x1_t->dims()[1] : hyp_lod[num];
 auto ref_offset = use_length ? num * x2_t->dims()[1] : ref_lod[num];
 auto x1 = x1_t->data<int64_t>() + hyp_offset;
 auto x2 = x2_t->data<int64_t>() + ref_offset;
 hipLaunchKernelGGL(( FillFirstColumn<T>), dim3(1 + m / PADDLE_CUDA_NUM_THREADS),
 dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, m, n);
 hipLaunchKernelGGL(( FillFirstRow<T>), dim3(1 + n / PADDLE_CUDA_NUM_THREADS),
 dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, n);
 // Compute the elements of distance matrix in the anti-diagonal diretion
 for (int64_t slice = 2; slice < m + n + 1; ++slice) {
 int z_m = slice < m + 1 ? 0 : slice - m;
 int z_n = slice < n + 1 ? 0 : slice - n;
 int size = slice - (z_m + z_n) + 1; // number of elments in the same
 // anti-diagonal line to update
 // the start index at which computes from
 // (top row while the diagonal starts there, else rightmost column)
 int start = slice < n + 1 ? slice : (z_n + 1) * (n + 1) - 1;
 hipLaunchKernelGGL(( Levenshtein<T>), dim3(1 + (size - 1) / PADDLE_CUDA_NUM_THREADS),
 dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, x1, x2,
 m, n, start);
 }
 hipLaunchKernelGGL(( SetOutput<T>), dim3(1), dim3(1), 0, stream, out + num, dist, m, n, normalized);
 }
 }
 }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
edit_distance,
ops::EditDistanceGPUKernel<paddle::platform::CUDAPlace, float>);
| de4938dd9157055def20141d7c1c90b7c9c288b0.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/edit_distance_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void FillFirstRow(T* dist, const int N) {
  // Base case of the edit-distance DP: dist[0][j] = j for j in 0..N.
  const int col = blockDim.x * blockIdx.x + threadIdx.x;
  if (col <= N) {
    dist[col] = col;
  }
}
template <typename T>
__global__ void FillFirstColumn(T* dist, const int M, const int N) {
  // Base case of the edit-distance DP: dist[i][0] = i for i in 0..M.
  // The flattened table has a row stride of N + 1.
  const int row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row <= M) {
    dist[row * (N + 1)] = row;
  }
}
// One anti-diagonal step of the Levenshtein DP on the flattened
// (M+1) x (N+1) table `dist`.  Each thread updates one cell of the current
// anti-diagonal: stepping by N in the flat array moves one row down and one
// column left, so `start + idx * N` walks along the diagonal.  Callers must
// have filled the first row/column and completed all earlier diagonals.
template <typename T>
__global__ void Levenshtein(T* dist, const int64_t* x1, const int64_t* x2,
const int M, const int N, const int start) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = N; // flat stride of one anti-diagonal step (+1 row, -1 col)
int index = start + idx * offset;
int row = index / (N + 1);
int col = index % (N + 1);
if (row > 0 && col > 0 && row < M + 1 && col < N + 1) {
int cost = x1[row - 1] == x2[col - 1] ? 0 : 1; // substitution cost
int dels = dist[(row - 1) * (N + 1) + col] + 1; // deletion
int ins = dist[row * (N + 1) + col - 1] + 1; // insertion
int subs = dist[(row - 1) * (N + 1) + (col - 1)] + cost; // substitute/match
dist[index] = min(dels, min(ins, subs));
}
}
template <typename T>
__global__ void SetOutput(T* out, const T* dist, const int M, const int N,
                          bool normalized) {
  // Single-thread copy of the DP result dist[M][N] into out[0]; divides by
  // the reference length N when the normalized distance is requested.
  if (blockDim.x * blockIdx.x + threadIdx.x == 0) {
    const T d = dist[M * (N + 1) + N];
    out[0] = normalized ? d / N : d;
  }
}
// GPU implementation of the edit_distance operator.  For every
// hypothesis/reference sequence pair it fills an (m+1) x (n+1) Levenshtein
// DP table on the device — first-row/first-column kernels, then one launch
// per anti-diagonal (cells on the same anti-diagonal are independent) —
// and stores the resulting distance, optionally normalized by the
// reference length.
template <typename Place, typename T>
class EditDistanceGPUKernel : public framework::OpKernel<T> {
 public:
 void Compute(const framework::ExecutionContext& ctx) const {
 auto* out_t = ctx.Output<framework::Tensor>("Out");
 auto* x1_t = ctx.Input<framework::LoDTensor>("Hyps");
 auto* x2_t = ctx.Input<framework::LoDTensor>("Refs");
 auto* sequence_num = ctx.Output<framework::Tensor>("SequenceNum");
 sequence_num->mutable_data<int64_t>(ctx.GetPlace());
 auto batch_size = x1_t->dims()[0];
 auto normalized = ctx.Attr<bool>("normalized");
 auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
 ctx.device_context())
 .stream();
 framework::Vector<size_t> hyp_lod(batch_size + 1);
 framework::Vector<size_t> ref_lod(batch_size + 1);
 bool use_length = ctx.HasInput("HypsLength");
 if (use_length) {
 // build lod when using padding: per-sequence lengths are copied to the
 // host and turned into cumulative offsets.
 auto* hyp_length = ctx.Input<framework::Tensor>("HypsLength");
 auto* ref_length = ctx.Input<framework::Tensor>("RefsLength");
 framework::Tensor hyp_length_cpu;
 framework::Tensor ref_length_cpu;
 framework::TensorCopy(*hyp_length, platform::CPUPlace(), &hyp_length_cpu);
 framework::TensorCopy(*ref_length, platform::CPUPlace(), &ref_length_cpu);
 for (auto i = 0; i < batch_size; i++) {
 hyp_lod[i + 1] = hyp_lod[i] + hyp_length_cpu.data<int64_t>()[i];
 ref_lod[i + 1] = ref_lod[i] + ref_length_cpu.data<int64_t>()[i];
 }
 } else {
 hyp_lod = x1_t->lod()[0];
 ref_lod = x2_t->lod()[0];
 }
 if (normalized) {
 // Normalization divides by the reference length, so empty references
 // must be rejected up front.
 for (size_t i = 1; i < ref_lod.size(); ++i) {
 PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
 "Reference string %d is empty.", i);
 }
 }
 const size_t num_strs = hyp_lod.size() - 1;
 math::SetConstant<platform::CUDADeviceContext, int64_t> set_constant;
 set_constant(ctx.template device_context<platform::CUDADeviceContext>(),
 sequence_num, static_cast<int64_t>(num_strs));
 out_t->Resize({static_cast<int64_t>(num_strs), 1});
 out_t->mutable_data<T>(ctx.GetPlace());
 auto out = out_t->data<T>();
 T distance = 0.0;
 for (size_t num = 0; num < num_strs; num++) {
 auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]);
 auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]);
 if (m == 0 || n == 0) {
 // Degenerate pair: the distance is the length of the non-empty string;
 // computed on the host and copied straight into the output slot.
 distance = std::max(m, n);
 if (normalized) {
 distance = distance / n;
 }
 memory::Copy(BOOST_GET_CONST(Place, ctx.GetPlace()), out + num,
 platform::CPUPlace(), &distance, sizeof(T), stream);
 } else {
 // Per-pair scratch DP table on the device.
 framework::Tensor dist_t;
 dist_t.Resize({m + 1, n + 1});
 dist_t.mutable_data<T>(ctx.GetPlace());
 auto dist = dist_t.data<T>();
 auto hyp_offset = use_length ? num * x1_t->dims()[1] : hyp_lod[num];
 auto ref_offset = use_length ? num * x2_t->dims()[1] : ref_lod[num];
 auto x1 = x1_t->data<int64_t>() + hyp_offset;
 auto x2 = x2_t->data<int64_t>() + ref_offset;
 FillFirstColumn<T><<<1 + m / PADDLE_CUDA_NUM_THREADS,
 PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, m, n);
 FillFirstRow<T><<<1 + n / PADDLE_CUDA_NUM_THREADS,
 PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, n);
 // Compute the elements of distance matrix in the anti-diagonal diretion
 for (int64_t slice = 2; slice < m + n + 1; ++slice) {
 int z_m = slice < m + 1 ? 0 : slice - m;
 int z_n = slice < n + 1 ? 0 : slice - n;
 int size = slice - (z_m + z_n) + 1; // number of elments in the same
 // anti-diagonal line to update
 // the start index at which computes from
 // (top row while the diagonal starts there, else rightmost column)
 int start = slice < n + 1 ? slice : (z_n + 1) * (n + 1) - 1;
 Levenshtein<T><<<1 + (size - 1) / PADDLE_CUDA_NUM_THREADS,
 PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, x1, x2,
 m, n, start);
 }
 SetOutput<T><<<1, 1, 0, stream>>>(out + num, dist, m, n, normalized);
 }
 }
 }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
edit_distance,
ops::EditDistanceGPUKernel<paddle::platform::CUDAPlace, float>);
|
30c8696f9e1361aa575cf32916e4c95331f6b12b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "fs_debug.cu.h"
#include "fs_initializer.cu.h"
// INCLUDING CODE INLINE - change later
#include "host_loop.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <math.h>
#define MAIN_FS_FILE
#include "cp.hip"
void stdavg(double *avg_time, double *avg_thpt, double* std_time, double *std_thpt, const double* times, const double total_data, int arr_len)
{
  // Mean and (population) standard deviation of the recorded wall times and
  // of the derived throughputs total_data / time.  Entries <= 0 are
  // warm-up/unused slots and are skipped.  All outputs are zeroed first; if
  // no valid samples exist they stay zero.
  *avg_time=*avg_thpt=*std_time=*std_thpt=0;
  int counter=0;
  for( int i=0;i<arr_len;i++){
    if (times[i]<=0) continue;
    *avg_time+=times[i];
    *avg_thpt+=((double)total_data)/times[i];
    counter++;
  }
  if (counter==0) return;
  *avg_time/=(double)counter;
  *avg_thpt/=(double)counter;
  for( int i=0;i<arr_len;i++){
    if (times[i]<=0) continue;
    // BUG FIX: accumulate the squared deviations ('+=') instead of
    // overwriting them ('='), which previously made the reported std
    // reflect only the last valid sample.
    double dt = times[i]-*avg_time;
    *std_time += dt*dt;
    double dthpt = (((double)total_data)/times[i])-*avg_thpt;
    *std_thpt += dthpt*dthpt;
  }
  *std_time/=(double)counter;
  *std_thpt/=(double)counter;
  *std_time=sqrt(*std_time);
  *std_thpt=sqrt(*std_thpt);
}
// Copies a host filename (including the trailing '\0') into a freshly
// allocated device buffer and returns the device pointer.  The buffer is
// never freed here; ownership passes to the caller/device code.
char* update_filename(const char* h_filename){
int n=strlen(h_filename);
assert(n>0);
// NOTE(review): n excludes the terminator but the message says
// FILENAME_SIZE includes it — the boundary case n == FILENAME_SIZE may
// overflow fixed device-side name buffers; confirm against fs headers.
if (n>FILENAME_SIZE) {
fprintf(stderr,"Filname %s too long, should be only %d symbols including \\0",h_filename,FILENAME_SIZE);
exit (-1);
}
char* d_filename;
CUDA_SAFE_CALL(hipMalloc(&d_filename,n+1));
CUDA_SAFE_CALL(hipMemcpy(d_filename, h_filename, n+1,hipMemcpyHostToDevice));
return d_filename;
}
#include <assert.h>
// size of the output used for data staging
int output_size=FS_BLOCKSIZE;
#define MAX_TRIALS (10)
double time_res[MAX_TRIALS];
double match_threshold;
int global_devicenum;
// Benchmark driver for the GPUfs copy test.  Environment: GREPTH sets the
// match threshold, GPUDEVICE selects the GPU.  Arguments:
// <kernel_iterations> <blocks> <threads> f1 f2 ... .  Runs `trials`
// rounds of the test_cpy kernel serviced by the GPUfs host handler and
// reports avg/std time and throughput over the per-trial timings.
int main( int argc, char** argv)
{
char* threshold=getenv("GREPTH");
match_threshold=0.01;
if(threshold!=0) match_threshold=strtof(threshold,NULL);
fprintf(stderr,"Match threshold is %f\n",match_threshold);
char* gpudev=getenv("GPUDEVICE");
global_devicenum=0;
if (gpudev!=NULL) global_devicenum=atoi(gpudev);
fprintf(stderr,"GPU device chosen %d\n",global_devicenum);
CUDA_SAFE_CALL(hipSetDevice(global_devicenum));
if(argc<5) {
fprintf(stderr,"<kernel_iterations> <blocks> <threads> f1 f2 ... f_#files\n");
return -1;
}
int trials=atoi(argv[1]);
assert(trials<=MAX_TRIALS);
int nblocks=atoi(argv[2]);
int nthreads=atoi(argv[3]);
fprintf(stderr," iterations: %d blocks %d threads %d\n",trials, nblocks, nthreads);
int num_files=argc-1-3;
char** d_filenames=NULL;
double total_time=0;
// int scratch_size=128*1024*1024*4;
size_t total_size;
std::memset(time_res,0,MAX_TRIALS*sizeof(double));
for(int i=1;i<trials+1;i++){
// Fresh GPUfs state for every trial.
volatile GPUGlobals* gpuGlobals;
initializer(&gpuGlobals);
init_device_app();
init_app();
if (num_files>0){
// NOTE(review): reallocated each trial without freeing the previous
// array or the device name buffers — leaks across trials.
d_filenames=(char**)malloc(sizeof(char*)*num_files);
for(int i=0;i<num_files;i++){
d_filenames[i]=update_filename(argv[i+4]);
fprintf(stderr,"file -%s\n",argv[i+4]);
}
}
double time_before=_timestamp();
if (!i) time_before=0; // NOTE(review): dead branch — i starts at 1
// vector, matrix, out
double c_open, c_rw, c_close;
c_open=c_rw=c_close=0;
// Launch the copy kernel; the host loop below services its I/O requests.
hipLaunchKernelGGL(( test_cpy), dim3(nblocks),dim3(nthreads),0,gpuGlobals->streamMgr->kernelStream, d_filenames[0], d_filenames[1]);
run_gpufs_handler(gpuGlobals,global_devicenum);
hipError_t error = hipDeviceSynchronize();
double time_after=_timestamp();
if(!i) time_after=0; // NOTE(review): dead branch — i starts at 1
total_time+=(time_after-time_before);
if (i>0) {time_res[i]=time_after-time_before;
fprintf(stderr," t-%.3f-us\n",time_res[i]);
}
fprintf(stderr, "open: %.0f, rw %.0f, close %.0f usec\n",c_open,c_rw,c_close);
//Check for errors and failed asserts in asynchronous kernel launch.
if(error != hipSuccess )
{
printf("Device failed, CUDA error message is: %s\n\n", hipGetErrorString(error));
}
//PRINT_DEBUG;
fprintf(stderr,"\n");
delete gpuGlobals;
// Per-trial GPUfs statistics counters.
PRINT_MALLOC;
PRINT_FREE;
PRINT_PAGE_ALLOC_RETRIES;
PRINT_LOCKLESS_SUCCESS;
PRINT_WRONG_FILE_ID;
PRINT_RT_MALLOC;
PRINT_RT_FREE;
PRINT_HT_MISS;
PRINT_PRECLOSE_PUSH;
PRINT_PRECLOSE_FETCH;
PRINT_HT_HIT;
PRINT_FLUSHED_READ;
PRINT_FLUSHED_WRITE;
PRINT_TRY_LOCK_FAILED;
// hipFree(d_output);
hipDeviceReset();
if(error) break;
}
if (d_filenames) free(d_filenames);
double thpt=post_app(total_time,trials);
struct stat s1,s2,s3;
// NOTE(review): argv[4..6] are stat'ed, i.e. at least three files are
// assumed, but the argc check above only guarantees one — confirm usage.
if (stat(argv[4],&s1)) perror("stat failed");
if (stat(argv[5],&s2)) perror("stat failed");
if (stat(argv[6],&s3)) perror("stat failed");
total_size=s1.st_size+s2.st_size+s3.st_size;
double d_size=total_size/1024.0/1024.0/1024.0;
double avg_time,avg_thpt,std_time,std_thpt;
stdavg(&avg_time,&avg_thpt, &std_time, &std_thpt, time_res, d_size, MAX_TRIALS);
fprintf(stderr,"Performance: %.3f usec +/- %.3f, %.3f GB, %.3f GB/s +/- %.3f, FS_BLOCKSIZE %d FS_LOGBLOCKSIZE %d\n",avg_time,std_time, d_size,
avg_thpt*1e6,std_thpt*1e6,FS_BLOCKSIZE, FS_LOGBLOCKSIZE );
//((double)output_size*(double)nblocks*(double)read_count)/(total_time/TRIALS)/1e3 );
return 0;
}
| 30c8696f9e1361aa575cf32916e4c95331f6b12b.cu |
#include <cuda.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "fs_debug.cu.h"
#include "fs_initializer.cu.h"
// INCLUDING CODE INLINE - change later
#include "host_loop.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <math.h>
#define MAIN_FS_FILE
#include "cp.cu"
void stdavg(double *avg_time, double *avg_thpt, double* std_time, double *std_thpt, const double* times, const double total_data, int arr_len)
{
  // Mean and (population) standard deviation of the recorded wall times and
  // of the derived throughputs total_data / time.  Entries <= 0 are
  // warm-up/unused slots and are skipped.  All outputs are zeroed first; if
  // no valid samples exist they stay zero.
  *avg_time=*avg_thpt=*std_time=*std_thpt=0;
  int counter=0;
  for( int i=0;i<arr_len;i++){
    if (times[i]<=0) continue;
    *avg_time+=times[i];
    *avg_thpt+=((double)total_data)/times[i];
    counter++;
  }
  if (counter==0) return;
  *avg_time/=(double)counter;
  *avg_thpt/=(double)counter;
  for( int i=0;i<arr_len;i++){
    if (times[i]<=0) continue;
    // BUG FIX: accumulate the squared deviations ('+=') instead of
    // overwriting them ('='), which previously made the reported std
    // reflect only the last valid sample.
    double dt = times[i]-*avg_time;
    *std_time += dt*dt;
    double dthpt = (((double)total_data)/times[i])-*avg_thpt;
    *std_thpt += dthpt*dthpt;
  }
  *std_time/=(double)counter;
  *std_thpt/=(double)counter;
  *std_time=sqrt(*std_time);
  *std_thpt=sqrt(*std_thpt);
}
// Copies a host filename (including the trailing '\0') into a freshly
// allocated device buffer and returns the device pointer.  The buffer is
// never freed here; ownership passes to the caller/device code.
char* update_filename(const char* h_filename){
int n=strlen(h_filename);
assert(n>0);
// NOTE(review): n excludes the terminator but the message says
// FILENAME_SIZE includes it — the boundary case n == FILENAME_SIZE may
// overflow fixed device-side name buffers; confirm against fs headers.
if (n>FILENAME_SIZE) {
fprintf(stderr,"Filname %s too long, should be only %d symbols including \\0",h_filename,FILENAME_SIZE);
exit (-1);
}
char* d_filename;
CUDA_SAFE_CALL(cudaMalloc(&d_filename,n+1));
CUDA_SAFE_CALL(cudaMemcpy(d_filename, h_filename, n+1,cudaMemcpyHostToDevice));
return d_filename;
}
#include <assert.h>
// size of the output used for data staging
int output_size=FS_BLOCKSIZE;
#define MAX_TRIALS (10)
double time_res[MAX_TRIALS];
double match_threshold;
int global_devicenum;
// Benchmark driver for the GPUfs copy test.  Environment: GREPTH sets the
// match threshold, GPUDEVICE selects the GPU.  Arguments:
// <kernel_iterations> <blocks> <threads> f1 f2 ... .  Runs `trials`
// rounds of the test_cpy kernel serviced by the GPUfs host handler and
// reports avg/std time and throughput over the per-trial timings.
int main( int argc, char** argv)
{
char* threshold=getenv("GREPTH");
match_threshold=0.01;
if(threshold!=0) match_threshold=strtof(threshold,NULL);
fprintf(stderr,"Match threshold is %f\n",match_threshold);
char* gpudev=getenv("GPUDEVICE");
global_devicenum=0;
if (gpudev!=NULL) global_devicenum=atoi(gpudev);
fprintf(stderr,"GPU device chosen %d\n",global_devicenum);
CUDA_SAFE_CALL(cudaSetDevice(global_devicenum));
if(argc<5) {
fprintf(stderr,"<kernel_iterations> <blocks> <threads> f1 f2 ... f_#files\n");
return -1;
}
int trials=atoi(argv[1]);
assert(trials<=MAX_TRIALS);
int nblocks=atoi(argv[2]);
int nthreads=atoi(argv[3]);
fprintf(stderr," iterations: %d blocks %d threads %d\n",trials, nblocks, nthreads);
int num_files=argc-1-3;
char** d_filenames=NULL;
double total_time=0;
// int scratch_size=128*1024*1024*4;
size_t total_size;
std::memset(time_res,0,MAX_TRIALS*sizeof(double));
for(int i=1;i<trials+1;i++){
// Fresh GPUfs state for every trial.
volatile GPUGlobals* gpuGlobals;
initializer(&gpuGlobals);
init_device_app();
init_app();
if (num_files>0){
// NOTE(review): reallocated each trial without freeing the previous
// array or the device name buffers — leaks across trials.
d_filenames=(char**)malloc(sizeof(char*)*num_files);
for(int i=0;i<num_files;i++){
d_filenames[i]=update_filename(argv[i+4]);
fprintf(stderr,"file -%s\n",argv[i+4]);
}
}
double time_before=_timestamp();
if (!i) time_before=0; // NOTE(review): dead branch — i starts at 1
// vector, matrix, out
double c_open, c_rw, c_close;
c_open=c_rw=c_close=0;
// Launch the copy kernel; the host loop below services its I/O requests.
test_cpy<<<nblocks,nthreads,0,gpuGlobals->streamMgr->kernelStream>>>(d_filenames[0], d_filenames[1]);
run_gpufs_handler(gpuGlobals,global_devicenum);
cudaError_t error = cudaDeviceSynchronize();
double time_after=_timestamp();
if(!i) time_after=0; // NOTE(review): dead branch — i starts at 1
total_time+=(time_after-time_before);
if (i>0) {time_res[i]=time_after-time_before;
fprintf(stderr," t-%.3f-us\n",time_res[i]);
}
fprintf(stderr, "open: %.0f, rw %.0f, close %.0f usec\n",c_open,c_rw,c_close);
//Check for errors and failed asserts in asynchronous kernel launch.
if(error != cudaSuccess )
{
printf("Device failed, CUDA error message is: %s\n\n", cudaGetErrorString(error));
}
//PRINT_DEBUG;
fprintf(stderr,"\n");
delete gpuGlobals;
// Per-trial GPUfs statistics counters.
PRINT_MALLOC;
PRINT_FREE;
PRINT_PAGE_ALLOC_RETRIES;
PRINT_LOCKLESS_SUCCESS;
PRINT_WRONG_FILE_ID;
PRINT_RT_MALLOC;
PRINT_RT_FREE;
PRINT_HT_MISS;
PRINT_PRECLOSE_PUSH;
PRINT_PRECLOSE_FETCH;
PRINT_HT_HIT;
PRINT_FLUSHED_READ;
PRINT_FLUSHED_WRITE;
PRINT_TRY_LOCK_FAILED;
// cudaFree(d_output);
cudaDeviceReset();
if(error) break;
}
if (d_filenames) free(d_filenames);
double thpt=post_app(total_time,trials);
struct stat s1,s2,s3;
// NOTE(review): argv[4..6] are stat'ed, i.e. at least three files are
// assumed, but the argc check above only guarantees one — confirm usage.
if (stat(argv[4],&s1)) perror("stat failed");
if (stat(argv[5],&s2)) perror("stat failed");
if (stat(argv[6],&s3)) perror("stat failed");
total_size=s1.st_size+s2.st_size+s3.st_size;
double d_size=total_size/1024.0/1024.0/1024.0;
double avg_time,avg_thpt,std_time,std_thpt;
stdavg(&avg_time,&avg_thpt, &std_time, &std_thpt, time_res, d_size, MAX_TRIALS);
fprintf(stderr,"Performance: %.3f usec +/- %.3f, %.3f GB, %.3f GB/s +/- %.3f, FS_BLOCKSIZE %d FS_LOGBLOCKSIZE %d\n",avg_time,std_time, d_size,
avg_thpt*1e6,std_thpt*1e6,FS_BLOCKSIZE, FS_LOGBLOCKSIZE );
//((double)output_size*(double)nblocks*(double)read_count)/(total_time/TRIALS)/1e3 );
return 0;
}
|
3cafc6aea2f2b77de7c590c8d37ba6b68451f933.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Rippling.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
// Constructor: forwards the launch grid and image size to the Animable_I
// base and initializes the animation clock.
Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "Rippling_Cuda_RGBA_uchar4")
{
assert(w == h); // the rippling effect is only defined on a square image
// Inputs
this->dt = dt; // time increment applied on every animationStep()
// Tools
this->t = 0; // animation time (protected member of Animable)
}
Rippling::~Rippling()
{
// nothing to release
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car pas zoomable
*/
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
// Render one frame at time t; dg/db launch dims are presumably set by the
// Animable_I base from the Grid given at construction — TODO confirm.
hipLaunchKernelGGL(( rippling) , dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t);
}
/**
* Override
* Call periodicly by the API
*/
void Rippling::animationStep()
{
// Advance the animation clock by one time step.
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 3cafc6aea2f2b77de7c590c8d37ba6b68451f933.cu | #include <iostream>
#include <assert.h>
#include "Device.h"
#include "Rippling.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
// Constructor: forwards the launch grid and image size to the Animable_I
// base and initializes the animation clock.
Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "Rippling_Cuda_RGBA_uchar4")
{
assert(w == h); // the rippling effect is only defined on a square image
// Inputs
this->dt = dt; // time increment applied on every animationStep()
// Tools
this->t = 0; // animation time (protected member of Animable)
}
Rippling::~Rippling()
{
// nothing to release
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car pas zoomable
*/
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
// Render one frame at time t; dg/db launch dims are presumably set by the
// Animable_I base from the Grid given at construction — TODO confirm.
rippling <<<dg,db>>>(ptrDevPixels,w,h,t);
}
/**
* Override
* Call periodicly by the API
*/
void Rippling::animationStep()
{
// Advance the animation clock by one time step.
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
f839c3e6a08b0c99367101e70a2511073891af7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaParticleRotation.hh"
#include "kernelfuncs.h"
#include "kerneltemplate.hh"
#include <iostream>
// Destructor: release the per-particle device buffers; guarded so a
// partially constructed object can be destroyed safely.
cudaParticleRotation::~cudaParticleRotation() {
if (w!=NULL) hipFree(w);
if (m0inv!=NULL) hipFree(m0inv);
if (L!=NULL) hipFree(L);
if (T!=NULL) hipFree(T);
}
// Allocates the per-particle device arrays: angular velocity w, inverse
// moment of inertia m0inv, angular momentum L, and torque T.
// NOTE(review): the parameter n is unused — every allocation uses the
// member N, which must already be set by the caller; confirm.
void cudaParticleRotation::setup(int n) {
hipMalloc((void **)&w, sizeof(float4)*N);
if (withInfo) ErrorInfo("malloc w[] on GPU");
hipMalloc((void **)&m0inv, sizeof(real)*N);
if (withInfo) ErrorInfo("malloc m0inv[] on GPU");
hipMalloc((void **)&L, sizeof(float4)*N);
if (withInfo) ErrorInfo("malloc L[] on GPU");
hipMalloc((void **)&T, sizeof(float4)*N);
if (withInfo) ErrorInfo("malloc T[] on GPU");
}
// One time step dt of the rotational (Eulerian) equation of motion,
// integrated on the GPU for all N particles.
void cudaParticleRotation::TimeEvolution(real dt) {
hipLaunchKernelGGL(( propagateEulerianRotation_F4), dim3(MPnum), dim3(THnum1D), 0, 0, w, dt, L, T, m0inv, move, N);
if (withInfo) ErrorInfo("do TimeEvolution Eulerian Equation of Motion");
}
// Zeroes angular velocity/momentum and builds the per-particle inverse
// moment of inertia m0inv = (1/m) * 1/(0.4 r^2), i.e. the solid-sphere
// inertia I = (2/5) m r^2 with per-particle radius _r0[i].
void cudaParticleRotation::setInertia(const std::vector<real> &_r0) {
hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, w, N);
hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, L, N);
hipMemcpy(m0inv, minv, sizeof(real)*N, hipMemcpyDeviceToDevice);
std::vector<real> r1(N);
for (int i=0;i<N;++i)
r1[i] = 1/(0.4 * _r0[i] * _r0[i]); // 1 / ((2/5) r^2)
hipMemcpy(tmp3N, &(r1[0]), sizeof(real)*N, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( multiplies), dim3(MPnum), dim3(THnum1D), 0, 0, m0inv, reinterpret_cast<real *>(tmp3N), N);
if (withInfo) ErrorInfo("set Initial Inertia");
}
// Same as the vector overload but with a single radius for all particles:
// m0inv = (1/m) * 1/(0.4 r0_all^2) (solid-sphere inertia I = (2/5) m r^2).
void cudaParticleRotation::setInertia(real r0_all) {
hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, w, N);
hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, L, N);
hipMemcpy(m0inv, minv, sizeof(real)*N, hipMemcpyDeviceToDevice);
real r1 = 1/(0.4 * r0_all * r0_all); // 1 / ((2/5) r^2)
hipLaunchKernelGGL(( mulArray), dim3(MPnum), dim3(THnum1D), 0, 0, m0inv, r1, N);
if (withInfo) ErrorInfo("set Initial Inertia");
}
| f839c3e6a08b0c99367101e70a2511073891af7c.cu | #include "cudaParticleRotation.hh"
#include "kernelfuncs.h"
#include "kerneltemplate.hh"
#include <iostream>
// Destructor: release the per-particle device buffers; guarded so a
// partially constructed object can be destroyed safely.
cudaParticleRotation::~cudaParticleRotation() {
if (w!=NULL) cudaFree(w);
if (m0inv!=NULL) cudaFree(m0inv);
if (L!=NULL) cudaFree(L);
if (T!=NULL) cudaFree(T);
}
// Allocates the per-particle device arrays: angular velocity w, inverse
// moment of inertia m0inv, angular momentum L, and torque T.
// NOTE(review): the parameter n is unused — every allocation uses the
// member N, which must already be set by the caller; confirm.
void cudaParticleRotation::setup(int n) {
cudaMalloc((void **)&w, sizeof(float4)*N);
if (withInfo) ErrorInfo("malloc w[] on GPU");
cudaMalloc((void **)&m0inv, sizeof(real)*N);
if (withInfo) ErrorInfo("malloc m0inv[] on GPU");
cudaMalloc((void **)&L, sizeof(float4)*N);
if (withInfo) ErrorInfo("malloc L[] on GPU");
cudaMalloc((void **)&T, sizeof(float4)*N);
if (withInfo) ErrorInfo("malloc T[] on GPU");
}
// One time step dt of the rotational (Eulerian) equation of motion,
// integrated on the GPU for all N particles.
void cudaParticleRotation::TimeEvolution(real dt) {
propagateEulerianRotation_F4<<<MPnum, THnum1D>>>(w, dt, L, T, m0inv, move, N);
if (withInfo) ErrorInfo("do TimeEvolution Eulerian Equation of Motion");
}
// Zeroes angular velocity/momentum and builds the per-particle inverse
// moment of inertia m0inv = (1/m) * 1/(0.4 r^2), i.e. the solid-sphere
// inertia I = (2/5) m r^2 with per-particle radius _r0[i].
void cudaParticleRotation::setInertia(const std::vector<real> &_r0) {
clearArray_F4<<<MPnum, THnum1D>>>(w, N);
clearArray_F4<<<MPnum, THnum1D>>>(L, N);
cudaMemcpy(m0inv, minv, sizeof(real)*N, cudaMemcpyDeviceToDevice);
std::vector<real> r1(N);
for (int i=0;i<N;++i)
r1[i] = 1/(0.4 * _r0[i] * _r0[i]); // 1 / ((2/5) r^2)
cudaMemcpy(tmp3N, &(r1[0]), sizeof(real)*N, cudaMemcpyHostToDevice);
multiplies<<<MPnum, THnum1D>>>(m0inv, reinterpret_cast<real *>(tmp3N), N);
if (withInfo) ErrorInfo("set Initial Inertia");
}
// Same as the vector overload but with a single radius for all particles:
// m0inv = (1/m) * 1/(0.4 r0_all^2) (solid-sphere inertia I = (2/5) m r^2).
void cudaParticleRotation::setInertia(real r0_all) {
clearArray_F4<<<MPnum, THnum1D>>>(w, N);
clearArray_F4<<<MPnum, THnum1D>>>(L, N);
cudaMemcpy(m0inv, minv, sizeof(real)*N, cudaMemcpyDeviceToDevice);
real r1 = 1/(0.4 * r0_all * r0_all); // 1 / ((2/5) r^2)
mulArray<<<MPnum, THnum1D>>>(m0inv, r1, N);
if (withInfo) ErrorInfo("set Initial Inertia");
}
|
efb498d694792d8afee9d73f2fbb1975453c8cc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/operator_c10wrapper.h"
#include "caffe2/operators/layer_norm_op.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/reduce.cuh"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace {
// Forward pass, step 1: per-row normalization constants, one thread per row
// (index < N):
//   sigma[i] = rstd * (var[i] + eps)  == sqrt(var[i] + eps)
//   scale[i] = rstd                   == 1 / sqrt(var[i] + eps)
//   bias[i]  = -rstd * mean[i]
// Specialized by the macro below so each type uses its rsqrt flavor.
template <typename T>
__global__ void ComputeSigmaAndFusedParamsCUDAKernel(
const int N,
const T eps,
const T* mean,
const T* var,
T* sigma,
T* scale,
T* bias);
#define DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL(T, RsqrtFunc) \
template <>                                                             \
__global__ void ComputeSigmaAndFusedParamsCUDAKernel<T>(                \
const int N,                                                          \
const T eps,                                                          \
const T* mean,                                                        \
const T* var,                                                         \
T* sigma,                                                             \
T* scale,                                                             \
T* bias) {                                                            \
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;  \
if (index < N) {                                                      \
const T rstd = RsqrtFunc(var[index] + eps);                         \
sigma[index] = rstd * (var[index] + eps);                           \
scale[index] = rstd;                                                \
bias[index] = -rstd * mean[index];                                  \
}                                                                     \
}
DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL(float, rsqrtf)
DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL(double, rsqrt)
#undef DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL
// Forward pass, step 2: element-wise normalization over the M x N matrix,
// one thread per element. Plain variant: Y = X * scale[row] + bias[row].
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
T* Y);
// Affine variant: additionally applies the per-column gamma/beta transform,
// Y = (X * scale[row] + bias[row]) * gamma[col] + beta[col].
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
const T* gamma,
const T* beta,
T* Y);
// FmaFunc is fmaf for float and fma for double, keeping each specialization
// in its native precision.
#define DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL(T, FmaFunc)                \
template <>                                                              \
__global__ void LayerNormForwardCUDAKernel<T>(                           \
const int M,                                                           \
const int N,                                                           \
const T* X,                                                            \
const T* scale,                                                        \
const T* bias,                                                         \
T* Y) {                                                                \
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;   \
if (index < M * N) {                                                   \
const int i = index / N;                                             \
Y[index] = FmaFunc(X[index], scale[i], bias[i]);                     \
}                                                                      \
}                                                                        \
template <>                                                              \
__global__ void LayerNormForwardCUDAKernel<T>(                           \
const int M,                                                           \
const int N,                                                           \
const T* X,                                                            \
const T* scale,                                                        \
const T* bias,                                                         \
const T* gamma,                                                        \
const T* beta,                                                         \
T* Y) {                                                                \
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;   \
if (index < M * N) {                                                   \
const int i = index / N;                                             \
const int j = index % N;                                             \
Y[index] =                                                           \
FmaFunc(FmaFunc(X[index], scale[i], bias[i]), gamma[j], beta[j]); \
}                                                                      \
}
DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL(float, fmaf)
DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL(double, fma)
#undef DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL
// Backward pass, step 1: one thread block per row i = blockIdx.x.
// Block-wide reductions of ds[i] = sum_j dY[i][j] * X[i][j] and
// db[i] = sum_j dY[i][j]; thread 0 writes the results. __ldg is used on
// SM35+ to route the read-only loads through the texture/read-only cache.
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
const int M,
const int N,
const T* dY,
const T* X,
T* ds,
T* db) {
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
const int i = blockIdx.x;
T ds_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const int index = i * N + j;
#if __CUDA_ARCH__ >= 350
ds_val += __ldg(dY + index) * __ldg(X + index);
db_val += __ldg(dY + index);
#else
ds_val += dY[index] * X[index];
db_val += dY[index];
#endif
}
ds_val = BlockReduce<T>(ds_storage).Sum(ds_val);
db_val = BlockReduce<T>(db_storage).Sum(db_val);
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
}
// Backward pass, step 2: per-row coefficients so that
// dX = dY * dY_scale[row] + X * X_scale[row] + bias[row].
// Only the float specialization is defined in this file.
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
const int M,
const int N,
const T* mean,
const T* sig,
const T* ds,
const T* db,
T* dY_scale,
T* X_scale,
T* bias);
template <>
__global__ void ComputeFusedParamsCUDAKernel<float>(
const int M,
const int N,
const float* mean,
const float* sig,
const float* ds,
const float* db,
float* dY_scale,
float* X_scale,
float* bias) {
const float scale = 1.0f / static_cast<float>(N);
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < M) {
#if __CUDA_ARCH__ >= 350
const float rsig = 1.0f / __ldg(sig + index);
const float X_scale_val =
fmaf(__ldg(db + index), __ldg(mean + index), -__ldg(ds + index)) *
math::utils::Cube<float>(rsig) * scale;
dY_scale[index] = rsig;
X_scale[index] = X_scale_val;
bias[index] = -fmaf(
X_scale_val, __ldg(mean + index), __ldg(db + index) * rsig * scale);
#else
const float rsig = 1.0f / sig[index];
const float X_scale_val = fmaf(db[index], mean[index], -ds[index]) *
math::utils::Cube<float>(rsig) * scale;
dY_scale[index] = rsig;
X_scale[index] = X_scale_val;
bias[index] = -fmaf(X_scale_val, mean[index], db[index] * rsig * scale);
#endif
}
}
// Backward pass, step 3: element-wise combine over M x N,
// dX = dY * dY_scale[row] + X * X_scale[row] + bias[row].
// (The "Kenrel" spelling is kept as-is: it matches the call sites below.)
// Only the float specialization is defined in this file.
template <typename T>
__global__ void LayerNormBackwardCUDAKenrel(
const int M,
const int N,
const T* dY_scale,
const T* dY,
const T* X_scale,
const T* X,
const T* bias,
T* dX);
template <>
__global__ void LayerNormBackwardCUDAKenrel<float>(
const int M,
const int N,
const float* dY_scale,
const float* dY,
const float* X_scale,
const float* X,
const float* bias,
float* dX) {
const int size = M * N;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < size) {
const int i = index / N;
#if __CUDA_ARCH__ >= 350
dX[index] = fmaf(
__ldg(dY + index),
__ldg(dY_scale + i),
fmaf(__ldg(X + index), __ldg(X_scale + i), __ldg(bias + i)));
#else
dX[index] =
fmaf(dY[index], dY_scale[i], fmaf(X[index], X_scale[i], bias[i]));
#endif
}
}
} // namespace
// Host launcher for the per-row sigma/scale/bias kernel: one thread per row,
// DivUp(N, CAFFE_CUDA_NUM_THREADS) blocks. The N > 0 guard avoids launching
// with a zero-sized (invalid) grid for empty inputs.
template <>
template <typename T>
void LayerNormOp<CUDAContext>::ComputeSigmaAndFusedParams(
const int N,
const float eps,
const T* mean,
const T* var,
T* sigma,
T* scale,
T* bias,
CUDAContext* context) {
if (N > 0) {
const int M = math::DivUp(N, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( ComputeSigmaAndFusedParamsCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
N, static_cast<T>(eps), mean, var, sigma, scale, bias);
}
}
// Host launcher for the element-wise forward kernel over the M x N matrix.
// Dispatches to the affine variant when both gamma and beta are supplied;
// supplying only one of them is rejected. The M * N > 0 guard avoids a
// zero-sized (invalid) grid for empty inputs.
template <>
template <typename T>
void LayerNormOp<CUDAContext>::LayerNormForward(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
const T* gamma,
const T* beta,
T* Y,
CUDAContext* context) {
if (M * N > 0) {
const int K = math::DivUp(M * N, CAFFE_CUDA_NUM_THREADS);
if (gamma != nullptr && beta != nullptr) {
hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>)
, dim3(K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
M, N, X, scale, bias, gamma, beta, Y);
} else {
CAFFE_ENFORCE(gamma == nullptr);
CAFFE_ENFORCE(beta == nullptr);
hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>)
, dim3(K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
M, N, X, scale, bias, Y);
}
}
}
REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>);
// Host launcher for the per-row ds/db reduction kernel: one block per row.
template <>
template <typename T>
void LayerNormGradientOp<CUDAContext>::ComputeInternalGradients(
const int M,
const int N,
const T* dY,
const T* X,
T* ds,
T* db) {
// Guard against an empty batch: launching with grid dimension 0 is an
// invalid configuration. The forward launchers in this file already guard
// the same way; the gradient launchers were missing it.
if (M > 0) {
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
M, N, dY, X, ds, db);
}
}
// Host launcher for the per-row backward coefficient kernel
// (dY_scale/X_scale/bias), one thread per row.
template <>
template <typename T>
void LayerNormGradientOp<CUDAContext>::ComputeFusedParams(
const int M,
const int N,
const T* mean,
const T* sig,
const T* ds,
const T* db,
T* dY_scale,
T* X_scale,
T* bias) {
// Guard against an empty batch: DivUp(0, x) == 0 would otherwise produce a
// zero-sized (invalid) grid. Matches the guards in the forward launchers.
if (M > 0) {
const int K = math::DivUp(M, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<T>)
, dim3(K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
M, N, mean, sig, ds, db, dY_scale, X_scale, bias);
}
}
// Host launcher for the element-wise backward kernel over the M x N matrix.
template <>
template <typename T>
void LayerNormGradientOp<CUDAContext>::LayerNormBackward(
const int M,
const int N,
const T* dY_scale,
const T* dY,
const T* X_scale,
const T* X,
const T* bias,
T* dX) {
// Guard against an empty input: DivUp(0, x) == 0 would otherwise produce a
// zero-sized (invalid) grid. Matches the guards in the forward launchers.
if (M * N > 0) {
const int K = math::DivUp(M * N, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( LayerNormBackwardCUDAKenrel<T>)
, dim3(K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
M, N, dY_scale, dY, X_scale, X, bias, dX);
}
}
REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>);
} // namespace caffe2
C10_REGISTER_CAFFE2_OPERATOR_CUDA(
LayerNorm,
caffe2::LayerNormOp<caffe2::CUDAContext>)
namespace caffe2 {
REGISTER_C10_OPERATOR_FOR_CAFFE2_DISPATCH_CUDA(
"_caffe2::LayerNorm",
C10LayerNorm_DontUseThisOpYet);
} // namespace caffe2
| efb498d694792d8afee9d73f2fbb1975453c8cc2.cu | #include "caffe2/core/operator_c10wrapper.h"
#include "caffe2/operators/layer_norm_op.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/reduce.cuh"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace {
// Forward pass, step 1: per-row normalization constants, one thread per row
// (index < N):
//   sigma[i] = rstd * (var[i] + eps)  == sqrt(var[i] + eps)
//   scale[i] = rstd                   == 1 / sqrt(var[i] + eps)
//   bias[i]  = -rstd * mean[i]
// Specialized by the macro below so each type uses its rsqrt flavor.
template <typename T>
__global__ void ComputeSigmaAndFusedParamsCUDAKernel(
const int N,
const T eps,
const T* mean,
const T* var,
T* sigma,
T* scale,
T* bias);
#define DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL(T, RsqrtFunc) \
template <>                                                             \
__global__ void ComputeSigmaAndFusedParamsCUDAKernel<T>(                \
const int N,                                                          \
const T eps,                                                          \
const T* mean,                                                        \
const T* var,                                                         \
T* sigma,                                                             \
T* scale,                                                             \
T* bias) {                                                            \
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;  \
if (index < N) {                                                      \
const T rstd = RsqrtFunc(var[index] + eps);                         \
sigma[index] = rstd * (var[index] + eps);                           \
scale[index] = rstd;                                                \
bias[index] = -rstd * mean[index];                                  \
}                                                                     \
}
DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL(float, rsqrtf)
DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL(double, rsqrt)
#undef DELEGATE_COMPUTE_SIGMA_AND_FUSED_PARAMS_CUDA_KERNEL
// Forward pass, step 2: element-wise normalization over the M x N matrix,
// one thread per element. Plain variant: Y = X * scale[row] + bias[row].
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
T* Y);
// Affine variant: additionally applies the per-column gamma/beta transform,
// Y = (X * scale[row] + bias[row]) * gamma[col] + beta[col].
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
const T* gamma,
const T* beta,
T* Y);
// FmaFunc is fmaf for float and fma for double, keeping each specialization
// in its native precision.
#define DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL(T, FmaFunc)                \
template <>                                                              \
__global__ void LayerNormForwardCUDAKernel<T>(                           \
const int M,                                                           \
const int N,                                                           \
const T* X,                                                            \
const T* scale,                                                       \
const T* bias,                                                         \
T* Y) {                                                                \
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;   \
if (index < M * N) {                                                   \
const int i = index / N;                                             \
Y[index] = FmaFunc(X[index], scale[i], bias[i]);                     \
}                                                                      \
}                                                                        \
template <>                                                              \
__global__ void LayerNormForwardCUDAKernel<T>(                           \
const int M,                                                           \
const int N,                                                           \
const T* X,                                                            \
const T* scale,                                                        \
const T* bias,                                                         \
const T* gamma,                                                        \
const T* beta,                                                         \
T* Y) {                                                                \
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;   \
if (index < M * N) {                                                   \
const int i = index / N;                                             \
const int j = index % N;                                             \
Y[index] =                                                           \
FmaFunc(FmaFunc(X[index], scale[i], bias[i]), gamma[j], beta[j]); \
}                                                                      \
}
DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL(float, fmaf)
DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL(double, fma)
#undef DELEGATE_LAYER_NORM_FORWARD_CUDA_KERNEL
// Backward pass, step 1: one thread block per row i = blockIdx.x.
// Block-wide reductions of ds[i] = sum_j dY[i][j] * X[i][j] and
// db[i] = sum_j dY[i][j]; thread 0 writes the results. __ldg is used on
// SM35+ to route the read-only loads through the texture/read-only cache.
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
const int M,
const int N,
const T* dY,
const T* X,
T* ds,
T* db) {
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
const int i = blockIdx.x;
T ds_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const int index = i * N + j;
#if __CUDA_ARCH__ >= 350
ds_val += __ldg(dY + index) * __ldg(X + index);
db_val += __ldg(dY + index);
#else
ds_val += dY[index] * X[index];
db_val += dY[index];
#endif
}
ds_val = BlockReduce<T>(ds_storage).Sum(ds_val);
db_val = BlockReduce<T>(db_storage).Sum(db_val);
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
}
// Backward pass, step 2: per-row coefficients so that
// dX = dY * dY_scale[row] + X * X_scale[row] + bias[row].
// Only the float specialization is defined in this file.
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
const int M,
const int N,
const T* mean,
const T* sig,
const T* ds,
const T* db,
T* dY_scale,
T* X_scale,
T* bias);
template <>
__global__ void ComputeFusedParamsCUDAKernel<float>(
const int M,
const int N,
const float* mean,
const float* sig,
const float* ds,
const float* db,
float* dY_scale,
float* X_scale,
float* bias) {
const float scale = 1.0f / static_cast<float>(N);
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < M) {
#if __CUDA_ARCH__ >= 350
const float rsig = 1.0f / __ldg(sig + index);
const float X_scale_val =
fmaf(__ldg(db + index), __ldg(mean + index), -__ldg(ds + index)) *
math::utils::Cube<float>(rsig) * scale;
dY_scale[index] = rsig;
X_scale[index] = X_scale_val;
bias[index] = -fmaf(
X_scale_val, __ldg(mean + index), __ldg(db + index) * rsig * scale);
#else
const float rsig = 1.0f / sig[index];
const float X_scale_val = fmaf(db[index], mean[index], -ds[index]) *
math::utils::Cube<float>(rsig) * scale;
dY_scale[index] = rsig;
X_scale[index] = X_scale_val;
bias[index] = -fmaf(X_scale_val, mean[index], db[index] * rsig * scale);
#endif
}
}
// Backward pass, step 3: element-wise combine over M x N,
// dX = dY * dY_scale[row] + X * X_scale[row] + bias[row].
// (The "Kenrel" spelling is kept as-is: it matches the call sites below.)
// Only the float specialization is defined in this file.
template <typename T>
__global__ void LayerNormBackwardCUDAKenrel(
const int M,
const int N,
const T* dY_scale,
const T* dY,
const T* X_scale,
const T* X,
const T* bias,
T* dX);
template <>
__global__ void LayerNormBackwardCUDAKenrel<float>(
const int M,
const int N,
const float* dY_scale,
const float* dY,
const float* X_scale,
const float* X,
const float* bias,
float* dX) {
const int size = M * N;
const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (index < size) {
const int i = index / N;
#if __CUDA_ARCH__ >= 350
dX[index] = fmaf(
__ldg(dY + index),
__ldg(dY_scale + i),
fmaf(__ldg(X + index), __ldg(X_scale + i), __ldg(bias + i)));
#else
dX[index] =
fmaf(dY[index], dY_scale[i], fmaf(X[index], X_scale[i], bias[i]));
#endif
}
}
} // namespace
// Host launcher for the per-row sigma/scale/bias kernel: one thread per row,
// DivUp(N, CAFFE_CUDA_NUM_THREADS) blocks. The N > 0 guard avoids launching
// with a zero-sized (invalid) grid for empty inputs.
template <>
template <typename T>
void LayerNormOp<CUDAContext>::ComputeSigmaAndFusedParams(
const int N,
const float eps,
const T* mean,
const T* var,
T* sigma,
T* scale,
T* bias,
CUDAContext* context) {
if (N > 0) {
const int M = math::DivUp(N, CAFFE_CUDA_NUM_THREADS);
ComputeSigmaAndFusedParamsCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
N, static_cast<T>(eps), mean, var, sigma, scale, bias);
}
}
// Host launcher for the element-wise forward kernel over the M x N matrix.
// Dispatches to the affine variant when both gamma and beta are supplied;
// supplying only one of them is rejected. The M * N > 0 guard avoids a
// zero-sized (invalid) grid for empty inputs.
template <>
template <typename T>
void LayerNormOp<CUDAContext>::LayerNormForward(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
const T* gamma,
const T* beta,
T* Y,
CUDAContext* context) {
if (M * N > 0) {
const int K = math::DivUp(M * N, CAFFE_CUDA_NUM_THREADS);
if (gamma != nullptr && beta != nullptr) {
LayerNormForwardCUDAKernel<T>
<<<K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
M, N, X, scale, bias, gamma, beta, Y);
} else {
CAFFE_ENFORCE(gamma == nullptr);
CAFFE_ENFORCE(beta == nullptr);
LayerNormForwardCUDAKernel<T>
<<<K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
M, N, X, scale, bias, Y);
}
}
}
REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>);
// Host launcher for the per-row ds/db reduction kernel: one block per row.
template <>
template <typename T>
void LayerNormGradientOp<CUDAContext>::ComputeInternalGradients(
const int M,
const int N,
const T* dY,
const T* X,
T* ds,
T* db) {
// Guard against an empty batch: launching with grid dimension 0 is an
// invalid configuration. The forward launchers in this file already guard
// the same way; the gradient launchers were missing it.
if (M > 0) {
ComputeInternalGradientsCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
M, N, dY, X, ds, db);
}
}
// Host launcher for the per-row backward coefficient kernel
// (dY_scale/X_scale/bias), one thread per row.
template <>
template <typename T>
void LayerNormGradientOp<CUDAContext>::ComputeFusedParams(
const int M,
const int N,
const T* mean,
const T* sig,
const T* ds,
const T* db,
T* dY_scale,
T* X_scale,
T* bias) {
// Guard against an empty batch: DivUp(0, x) == 0 would otherwise produce a
// zero-sized (invalid) grid. Matches the guards in the forward launchers.
if (M > 0) {
const int K = math::DivUp(M, CAFFE_CUDA_NUM_THREADS);
ComputeFusedParamsCUDAKernel<T>
<<<K, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
M, N, mean, sig, ds, db, dY_scale, X_scale, bias);
}
}
// Host launcher for the element-wise backward kernel over the M x N matrix.
template <>
template <typename T>
void LayerNormGradientOp<CUDAContext>::LayerNormBackward(
const int M,
const int N,
const T* dY_scale,
const T* dY,
const T* X_scale,
const T* X,
const T* bias,
T* dX) {
// Guard against an empty input: DivUp(0, x) == 0 would otherwise produce a
// zero-sized (invalid) grid. Matches the guards in the forward launchers.
if (M * N > 0) {
const int K = math::DivUp(M * N, CAFFE_CUDA_NUM_THREADS);
LayerNormBackwardCUDAKenrel<T>
<<<K, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
M, N, dY_scale, dY, X_scale, X, bias, dX);
}
}
REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>);
} // namespace caffe2
C10_REGISTER_CAFFE2_OPERATOR_CUDA(
LayerNorm,
caffe2::LayerNormOp<caffe2::CUDAContext>)
namespace caffe2 {
REGISTER_C10_OPERATOR_FOR_CAFFE2_DISPATCH_CUDA(
"_caffe2::LayerNorm",
C10LayerNorm_DontUseThisOpYet);
} // namespace caffe2
|
fbe257f29ba82f0998af946a277c34754f4beb9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaWarp.h"
// gpuIntrinsicWarp
// For each output pixel, computes the distorted input coordinate from the
// pinhole intrinsics (focalLength, principalPoint) and the distortion terms
// (radial k1/k2 and tangential p1/p2 — presumably the Brown-Conrady model;
// confirm), then samples the input with nearest-neighbor lookup. Output
// pixels whose source falls outside the image are left untouched.
// Launch: 2D grid of 2D blocks covering width x height, one thread/pixel.
template<typename T>
__global__ void gpuIntrinsicWarp( T* input, T* output, int width, int height,
float2 focalLength, float2 principalPoint, float k1, float k2, float p1, float p2)
{
const int2 uv_out = make_int2(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
if( uv_out.x >= width || uv_out.y >= height )
return;
const float u = uv_out.x;
const float v = uv_out.y;
const float _fx = 1.0f / focalLength.x;
const float _fy = 1.0f / focalLength.y;
const float y = (v - principalPoint.y)*_fy;
const float y2 = y*y;
// Use float literals throughout: the previous 2.0/3.0/1.0 double literals
// silently promoted these expressions to double-precision arithmetic
// inside a float kernel.
const float _2p1y = 2.0f*p1*y;
const float _3p1y2 = 3.0f*p1*y2;
const float p2y2 = p2*y2;
const float x = (u - principalPoint.x)*_fx;
const float x2 = x*x;
const float r2 = x2 + y2;
const float d = 1.0f + (k1 + k2*r2)*r2;
const float _u = focalLength.x*(x*(d + _2p1y) + p2y2 + (3.0f*p2)*x2) + principalPoint.x;
const float _v = focalLength.y*(y*(d + (2.0f*p2)*x) + _3p1y2 + p1*x2) + principalPoint.y;
const int2 uv_in = make_int2( _u, _v );
if( uv_in.x >= width || uv_in.y >= height || uv_in.x < 0 || uv_in.y < 0 )
return;
output[uv_out.y * width + uv_out.x] = input[uv_in.y * width + uv_in.x];
}
// cudaWarpIntrinsic
// Host wrapper for uchar4 images: validates arguments, tiles the image with
// 8x8 thread blocks, and launches gpuIntrinsicWarp on the default stream.
// distortion packs (k1, k2, p1, p2) in .x/.y/.z/.w respectively.
hipError_t cudaWarpIntrinsic( uchar4* input, uchar4* output, uint32_t width, uint32_t height,
const float2& focalLength, const float2& principalPoint, const float4& distortion )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return hipErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
hipLaunchKernelGGL(( gpuIntrinsicWarp), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height,
focalLength, principalPoint,
distortion.x, distortion.y, distortion.z, distortion.w);
return CUDA(hipGetLastError());
}
// cudaWarpIntrinsic
// Host wrapper for float4 images: same validation and 8x8-block launch as
// the uchar4 overload. distortion packs (k1, k2, p1, p2) in .x/.y/.z/.w.
hipError_t cudaWarpIntrinsic( float4* input, float4* output, uint32_t width, uint32_t height,
const float2& focalLength, const float2& principalPoint, const float4& distortion )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return hipErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
hipLaunchKernelGGL(( gpuIntrinsicWarp), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height,
focalLength, principalPoint,
distortion.x, distortion.y, distortion.z, distortion.w);
return CUDA(hipGetLastError());
}
// cudaWarpIntrinsic
// Host wrapper for single-channel 16-bit (ushort1) images: same validation
// and 8x8-block launch as the other overloads. distortion packs
// (k1, k2, p1, p2) in .x/.y/.z/.w.
hipError_t cudaWarpIntrinsic(ushort1* input, ushort1* output, uint32_t width, uint32_t height,
const float2& focalLength, const float2& principalPoint, const float4& distortion)
{
if (!input || !output)
return hipErrorInvalidDevicePointer;
if (width == 0 || height == 0)
return hipErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y));
hipLaunchKernelGGL(( gpuIntrinsicWarp), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height,
focalLength, principalPoint,
distortion.x, distortion.y, distortion.z, distortion.w);
return CUDA(hipGetLastError());
}
| fbe257f29ba82f0998af946a277c34754f4beb9f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaWarp.h"
// gpuIntrinsicWarp
// For each output pixel, computes the distorted input coordinate from the
// pinhole intrinsics (focalLength, principalPoint) and the distortion terms
// (radial k1/k2 and tangential p1/p2 — presumably the Brown-Conrady model;
// confirm), then samples the input with nearest-neighbor lookup. Output
// pixels whose source falls outside the image are left untouched.
// Launch: 2D grid of 2D blocks covering width x height, one thread/pixel.
template<typename T>
__global__ void gpuIntrinsicWarp( T* input, T* output, int width, int height,
float2 focalLength, float2 principalPoint, float k1, float k2, float p1, float p2)
{
const int2 uv_out = make_int2(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
if( uv_out.x >= width || uv_out.y >= height )
return;
const float u = uv_out.x;
const float v = uv_out.y;
const float _fx = 1.0f / focalLength.x;
const float _fy = 1.0f / focalLength.y;
const float y = (v - principalPoint.y)*_fy;
const float y2 = y*y;
// Use float literals throughout: the previous 2.0/3.0/1.0 double literals
// silently promoted these expressions to double-precision arithmetic
// inside a float kernel.
const float _2p1y = 2.0f*p1*y;
const float _3p1y2 = 3.0f*p1*y2;
const float p2y2 = p2*y2;
const float x = (u - principalPoint.x)*_fx;
const float x2 = x*x;
const float r2 = x2 + y2;
const float d = 1.0f + (k1 + k2*r2)*r2;
const float _u = focalLength.x*(x*(d + _2p1y) + p2y2 + (3.0f*p2)*x2) + principalPoint.x;
const float _v = focalLength.y*(y*(d + (2.0f*p2)*x) + _3p1y2 + p1*x2) + principalPoint.y;
const int2 uv_in = make_int2( _u, _v );
if( uv_in.x >= width || uv_in.y >= height || uv_in.x < 0 || uv_in.y < 0 )
return;
output[uv_out.y * width + uv_out.x] = input[uv_in.y * width + uv_in.x];
}
// cudaWarpIntrinsic
// Host wrapper for uchar4 images: validates arguments, tiles the image with
// 8x8 thread blocks, and launches gpuIntrinsicWarp on the default stream.
// distortion packs (k1, k2, p1, p2) in .x/.y/.z/.w respectively.
cudaError_t cudaWarpIntrinsic( uchar4* input, uchar4* output, uint32_t width, uint32_t height,
const float2& focalLength, const float2& principalPoint, const float4& distortion )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return cudaErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
gpuIntrinsicWarp<<<gridDim, blockDim>>>(input, output, width, height,
focalLength, principalPoint,
distortion.x, distortion.y, distortion.z, distortion.w);
return CUDA(cudaGetLastError());
}
// cudaWarpIntrinsic
// Host wrapper for float4 images: same validation and 8x8-block launch as
// the uchar4 overload. distortion packs (k1, k2, p1, p2) in .x/.y/.z/.w.
cudaError_t cudaWarpIntrinsic( float4* input, float4* output, uint32_t width, uint32_t height,
const float2& focalLength, const float2& principalPoint, const float4& distortion )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return cudaErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
gpuIntrinsicWarp<<<gridDim, blockDim>>>(input, output, width, height,
focalLength, principalPoint,
distortion.x, distortion.y, distortion.z, distortion.w);
return CUDA(cudaGetLastError());
}
// cudaWarpIntrinsic
// Host wrapper for single-channel 16-bit (ushort1) images: same validation
// and 8x8-block launch as the other overloads. distortion packs
// (k1, k2, p1, p2) in .x/.y/.z/.w.
cudaError_t cudaWarpIntrinsic(ushort1* input, ushort1* output, uint32_t width, uint32_t height,
const float2& focalLength, const float2& principalPoint, const float4& distortion)
{
if (!input || !output)
return cudaErrorInvalidDevicePointer;
if (width == 0 || height == 0)
return cudaErrorInvalidValue;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y));
gpuIntrinsicWarp<<<gridDim, blockDim>>>(input, output, width, height,
focalLength, principalPoint,
distortion.x, distortion.y, distortion.z, distortion.w);
return CUDA(cudaGetLastError());
}
|
a6c0f63618c198026ccf7d288ea8056d57151f30.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
#include "vecAdd.h"
static const size_t N = 20;
static size_t threads = 256;
static size_t blocks = (N - 1) / threads + 1;
// Fill p[0..size) with its own index value, i.e. p[i] == i afterwards.
void init(int *p, size_t size) {
size_t idx = 0;
while (idx < size) {
p[idx] = static_cast<int>(idx);
++idx;
}
}
// Debug helper: print each element of p to stdout as "index <i>: <value>".
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
// Per-OpenMP-thread vector-add smoke test. Optionally runs under MPI
// (USE_MPI); argv[1] selects the CUDA/HIP device. Each OpenMP thread
// allocates its own device buffers, runs vecAdd over N elements, copies the
// result back, and prints it under a critical section.
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cuda_init_device(device_id);
#pragma omp parallel
{
int l[N], r[N], p[N];
int *dl, *dr, *dp;
init(l, N);
init(r, N);
RUNTIME_API_CALL(hipMalloc(&dl, N * sizeof(int)));
RUNTIME_API_CALL(hipMalloc(&dr, N * sizeof(int)));
RUNTIME_API_CALL(hipMalloc(&dp, N * sizeof(int)));
RUNTIME_API_CALL(hipMemcpy(dl, l, N * sizeof(int), hipMemcpyHostToDevice));
RUNTIME_API_CALL(hipMemcpy(dr, r, N * sizeof(int), hipMemcpyHostToDevice));
// FIX: hipify had wrapped the GPU_TEST_FOR macro itself inside
// hipLaunchKernelGGL(...), which does not compile — the launch belongs
// inside the macro argument, mirroring the original CUDA form
// GPU_TEST_FOR((vecAdd<<<blocks, threads>>>(dl, dr, dp, 0, N))).
GPU_TEST_FOR((hipLaunchKernelGGL(vecAdd, dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, 0, N)));
RUNTIME_API_CALL(hipMemcpy(p, dp, N * sizeof(int), hipMemcpyDeviceToHost));
RUNTIME_API_CALL(hipFree(dl));
RUNTIME_API_CALL(hipFree(dr));
RUNTIME_API_CALL(hipFree(dp));
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
}
hipDeviceSynchronize();
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
| a6c0f63618c198026ccf7d288ea8056d57151f30.cu | #include <cstdio>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
#include "vecAdd.h"
static const size_t N = 20;
static size_t threads = 256;
static size_t blocks = (N - 1) / threads + 1;
// Fill p[0..size) with its own index value, i.e. p[i] == i afterwards.
void init(int *p, size_t size) {
size_t idx = 0;
while (idx < size) {
p[idx] = static_cast<int>(idx);
++idx;
}
}
// Debug helper: print each element of p to stdout as "index <i>: <value>".
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
// Per-OpenMP-thread vector-add smoke test. Optionally runs under MPI
// (USE_MPI); argv[1] selects the CUDA device. Each OpenMP thread allocates
// its own device buffers, runs vecAdd over N elements (wrapped by the
// GPU_TEST_FOR harness macro), copies the result back, and prints it under
// a critical section.
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cuda_init_device(device_id);
#pragma omp parallel
{
int l[N], r[N], p[N];
int *dl, *dr, *dp;
init(l, N);
init(r, N);
RUNTIME_API_CALL(cudaMalloc(&dl, N * sizeof(int)));
RUNTIME_API_CALL(cudaMalloc(&dr, N * sizeof(int)));
RUNTIME_API_CALL(cudaMalloc(&dp, N * sizeof(int)));
RUNTIME_API_CALL(cudaMemcpy(dl, l, N * sizeof(int), cudaMemcpyHostToDevice));
RUNTIME_API_CALL(cudaMemcpy(dr, r, N * sizeof(int), cudaMemcpyHostToDevice));
GPU_TEST_FOR((vecAdd<<<blocks, threads>>>(dl, dr, dp, 0, N)));
RUNTIME_API_CALL(cudaMemcpy(p, dp, N * sizeof(int), cudaMemcpyDeviceToHost));
RUNTIME_API_CALL(cudaFree(dl));
RUNTIME_API_CALL(cudaFree(dr));
RUNTIME_API_CALL(cudaFree(dp));
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
}
// Drain any outstanding device work before (optionally) finalizing MPI.
cudaDeviceSynchronize();
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
|
2ef4a2d5f8e8561613b9b573b9b0eb8d5030ff7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define N 10
// Element-wise c = a + b over N elements, indexed by blockIdx.x only —
// presumably launched with one thread per block (<<<N, 1>>>-style); confirm
// at the call site. The guard handles grids larger than N.
__global__ void add_gpu( int *a, int *b, int *c ) {
int tid = blockIdx.x; // handle the data at this index
if (tid < N)
c[tid] = a[tid] + b[tid];
} | 2ef4a2d5f8e8561613b9b573b9b0eb8d5030ff7c.cu | #include "includes.h"
#define N 10
__global__ void add_gpu( int *a, int *b, int *c ) {
int tid = blockIdx.x; // handle the data at this index
if (tid < N)
c[tid] = a[tid] + b[tid];
} |
58f0b19b90d77a064cf6b01645d8c415443bb3a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/fluid/operators/metrics/accuracy_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Top-k accuracy kernel, launched as a single block of BlockSize threads.
// Sample i (of N) counts as correct if any of its D candidate indices in
// Xdata[i*D .. i*D+D) equals labeldata[i]. Per-thread counts are staged in
// shared memory, reduced block-wide, and thread 0 writes correct_data,
// accuracy (= correct / N) and total_data (= N).
// NOTE: the HIP reduction path is a power-of-two tree — it assumes
// BlockSize is a power of two.
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D,
const int64_t* Xdata,
const int64_t* labeldata, int* correct_data,
float* accuracy, int* total_data) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
#ifdef PADDLE_WITH_CUDA
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
#else
// HIP thrust::reduce not support __device__
for (int s = BlockSize / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
total[threadIdx.x] += total[threadIdx.x + s];
}
__syncthreads();
}
int result = total[0];
#endif
if (threadIdx.x == 0) {
*correct_data = result;
*accuracy = static_cast<float>(result) / static_cast<float>(N);
*total_data = N;
}
}
// Host-side accuracy op: reads the top-k indices ("Indices") and the int64
// labels ("Label"), zeroes the accuracy output, and launches a single
// PADDLE_CUDA_NUM_THREADS-sized block of AccuracyCudaKernel to fill
// Accuracy, Correct and Total. An empty batch (num_samples == 0) leaves
// accuracy at 0 and skips the launch.
template <typename T>
class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* inference = ctx.Input<Tensor>("Out");
auto* indices = ctx.Input<Tensor>("Indices");
auto* label = ctx.Input<Tensor>("Label");
auto* accuracy = ctx.Output<Tensor>("Accuracy");
auto* correct = ctx.Output<Tensor>("Correct");
auto* total = ctx.Output<Tensor>("Total");
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int64_t* indices_data = indices->data<int64_t>();
const int64_t* label_data = label->data<int64_t>();
int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
int* total_data = total->mutable_data<int>(ctx.GetPlace());
float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
int num_samples = static_cast<int>(inference->dims()[0]);
size_t infer_width = inference->dims()[1];
auto stream = ctx.cuda_device_context().stream();
platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream);
if (num_samples == 0) {
return;
}
hipLaunchKernelGGL(( AccuracyCudaKernel<
PADDLE_CUDA_NUM_THREADS>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
num_samples, infer_width, indices_data, label_data, correct_data,
accuracy_data, total_data);
}
};
} // namespace operators
} // namespace paddle
// FIXME(typhoonzero): types of T is for inference data.
// label data is always int64
REGISTER_OP_CUDA_KERNEL(
accuracy, paddle::operators::AccuracyOpCUDAKernel<float>,
paddle::operators::AccuracyOpCUDAKernel<double>,
paddle::operators::AccuracyOpCUDAKernel<paddle::platform::float16>);
| 58f0b19b90d77a064cf6b01645d8c415443bb3a7.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/fluid/operators/metrics/accuracy_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Counts how many samples have their true label among the D predicted indices
// kept for them, then reduces the per-thread counts and writes correct-count,
// total-count, and accuracy (correct / N).
//
// Launch contract: exactly ONE block of BlockSize threads — the reduction
// below only spans one block's shared memory.
//   N          - number of samples
//   D          - number of candidate indices per sample (top-k width)
//   Xdata      - N x D predicted indices (row-major)
//   labeldata  - N ground-truth labels
//   correct_data / accuracy / total_data - single-element device outputs
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D,
                                   const int64_t* Xdata,
                                   const int64_t* labeldata, int* correct_data,
                                   float* accuracy, int* total_data) {
  int count = 0;
  __shared__ int total[BlockSize];

  // support only 1 block: each thread strides over samples and counts hits.
  for (int i = threadIdx.x; i < (N); i += BlockSize) {
    for (int j = 0; j < D; ++j) {
      if (Xdata[i * D + j] == labeldata[i]) {
        ++count;
        break;  // each sample contributes at most one hit
      }
    }
  }
  total[threadIdx.x] = count;
  __syncthreads();

// reduce the count with init value 0, and output accuracy.
#ifdef PADDLE_WITH_CUDA
  // NOTE: every thread performs the same full reduction (redundant but
  // correct), so `result` is valid in all threads.
  int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
#else
  // HIP thrust::reduce not support __device__
  // Classic shared-memory tree reduction; requires BlockSize to be a power
  // of two (PADDLE_CUDA_NUM_THREADS at the call site).
  for (int s = BlockSize / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) {
      total[threadIdx.x] += total[threadIdx.x + s];
    }
    __syncthreads();
  }
  int result = total[0];
#endif
  if (threadIdx.x == 0) {
    *correct_data = result;
    *accuracy = static_cast<float>(result) / static_cast<float>(N);
    *total_data = N;
  }
}
// GPU kernel for the "accuracy" operator: given the top-k indices produced by
// a preceding top_k op ("Indices") and the ground-truth labels ("Label"),
// computes the number of correct predictions, the total sample count, and
// their ratio ("Accuracy").
template <typename T>
class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* inference = ctx.Input<Tensor>("Out");
    auto* indices = ctx.Input<Tensor>("Indices");
    auto* label = ctx.Input<Tensor>("Label");

    auto* accuracy = ctx.Output<Tensor>("Accuracy");
    auto* correct = ctx.Output<Tensor>("Correct");
    auto* total = ctx.Output<Tensor>("Total");
    // FIXME(typhoonzero): only support indices currently
    // if add support for output values, how to detect the data type?
    const int64_t* indices_data = indices->data<int64_t>();
    const int64_t* label_data = label->data<int64_t>();

    int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
    int* total_data = total->mutable_data<int>(ctx.GetPlace());
    float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());

    int num_samples = static_cast<int>(inference->dims()[0]);
    size_t infer_width = inference->dims()[1];
    auto stream = ctx.cuda_device_context().stream();
    // Zero the accuracy output up front so the num_samples == 0 early return
    // below still leaves a well-defined value behind.
    platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream);

    if (num_samples == 0) {
      return;
    }

    // Single-block launch: AccuracyCudaKernel's reduction assumes one block.
    AccuracyCudaKernel<
        PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
        num_samples, infer_width, indices_data, label_data, correct_data,
        accuracy_data, total_data);
  }
};
} // namespace operators
} // namespace paddle
// FIXME(typhoonzero): types of T is for inference data.
// label data is always int64
// Register the accuracy op's GPU kernel for float/double/float16 inference
// data (label data is always int64 regardless of T).
REGISTER_OP_CUDA_KERNEL(
    accuracy, paddle::operators::AccuracyOpCUDAKernel<float>,
    paddle::operators::AccuracyOpCUDAKernel<double>,
    paddle::operators::AccuracyOpCUDAKernel<paddle::platform::float16>);
|
2c15985e350f77eb0a72b26247c2a681f0d2def0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdlib.h>
#include<stdio.h>
#include<time.h>
#include<iostream>
using namespace std;
// CUDA settings
#define WARP_SIZE 32
#define WARP_COUNT 16
#define BLOCK_COUNT 13
// Size of each matrix block for dual space computation
#define BLOCK_A_HEIGHT 32
#define CHUNK_SIZE 32
#define BLOCK_B_WIDTH 32
// Shape descriptor for a dense, row-major 2D matrix. Passed by value into
// kernels alongside the raw data pointer.
typedef struct {
    int dimension1;  // number of rows (used as the row count throughout)
    int dimension2;  // number of columns (row stride in elements)
    // Total element count of the matrix.
    __device__ int getSize() {
        return dimension1 * dimension2;
    }
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
double *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
double *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------
void createMetadataForC();
void allocateAndInitializeAB();
void allocateC();
void die(const char *error);
void check_error(hipError_t e);
void computeCpuMMM();
void compareHostAndGpuOutput();
//----------------------------------- CUDA function definitions -----------------------------------
__global__ void matrixMultiplicationKernelDualSpace(const double *A_GPU, const double *B_GPU, double *C_GPU,
ArrayMetadata2D A_MD, ArrayMetadata2D B_MD, ArrayMetadata2D C_MD);
// Entry point: read matrix dimensions from argv (defaults: 1000x1000 square
// matrices), allocate and initialize A/B, run the dual-space GPU matrix
// multiplication, and copy the result back to the host.
// NOTE: clock() measures CPU time, not wall-clock time, so the reported
// "execution time" understates the GPU wait on some platforms.
int main(int argc, char **argv) {

    A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 1000;
    A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
    B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
    B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;

    clock_t start = clock();
    createMetadataForC();
    allocateAndInitializeAB();
    allocateC();
    clock_t end = clock();
    double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
    cout << "Allocation time: " << elapsed << " seconds \n";

    start = clock();
    int threadsPerBlock = WARP_SIZE * WARP_COUNT;
    hipLaunchKernelGGL(( matrixMultiplicationKernelDualSpace), dim3(BLOCK_COUNT), dim3(threadsPerBlock) , 0, 0,
        A_GPU, B_GPU, C_GPU, A_MD, B_MD, C_MD);
    // Check the synchronize result too, so in-kernel faults are reported.
    check_error(hipDeviceSynchronize());
    check_error(hipGetLastError());
    size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(double);
    C_CPU = (double *) malloc(sizeofC);
    check_error(hipMemcpy(C_CPU, C_GPU, sizeofC, hipMemcpyDeviceToHost));
    end = clock();
    elapsed = (end - start) / (double) CLOCKS_PER_SEC;
    cout << "Execution time: " << elapsed << " seconds \n";

    // Release device and host buffers (the original leaked all of these
    // until process exit).
    check_error(hipFree(A_GPU));
    check_error(hipFree(B_GPU));
    check_error(hipFree(C_GPU));
    free(A);
    free(B);
    free(C_CPU);
    return 0;
}
// gather and store the dimensionality information concerning both C and D
// Derive C's dimensions from its operands: C (A.rows x B.cols) = A * B.
void createMetadataForC() {
    C_MD.dimension1 = A_MD.dimension1;
    C_MD.dimension2 = B_MD.dimension2;
}
// initialize A and B using a random number generator then copy the two matrix in GPU memory
// Allocate A and B on host and device, fill the host copies with small random
// integer values (0..4), and enqueue copies to the GPU on the default stream
// (stream 0) so the later kernel launch on the same stream is ordered after
// both transfers.
// NOTE(review): A and B are pageable (plain malloc), so these "async" copies
// are effectively synchronous; correctness is unaffected since everything
// runs on stream 0.
void allocateAndInitializeAB() {

    size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(double);
    A = (double*) malloc(sizeofA);
    check_error(hipMalloc((void **) &A_GPU, sizeofA));
    srand(time(NULL));
    for (int i = 0; i < A_MD.dimension1; i++)
        for (int j = 0; j < A_MD.dimension2; j++) {
            int index = i * A_MD.dimension2 + j;
            A[index] = (rand() % 5);
        }
    check_error(hipMemcpyAsync(A_GPU, A, sizeofA, hipMemcpyHostToDevice, 0));

    size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(double);
    B = (double*) malloc(sizeofB);
    check_error(hipMalloc((void **) &B_GPU, sizeofB));
    for (int i = 0; i < B_MD.dimension1; i++)
        for (int j = 0; j < B_MD.dimension2; j++) {
            int index = i * B_MD.dimension2 + j;
            B[index] = (rand() % 5);
        }
    check_error(hipMemcpyAsync(B_GPU, B, sizeofB, hipMemcpyHostToDevice, 0));
}
// allocate C in the device and hold a reference of it in the host memory for later use
// Allocate the result matrix C in device memory; the pointer is kept in the
// global C_GPU for the kernel launch and the later copy-back.
void allocateC() {
    size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(double);
    check_error(hipMalloc((void **) &C_GPU, sizeofC));
}
// Prints the specified error message and then exits
// Report a fatal error and terminate with a nonzero exit code.
// Fix: diagnostics belong on stderr (stdout may be redirected or buffered),
// and a trailing newline keeps the message on its own line.
void die(const char *error) {
    fprintf(stderr, "%s\n", error);
    exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
// Abort the program with a diagnostic if the given HIP status is an error.
void check_error(hipError_t e) {
    if (e == hipSuccess) return;  // fast path: nothing to report
    printf("\nCUDA error: %s\n", hipGetErrorString(e));
    exit(1);
}
// Dual-space blocked matrix multiplication: C = A * B (all row-major doubles).
//
// Launch contract: BLOCK_COUNT blocks of WARP_SIZE * WARP_COUNT threads.
// Each block owns a contiguous band of A's rows and sweeps all of B's columns.
// Within a block, BLOCK_A_HEIGHT x CHUNK_SIZE tiles of A and
// CHUNK_SIZE x BLOCK_B_WIDTH tiles of B are staged through shared memory,
// partial sums accumulate in shared D_partialResults, and finished tiles are
// written back to global C. Work division: warps stride over rows, lanes
// within a warp stride over columns/elements.
__global__ void matrixMultiplicationKernelDualSpace(const double *A_GPU, const double *B_GPU, double *C_GPU,
        ArrayMetadata2D A_MD, ArrayMetadata2D B_MD, ArrayMetadata2D C_MD) {

    int threadId = threadIdx.x % WARP_SIZE;  // lane id within the warp
    int warpId = threadIdx.x / WARP_SIZE;    // warp id within the block
    int blockId = blockIdx.x;

    // two shared memory 2D arrays for storing chunks from a number of rows and columns of A and B respectively
    __shared__ double A_parts[BLOCK_A_HEIGHT][CHUNK_SIZE];
    __shared__ double B_parts[CHUNK_SIZE][BLOCK_B_WIDTH];
    // one shared memory array where each warp stores partial D[i][j][l] += A[i][k] * B[j][k] values that at the
    // end get accumulated into C[i][j]
    __shared__ double D_partialResults[BLOCK_A_HEIGHT][BLOCK_B_WIDTH];

    // allocate a consecutive number of rows to each thread block; each will process all columns
    int rowsPerBlock = A_MD.dimension1 / BLOCK_COUNT;
    int extraRows = A_MD.dimension1 % BLOCK_COUNT;
    int rowsBegin = rowsPerBlock * blockId;
    int rowsEnd = rowsBegin + rowsPerBlock - 1;
    rowsEnd += (blockId < BLOCK_COUNT - 1) ? 0 : extraRows; // last block is taking care of some extra rows;
                                                            // ideally, these should also be distributed
    int commonDimensionLength = A_MD.dimension2;

    // at a time BLOCK_A_HEIGHT number of rows are processed
    for (int i = rowsBegin; i <= rowsEnd; i += BLOCK_A_HEIGHT) {
        // at a time BLOCK_B_WIDTH number of columns are processed
        for (int j = 0; j < B_MD.dimension2; j+= BLOCK_B_WIDTH) {

            // check how many rows and columns of local A and B parts are valid
            int lastValidRow = i + BLOCK_A_HEIGHT - 1;
            if (lastValidRow > rowsEnd) lastValidRow = rowsEnd;
            int rowCount = lastValidRow - i + 1;
            int lastValidColumn = j + BLOCK_B_WIDTH - 1;
            if (lastValidColumn >= B_MD.dimension2) lastValidColumn = B_MD.dimension2 - 1;
            int columnCount = lastValidColumn - j + 1;

            // reset D[i][j]'s to zeros to accumate results for new a (i, j) indices
            // NOTE: each D element is reset, accumulated, and uploaded by the
            // same thread (identical warp/lane striding in all three loops),
            // so no extra barrier is needed between upload and the next reset.
            for (int row = warpId; row < BLOCK_A_HEIGHT; row += WARP_COUNT) {
                for (int column = threadId; column < BLOCK_B_WIDTH; column += WARP_SIZE) {
                    D_partialResults[row][column] = 0;
                }
            }

            // For each row and column, only a section of chunk-size would be downloaded once as instructed
            // by the programmer through sub-partition specification
            for (int k = 0; k < commonDimensionLength; k += CHUNK_SIZE) {

                // barrier: the previous iteration's tiles must be fully
                // consumed before they are overwritten below
                __syncthreads();

                // Cleanup operation .................................................................
                // cleanup old/invalid A_parts and B_parts from shared memory
                // (zeroed entries make the fixed-trip multiply loop below safe
                // at ragged edges: 0 * anything contributes nothing)
                for (int row = warpId; row < BLOCK_A_HEIGHT; row += WARP_COUNT) {
                    for (int cleanupIndex = threadId; cleanupIndex < CHUNK_SIZE;
                            cleanupIndex += WARP_SIZE) {
                        A_parts[row][cleanupIndex] = 0;
                    }
                }
                for (int row = warpId; row < CHUNK_SIZE; row += WARP_COUNT) {
                    for (int cleanupIndex = threadId; cleanupIndex < BLOCK_B_WIDTH;
                            cleanupIndex += WARP_SIZE) {
                        B_parts[row][cleanupIndex] = 0;
                    }
                }

                // Data read from global to shared memory .............................................
                // determine A's row and column boundaries
                int beginARow = i;
                int endARow = (i + BLOCK_A_HEIGHT - 1) > rowsEnd ?
                        rowsEnd : beginARow + BLOCK_A_HEIGHT - 1;
                int beginAColumn = k;
                int endAColumn = (k + CHUNK_SIZE - 1) >= commonDimensionLength ?
                        commonDimensionLength - 1 : beginAColumn + CHUNK_SIZE - 1;

                // download a section of A; differnt warps download different rows
                for(int rowForCurrentWarp = beginARow + warpId; rowForCurrentWarp <= endARow;
                        rowForCurrentWarp += WARP_COUNT) {
                    int localRowIndex = rowForCurrentWarp - beginARow;
                    int globalRowStart = rowForCurrentWarp * A_MD.dimension2;
                    // different threads download different elements of A from the global memory
                    for (int elementIndex = beginAColumn + threadId;
                            elementIndex <= endAColumn; elementIndex += WARP_SIZE) {
                        A_parts[localRowIndex][elementIndex - beginAColumn]
                                = A_GPU[globalRowStart + elementIndex];
                    }
                }

                // determine B's row and column boundaries
                int beginBRow = k;
                int endBRow = (k + CHUNK_SIZE - 1) >= commonDimensionLength ?
                        commonDimensionLength - 1 : beginBRow + CHUNK_SIZE - 1;
                int beginBColumn = j;
                int endBColumn = (j + BLOCK_B_WIDTH - 1) >= B_MD.dimension2 ?
                        B_MD.dimension2 - 1 : beginBColumn + BLOCK_B_WIDTH - 1;

                // download a section of B; different warps download different rows
                for (int rowForCurrentWarp = beginBRow + warpId; rowForCurrentWarp <= endBRow;
                        rowForCurrentWarp += WARP_COUNT) {
                    int localRowIndex = rowForCurrentWarp - beginBRow;
                    int globalRowStart = rowForCurrentWarp * B_MD.dimension2;
                    // different threads download different elements of B from the global memory
                    for (int elementIndex = beginBColumn + threadId;
                            elementIndex <= endBColumn; elementIndex += WARP_SIZE) {
                        B_parts[localRowIndex][elementIndex - beginBColumn]
                                = B_GPU[globalRowStart + elementIndex];
                    }
                }

                //__threadfence_block();
                __syncthreads(); // do a sync to make A and B parts available to all threads in the SM

                // Block matrix multiplication kernel .............................................
                // different threads take care of different B columns
                for (int c = threadId; c < columnCount; c+= WARP_SIZE) {
                    // different warps take care of different rows of A
                    for (int r = warpId; r < rowCount; r += WARP_COUNT) {
                        // all threads go through each values of the common dimension
                        for (int e = 0; e < CHUNK_SIZE; e++) {
                            D_partialResults[r][c] += A_parts[r][e] * B_parts[e][c];
                        }
                    }
                }
            } // loop over k's ends

            //__threadfence_block();
            __syncthreads(); // do a sync to make updated D == C available to all threads

            // Data upload from shared to global memory .................................................
            // let different warps upload different rows and different threads within each warp
            // upload different columns of C
            for (int row = warpId; row < rowCount; row += WARP_COUNT) {
                int rowIndex = i + row;
                for (int column = threadId; column < columnCount; column += WARP_SIZE) {
                    int columnIndex = j + column;
                    int C_index = rowIndex * C_MD.dimension2 + columnIndex;
                    C_GPU[C_index] = D_partialResults[row][column];
                }
            }
        } // loop over j's ends
    } // loop over i's ends
}
// Reference CPU implementation: C = A * B via the textbook triple loop.
// Reads the global host matrices A and B and allocates the global C.
void computeCpuMMM() {

    // allocate the result matrix for the CPU computation
    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(double);
    C = (double*) malloc(bytesC);

    // C[row][col] = sum over k of A[row][k] * B[k][col]
    for (int row = 0; row < A_MD.dimension1; row++) {
        const int aRowStart = row * A_MD.dimension2;
        const int cRowStart = row * C_MD.dimension2;
        for (int col = 0; col < B_MD.dimension2; col++) {
            double sum = 0;
            for (int k = 0; k < B_MD.dimension1; k++) {
                sum += A[aRowStart + k] * B[k * B_MD.dimension2 + col];
            }
            C[cRowStart + col] = sum;
        }
    }
}
// Compare the CPU reference result (C) against the GPU result (C_CPU),
// printing every element pair that differs by more than 0.01 and a summary.
void compareHostAndGpuOutput() {
    const int totalElements = C_MD.dimension1 * C_MD.dimension2;
    int missmatchCount = 0;
    for (int idx = 0; idx < totalElements; idx++) {
        if (fabs(C[idx] - C_CPU[idx]) > 0.01) {
            missmatchCount++;
            printf("mismatch at index %i: %f\t%f\n", idx, C[idx], C_CPU[idx]);
        }
    }
    if (missmatchCount > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
| 2c15985e350f77eb0a72b26247c2a681f0d2def0.cu | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
#include<iostream>
using namespace std;
// CUDA settings
#define WARP_SIZE 32
#define WARP_COUNT 16
#define BLOCK_COUNT 13
// Size of each matrix block for dual space computation
#define BLOCK_A_HEIGHT 32
#define CHUNK_SIZE 32
#define BLOCK_B_WIDTH 32
// Shape descriptor for a dense, row-major 2D matrix. Passed by value into
// kernels alongside the raw data pointer.
typedef struct {
    int dimension1;  // number of rows (used as the row count throughout)
    int dimension2;  // number of columns (row stride in elements)
    // Total element count of the matrix.
    __device__ int getSize() {
        return dimension1 * dimension2;
    }
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
double *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
double *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------
void createMetadataForC();
void allocateAndInitializeAB();
void allocateC();
void die(const char *error);
void check_error(cudaError e);
void computeCpuMMM();
void compareHostAndGpuOutput();
//----------------------------------- CUDA function definitions -----------------------------------
__global__ void matrixMultiplicationKernelDualSpace(const double *A_GPU, const double *B_GPU, double *C_GPU,
ArrayMetadata2D A_MD, ArrayMetadata2D B_MD, ArrayMetadata2D C_MD);
// Entry point: read matrix dimensions from argv (defaults: 1000x1000 square
// matrices), allocate and initialize A/B, run the dual-space GPU matrix
// multiplication, and copy the result back to the host.
// NOTE: clock() measures CPU time, not wall-clock time, so the reported
// "execution time" understates the GPU wait on some platforms.
int main(int argc, char **argv) {

    A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 1000;
    A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
    B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
    B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;

    clock_t start = clock();
    createMetadataForC();
    allocateAndInitializeAB();
    allocateC();
    clock_t end = clock();
    double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
    cout << "Allocation time: " << elapsed << " seconds \n";

    start = clock();
    int threadsPerBlock = WARP_SIZE * WARP_COUNT;
    matrixMultiplicationKernelDualSpace<<< BLOCK_COUNT, threadsPerBlock >>>
        (A_GPU, B_GPU, C_GPU, A_MD, B_MD, C_MD);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported replacement, and checking its result surfaces in-kernel faults.
    check_error(cudaDeviceSynchronize());
    check_error(cudaGetLastError());
    size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(double);
    C_CPU = (double *) malloc(sizeofC);
    check_error(cudaMemcpy(C_CPU, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
    end = clock();
    elapsed = (end - start) / (double) CLOCKS_PER_SEC;
    cout << "Execution time: " << elapsed << " seconds \n";

    // Release device and host buffers (the original leaked all of these
    // until process exit).
    check_error(cudaFree(A_GPU));
    check_error(cudaFree(B_GPU));
    check_error(cudaFree(C_GPU));
    free(A);
    free(B);
    free(C_CPU);
    return 0;
}
// gather and store the dimensionality information concerning both C and D
// Derive C's dimensions from its operands: C (A.rows x B.cols) = A * B.
void createMetadataForC() {
    C_MD.dimension1 = A_MD.dimension1;
    C_MD.dimension2 = B_MD.dimension2;
}
// initialize A and B using a random number generator then copy the two matrix in GPU memory
// Allocate A and B on host and device, fill the host copies with small random
// integer values (0..4), and enqueue copies to the GPU on the default stream
// (stream 0) so the later kernel launch on the same stream is ordered after
// both transfers.
// NOTE(review): A and B are pageable (plain malloc), so these "async" copies
// are effectively synchronous; correctness is unaffected since everything
// runs on stream 0.
void allocateAndInitializeAB() {

    size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(double);
    A = (double*) malloc(sizeofA);
    check_error(cudaMalloc((void **) &A_GPU, sizeofA));
    srand(time(NULL));
    for (int i = 0; i < A_MD.dimension1; i++)
        for (int j = 0; j < A_MD.dimension2; j++) {
            int index = i * A_MD.dimension2 + j;
            A[index] = (rand() % 5);
        }
    check_error(cudaMemcpyAsync(A_GPU, A, sizeofA, cudaMemcpyHostToDevice, 0));

    size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(double);
    B = (double*) malloc(sizeofB);
    check_error(cudaMalloc((void **) &B_GPU, sizeofB));
    for (int i = 0; i < B_MD.dimension1; i++)
        for (int j = 0; j < B_MD.dimension2; j++) {
            int index = i * B_MD.dimension2 + j;
            B[index] = (rand() % 5);
        }
    check_error(cudaMemcpyAsync(B_GPU, B, sizeofB, cudaMemcpyHostToDevice, 0));
}
// allocate C in the device and hold a reference of it in the host memory for later use
// Allocate the result matrix C in device memory; the pointer is kept in the
// global C_GPU for the kernel launch and the later copy-back.
void allocateC() {
    size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(double);
    check_error(cudaMalloc((void **) &C_GPU, sizeofC));
}
// Prints the specified error message and then exits
// Report a fatal error and terminate with a nonzero exit code.
// Fix: diagnostics belong on stderr (stdout may be redirected or buffered),
// and a trailing newline keeps the message on its own line.
void die(const char *error) {
    fprintf(stderr, "%s\n", error);
    exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
// Abort the program with a diagnostic if the given CUDA status is an error.
void check_error(cudaError e) {
    if (e == cudaSuccess) return;  // fast path: nothing to report
    printf("\nCUDA error: %s\n", cudaGetErrorString(e));
    exit(1);
}
// Dual-space blocked matrix multiplication: C = A * B (all row-major doubles).
//
// Launch contract: BLOCK_COUNT blocks of WARP_SIZE * WARP_COUNT threads.
// Each block owns a contiguous band of A's rows and sweeps all of B's columns.
// Within a block, BLOCK_A_HEIGHT x CHUNK_SIZE tiles of A and
// CHUNK_SIZE x BLOCK_B_WIDTH tiles of B are staged through shared memory,
// partial sums accumulate in shared D_partialResults, and finished tiles are
// written back to global C. Work division: warps stride over rows, lanes
// within a warp stride over columns/elements.
__global__ void matrixMultiplicationKernelDualSpace(const double *A_GPU, const double *B_GPU, double *C_GPU,
        ArrayMetadata2D A_MD, ArrayMetadata2D B_MD, ArrayMetadata2D C_MD) {

    int threadId = threadIdx.x % WARP_SIZE;  // lane id within the warp
    int warpId = threadIdx.x / WARP_SIZE;    // warp id within the block
    int blockId = blockIdx.x;

    // two shared memory 2D arrays for storing chunks from a number of rows and columns of A and B respectively
    __shared__ double A_parts[BLOCK_A_HEIGHT][CHUNK_SIZE];
    __shared__ double B_parts[CHUNK_SIZE][BLOCK_B_WIDTH];
    // one shared memory array where each warp stores partial D[i][j][l] += A[i][k] * B[j][k] values that at the
    // end get accumulated into C[i][j]
    __shared__ double D_partialResults[BLOCK_A_HEIGHT][BLOCK_B_WIDTH];

    // allocate a consecutive number of rows to each thread block; each will process all columns
    int rowsPerBlock = A_MD.dimension1 / BLOCK_COUNT;
    int extraRows = A_MD.dimension1 % BLOCK_COUNT;
    int rowsBegin = rowsPerBlock * blockId;
    int rowsEnd = rowsBegin + rowsPerBlock - 1;
    rowsEnd += (blockId < BLOCK_COUNT - 1) ? 0 : extraRows; // last block is taking care of some extra rows;
                                                            // ideally, these should also be distributed
    int commonDimensionLength = A_MD.dimension2;

    // at a time BLOCK_A_HEIGHT number of rows are processed
    for (int i = rowsBegin; i <= rowsEnd; i += BLOCK_A_HEIGHT) {
        // at a time BLOCK_B_WIDTH number of columns are processed
        for (int j = 0; j < B_MD.dimension2; j+= BLOCK_B_WIDTH) {

            // check how many rows and columns of local A and B parts are valid
            int lastValidRow = i + BLOCK_A_HEIGHT - 1;
            if (lastValidRow > rowsEnd) lastValidRow = rowsEnd;
            int rowCount = lastValidRow - i + 1;
            int lastValidColumn = j + BLOCK_B_WIDTH - 1;
            if (lastValidColumn >= B_MD.dimension2) lastValidColumn = B_MD.dimension2 - 1;
            int columnCount = lastValidColumn - j + 1;

            // reset D[i][j]'s to zeros to accumate results for new a (i, j) indices
            // NOTE: each D element is reset, accumulated, and uploaded by the
            // same thread (identical warp/lane striding in all three loops),
            // so no extra barrier is needed between upload and the next reset.
            for (int row = warpId; row < BLOCK_A_HEIGHT; row += WARP_COUNT) {
                for (int column = threadId; column < BLOCK_B_WIDTH; column += WARP_SIZE) {
                    D_partialResults[row][column] = 0;
                }
            }

            // For each row and column, only a section of chunk-size would be downloaded once as instructed
            // by the programmer through sub-partition specification
            for (int k = 0; k < commonDimensionLength; k += CHUNK_SIZE) {

                // barrier: the previous iteration's tiles must be fully
                // consumed before they are overwritten below
                __syncthreads();

                // Cleanup operation .................................................................
                // cleanup old/invalid A_parts and B_parts from shared memory
                // (zeroed entries make the fixed-trip multiply loop below safe
                // at ragged edges: 0 * anything contributes nothing)
                for (int row = warpId; row < BLOCK_A_HEIGHT; row += WARP_COUNT) {
                    for (int cleanupIndex = threadId; cleanupIndex < CHUNK_SIZE;
                            cleanupIndex += WARP_SIZE) {
                        A_parts[row][cleanupIndex] = 0;
                    }
                }
                for (int row = warpId; row < CHUNK_SIZE; row += WARP_COUNT) {
                    for (int cleanupIndex = threadId; cleanupIndex < BLOCK_B_WIDTH;
                            cleanupIndex += WARP_SIZE) {
                        B_parts[row][cleanupIndex] = 0;
                    }
                }

                // Data read from global to shared memory .............................................
                // determine A's row and column boundaries
                int beginARow = i;
                int endARow = (i + BLOCK_A_HEIGHT - 1) > rowsEnd ?
                        rowsEnd : beginARow + BLOCK_A_HEIGHT - 1;
                int beginAColumn = k;
                int endAColumn = (k + CHUNK_SIZE - 1) >= commonDimensionLength ?
                        commonDimensionLength - 1 : beginAColumn + CHUNK_SIZE - 1;

                // download a section of A; differnt warps download different rows
                for(int rowForCurrentWarp = beginARow + warpId; rowForCurrentWarp <= endARow;
                        rowForCurrentWarp += WARP_COUNT) {
                    int localRowIndex = rowForCurrentWarp - beginARow;
                    int globalRowStart = rowForCurrentWarp * A_MD.dimension2;
                    // different threads download different elements of A from the global memory
                    for (int elementIndex = beginAColumn + threadId;
                            elementIndex <= endAColumn; elementIndex += WARP_SIZE) {
                        A_parts[localRowIndex][elementIndex - beginAColumn]
                                = A_GPU[globalRowStart + elementIndex];
                    }
                }

                // determine B's row and column boundaries
                int beginBRow = k;
                int endBRow = (k + CHUNK_SIZE - 1) >= commonDimensionLength ?
                        commonDimensionLength - 1 : beginBRow + CHUNK_SIZE - 1;
                int beginBColumn = j;
                int endBColumn = (j + BLOCK_B_WIDTH - 1) >= B_MD.dimension2 ?
                        B_MD.dimension2 - 1 : beginBColumn + BLOCK_B_WIDTH - 1;

                // download a section of B; different warps download different rows
                for (int rowForCurrentWarp = beginBRow + warpId; rowForCurrentWarp <= endBRow;
                        rowForCurrentWarp += WARP_COUNT) {
                    int localRowIndex = rowForCurrentWarp - beginBRow;
                    int globalRowStart = rowForCurrentWarp * B_MD.dimension2;
                    // different threads download different elements of B from the global memory
                    for (int elementIndex = beginBColumn + threadId;
                            elementIndex <= endBColumn; elementIndex += WARP_SIZE) {
                        B_parts[localRowIndex][elementIndex - beginBColumn]
                                = B_GPU[globalRowStart + elementIndex];
                    }
                }

                //__threadfence_block();
                __syncthreads(); // do a sync to make A and B parts available to all threads in the SM

                // Block matrix multiplication kernel .............................................
                // different threads take care of different B columns
                for (int c = threadId; c < columnCount; c+= WARP_SIZE) {
                    // different warps take care of different rows of A
                    for (int r = warpId; r < rowCount; r += WARP_COUNT) {
                        // all threads go through each values of the common dimension
                        for (int e = 0; e < CHUNK_SIZE; e++) {
                            D_partialResults[r][c] += A_parts[r][e] * B_parts[e][c];
                        }
                    }
                }
            } // loop over k's ends

            //__threadfence_block();
            __syncthreads(); // do a sync to make updated D == C available to all threads

            // Data upload from shared to global memory .................................................
            // let different warps upload different rows and different threads within each warp
            // upload different columns of C
            for (int row = warpId; row < rowCount; row += WARP_COUNT) {
                int rowIndex = i + row;
                for (int column = threadId; column < columnCount; column += WARP_SIZE) {
                    int columnIndex = j + column;
                    int C_index = rowIndex * C_MD.dimension2 + columnIndex;
                    C_GPU[C_index] = D_partialResults[row][column];
                }
            }
        } // loop over j's ends
    } // loop over i's ends
}
// Reference CPU implementation: C = A * B via the textbook triple loop.
// Reads the global host matrices A and B and allocates the global C.
void computeCpuMMM() {

    // allocate the result matrix for the CPU computation
    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(double);
    C = (double*) malloc(bytesC);

    // C[row][col] = sum over k of A[row][k] * B[k][col]
    for (int row = 0; row < A_MD.dimension1; row++) {
        const int aRowStart = row * A_MD.dimension2;
        const int cRowStart = row * C_MD.dimension2;
        for (int col = 0; col < B_MD.dimension2; col++) {
            double sum = 0;
            for (int k = 0; k < B_MD.dimension1; k++) {
                sum += A[aRowStart + k] * B[k * B_MD.dimension2 + col];
            }
            C[cRowStart + col] = sum;
        }
    }
}
// Compare the CPU reference result (C) against the GPU result (C_CPU),
// printing every element pair that differs by more than 0.01 and a summary.
void compareHostAndGpuOutput() {
    const int totalElements = C_MD.dimension1 * C_MD.dimension2;
    int missmatchCount = 0;
    for (int idx = 0; idx < totalElements; idx++) {
        if (fabs(C[idx] - C_CPU[idx]) > 0.01) {
            missmatchCount++;
            printf("mismatch at index %i: %f\t%f\n", idx, C[idx], C_CPU[idx]);
        }
    }
    if (missmatchCount > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
|
d69583f446ed33ef9bf7b3902ebb0394b2c3f548.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2015 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
#include "cudnn_dlibapi.h"
#include <math_constants.h>
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Make the given device current for the calling host thread.
void set_device (
    int dev
)
{
    CHECK_CUDA(hipSetDevice(dev));
}

// Return the device id currently selected for the calling host thread.
int get_device (
)
{
    int dev = 0;
    CHECK_CUDA(hipGetDevice(&dev));
    return dev;
}

// Return the human-readable name of the given device.
std::string get_device_name (
    int device
)
{
    hipDeviceProp_t props;
    CHECK_CUDA(hipGetDeviceProperties(&props, device));
    return props.name;
}

// Make host synchronization with the current device use blocking waits
// rather than busy spinning.
void set_current_device_blocking_sync(
)
{
    CHECK_CUDA(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
}

// Number of GPU devices visible to the runtime.
int get_num_devices (
)
{
    int num_devices;
    CHECK_CUDA(hipGetDeviceCount(&num_devices));
    return num_devices;
}

// True if device_id can directly access memory on peer_device_id.
bool can_access_peer (int device_id, int peer_device_id)
{
    int can_access;
    CHECK_CUDA(hipDeviceCanAccessPeer(&can_access, device_id, peer_device_id));
    return can_access != 0;
}
// Convenience overload operating on the devices that own the given tensors.
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
    return can_access_peer(device.device_id(), peer_device.device_id());
}

// Block the host until all pending work on the given device has finished.
void device_synchronize (int dev)
{
    raii_set_device set_dev(dev);
    CHECK_CUDA(hipDeviceSynchronize());
}
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }

// RAII helper: enables peer access from device_id to peer_device_id on
// construction; the destructor disables it again only if this object was
// the one that enabled it (call_disable tracks that).
enable_peer_access::
enable_peer_access(
    int device_id,
    int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
    raii_set_device set_dev(device_id);

    auto err = hipDeviceEnablePeerAccess(peer_device_id, 0);
    if (err == hipSuccess)
    {
        call_disable = true;
    }
    else if (err == hipErrorPeerAccessAlreadyEnabled)
    {
        // call hipGetLastError() to dispose of this error since we don't
        // care.
        auto err2 = hipGetLastError();
        if (err2 != hipErrorPeerAccessAlreadyEnabled)
            CHECK_CUDA(err2);
    }
    else
    {
        CHECK_CUDA(err);
    }
}

enable_peer_access::
~enable_peer_access() noexcept(false)
{
    if (call_disable)
    {
        raii_set_device set_dev(device_id);
        CHECK_CUDA(hipDeviceDisablePeerAccess(peer_device_id));
    }
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// For every row i of `data` (nr rows of nc elements), computes
// invnorms[i] = 1/sqrt(eps + sum of squared row elements).
// Rows are distributed over the y grid-stride range; lanes accumulate
// partial squared sums that warp_reduce_atomic_add folds into invnorms[i].
// NOTE(review): the __syncthreads() barriers assume dlib's launch_kernel/
// max_jobs configuration keeps all threads iterating uniformly - confirm
// against cuda_utils.h.
__global__ void _cuda_inverse_norms(float* invnorms, const float* data, size_t nr, size_t nc, const float eps)
{
    // initialize invnorms before we begin.
    for (auto i : grid_stride_range_y(0, nr))
        for (auto j : grid_stride_range(0, 1))
            invnorms[i] = eps;
    __syncthreads();

    for (auto i : grid_stride_range_y(0, nr))
    {
        auto p = data + i*nc;
        float temp = 0;
        for (auto j : grid_stride_range(0, nc))
            temp += p[j]*p[j];

        // and store the sum into invnorms[i]
        warp_reduce_atomic_add(invnorms[i], temp);
    }
    __syncthreads();

    for (auto i : grid_stride_range_y(0, nr))
        for (auto j : grid_stride_range(0, 1))
            invnorms[i] = 1.0/std::sqrt(invnorms[i]);
}

// Resizes invnorms to one element per sample and fills it with the
// eps-regularized inverse L2 norm of each sample's elements.
void inverse_norms (
    resizable_tensor& invnorms,
    const tensor& data,
    const double eps
)
{
    invnorms.set_size(data.num_samples());
    launch_kernel(_cuda_inverse_norms, max_jobs(data.size()/data.num_samples(), data.num_samples()),
        invnorms.device(), data.device(), data.num_samples(), data.size()/data.num_samples(), eps);
}
// ----------------------------------------------------------------------------------------
// out[i] = dot product of row i of lhs with row i of rhs (nr rows, nc cols).
// The first pass zeroes out[] because warp_reduce_atomic_add accumulates
// into it.
__global__ void _cuda_dot_prods(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc)
{
    // initialize out before we begin.
    for (auto i : grid_stride_range_y(0, nr))
        for (auto j : grid_stride_range(0, 1))
            out[i] = 0;
    __syncthreads();

    for (auto i : grid_stride_range_y(0, nr))
    {
        auto l = lhs + i*nc;
        auto r = rhs + i*nc;
        float temp = 0;
        for (auto j : grid_stride_range(0, nc))
            temp += l[j]*r[j];

        // and store the sum into out[i]
        warp_reduce_atomic_add(out[i], temp);
    }
}

// Same as _cuda_dot_prods but accumulates into out's existing contents
// instead of overwriting them.
__global__ void _cuda_dot_prods_add_to(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc)
{
    for (auto i : grid_stride_range_y(0, nr))
    {
        auto l = lhs + i*nc;
        auto r = rhs + i*nc;
        float temp = 0;
        for (auto j : grid_stride_range(0, nc))
            temp += l[j]*r[j];

        // and store the sum into out[i]
        warp_reduce_atomic_add(out[i], temp);
    }
}

// Resizes out to one element per sample and fills it with the row-wise dot
// products of lhs and rhs (which must have identical dimensions).
void dot_prods (
    resizable_tensor& out,
    const tensor& lhs,
    const tensor& rhs
)
{
    DLIB_CASSERT(have_same_dimensions(lhs,rhs));

    out.set_size(lhs.num_samples());
    if (out.size() == 0)
        return;

    const auto nr = lhs.num_samples();
    const auto nc = lhs.size()/lhs.num_samples();

    launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc);
}

// As above but out is caller-sized; when add_to is true the dot products are
// added to out's current values instead of replacing them.
void dot_prods (
    bool add_to,
    tensor& out,
    const tensor& lhs,
    const tensor& rhs
)
{
    DLIB_CASSERT(have_same_dimensions(lhs,rhs));
    DLIB_CASSERT(out.k() == 1 && out.nr() == 1 && out.nc() == 1);
    DLIB_CASSERT(out.size() == lhs.num_samples());

    const auto nr = lhs.num_samples();
    const auto nc = lhs.size()/lhs.num_samples();

    if (add_to)
        launch_kernel(_cuda_dot_prods_add_to, max_jobs(nc,nr), out.device(), lhs.device(), rhs.device(), nr, nc);
    else
        launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc);
}
// ----------------------------------------------------------------------------------------
// out[r][c] = m[r][c] * v[c]: scales each column c of the nr x nc matrix m
// by v[c], using a flat grid-stride loop over all nr*nc elements.
__global__ void _cuda_scale_columns(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto j : grid_stride_range(0, nr*nc))
    {
        out[j] = m[j]*v[j%nc];
    }
}

// Column-wise scaling of m by v; v supplies one scale factor per column
// (i.e. per element of a sample).
void scale_columns (
    tensor& out,
    const tensor& m,
    const tensor& v
)
{
    launch_kernel(_cuda_scale_columns, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// out[r][c] = m[r][c] * v[r]: scales each row r of the nr x nc matrix m
// by v[r], using a flat grid-stride loop over all nr*nc elements.
__global__ void _cuda_scale_rows(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto j : grid_stride_range(0, nr*nc))
    {
        out[j] = m[j]*v[j/nc];
    }
}

// Row-wise scaling of m by v; v supplies one scale factor per sample.
void scale_rows (
    tensor& out,
    const tensor& m,
    const tensor& v
)
{
    launch_kernel(_cuda_scale_rows, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// out[r][c] = (m1[r][c] - m2[r][c]*v1[r]) * v2[r]
// v1 and v2 hold one scalar per row.
__global__ void _cuda_scale_rows2(float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
    for (auto j : grid_stride_range(0, nr*nc))
    {
        out[j] = (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
    }
}

// As _cuda_scale_rows2 but blends with the existing output:
// out[r][c] = beta*out[r][c] + (m1[r][c] - m2[r][c]*v1[r]) * v2[r]
__global__ void _cuda_scale_rows2_beta(const float beta, float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
    for (auto j : grid_stride_range(0, nr*nc))
    {
        out[j] = beta*out[j] + (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
    }
}

// Computes out = beta*out + (m1 - m2 scaled per-row by v1) scaled per-row by
// v2. When beta == 0 the overwrite kernel is used, which also avoids reading
// potentially uninitialized output memory.
void scale_rows2 (
    float beta,
    tensor& out,
    const tensor& m1,
    const tensor& m2,
    const tensor& v1,
    const tensor& v2
)
{
    if (beta == 0)
    {
        launch_kernel(_cuda_scale_rows2, max_jobs(m1.size()), out.device(),
            m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(),
            m1.size()/m1.num_samples());
    }
    else
    {
        launch_kernel(_cuda_scale_rows2_beta, max_jobs(m1.size()), beta,
            out.device(), m1.device(), m2.device(), v1.device(), v2.device(),
            m1.num_samples(), m1.size()/m1.num_samples());
    }
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_exp(float* dest, const float* src, size_t n)
{
    // Elementwise dest = exp(src).
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::exp(src[idx]);
}
void exp (
    tensor& dest,
    const tensor& src
)
{
    // Elementwise exponential of src written into dest.
    DLIB_ASSERT(dest.size() == src.size());
    const auto n = src.size();
    launch_kernel(_cuda_exp, max_jobs(n), dest.device(), src.device(), n);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_log(float* dest, const float* src, size_t n)
{
    // Elementwise dest = log(src) (natural log).
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::log(src[idx]);
}
void log (
    tensor& dest,
    const tensor& src
)
{
    // Elementwise natural logarithm of src written into dest.
    DLIB_ASSERT(dest.size() == src.size());
    const auto n = src.size();
    launch_kernel(_cuda_log, max_jobs(n), dest.device(), src.device(), n);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_log10(float* dest, const float* src, size_t n)
{
    // Elementwise dest = log10(src).
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::log10(src[idx]);
}
void log10 (
    tensor& dest,
    const tensor& src
)
{
    // Elementwise base-10 logarithm of src written into dest.
    DLIB_ASSERT(dest.size() == src.size());
    const auto n = src.size();
    launch_kernel(_cuda_log10, max_jobs(n), dest.device(), src.device(), n);
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
    // Elementwise product d = s1 .* s2 (all three the same length).
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]*s2[idx];
}
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
                                size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    // Broadcasting multiply where d is smaller than the inputs: each output
    // element accumulates products of the mod-wrapped input elements.
    for (auto idx : grid_stride_range(0, n))
    {
        d[idx] = 0;
        for (size_t j = idx; j < max_size; j += n)
            d[idx] += s1[j%s1_n]*s2[j%s2_n];
    }
}
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
                                size_t n, size_t s1_n, size_t s2_n)
{
    // Broadcasting multiply where d is the largest tensor: inputs wrap around
    // via modular indexing.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx%s1_n]*s2[idx%s2_n];
}
__global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    // Elementwise product accumulated into d: d += s1 .* s2.
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx]*s2[idx];
}
__global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2,
                                       size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    // Like _cuda_multiply2 but accumulates into d instead of overwriting it.
    for (auto idx : grid_stride_range(0, n))
    {
        for (size_t j = idx; j < max_size; j += n)
            d[idx] += s1[j%s1_n]*s2[j%s2_n];
    }
}
__global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2,
                                       size_t n, size_t s1_n, size_t s2_n)
{
    // Like _cuda_multiply3 but accumulates into d instead of overwriting it.
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx%s1_n]*s2[idx%s2_n];
}
void multiply (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    // dest = src1 .* src2 (or dest += ... when add_to), with broadcasting over
    // the num_samples dimension: any of the three tensors may have
    // num_samples()==1 and is then cycled over.
    DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
        dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
        dest.nc() == src1.nc() && src1.nc() == src2.nc() );
    const long MD = ::max(::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
    DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
        (src1.num_samples()==1 || src1.num_samples()==MD) &&
        (src2.num_samples()==1 || src2.num_samples()==MD) );
    if (dest.size() == 0)
        return;
    const size_t max_size = ::max(::max(dest.size(),src1.size()),src2.size());
    // NOTE(review): this function previously declared unused locals initialized
    // from dest.host()/src1.host()/src2.host().  Those calls force synchronous
    // device<->host transfers (and the non-const dest.host() marks the host
    // buffer as the current copy), all for data that was never read, so they
    // have been removed.
    if (dest.size() == src1.size() && src1.size() == src2.size())
    {
        // No broadcasting needed: plain elementwise multiply.
        if (add_to)
            launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
        else
            launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
    }
    else if (dest.num_samples() == 1)
    {
        // dest is the smallest tensor: reduce the broadcasted products into it.
        if (add_to)
            launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                          dest.size(), src1.size(), src2.size(), max_size);
        else
            launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                          dest.size(), src1.size(), src2.size(), max_size);
    }
    else
    {
        // dest is the largest tensor: inputs wrap via modular indexing.
        if (add_to)
            launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                          dest.size(), src1.size(), src2.size());
        else
            launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                          dest.size(), src1.size(), src2.size());
    }
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    // Per-channel scale: each plane of bs elements in s1 is multiplied by its
    // channel's scalar in s2 (ks channels, repeating across samples).
    for (auto idx : grid_stride_range(0, n))
    {
        const auto ch = (idx/bs)%ks;
        d[idx] = s1[idx]*s2[ch];
    }
}
// Reduces the per-plane dot products of s1 and s2 into the ks-element output d:
// d[k] = sum over planes i with i%ks==k of dot(plane i of s1, plane i of s2).
// n is the number of bs-sized planes.
// NOTE(review): the zero-initialization below is separated from the
// accumulation only by __syncthreads(), a per-block barrier; correctness
// appears to rely on the grid shape produced by max_jobs(bs,n) at the call
// site -- confirm before changing the launch configuration.
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    // zero initialize d before we begin.
    // (the inner grid_stride_range(0,1) restricts the write to one x-thread
    // per y index so each d[i] is zeroed exactly once)
    for (auto i : grid_stride_range_y(0, ks))
        for (auto j : grid_stride_range(0, 1))
            d[i] = 0;
    __syncthreads();
    // loop over all the image planes
    for (auto i : grid_stride_range_y(0, n))
    {
        // sum all the elements in the i-th image plane
        float temp = 0;
        for (auto j : grid_stride_range(i*bs, (i+1)*bs))
            temp += s1[j]*s2[j];
        auto k = i%ks;
        // and store the sum into d[k]
        warp_reduce_atomic_add(d[k], temp);
    }
}
__global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    // Like _cuda_multiply_conv but accumulates into d instead of overwriting.
    for (auto idx : grid_stride_range(0, n))
    {
        const auto ch = (idx/bs)%ks;
        d[idx] += s1[idx]*s2[ch];
    }
}
// Like _cuda_multiply_conv2 but accumulates into d (no zero-initialization):
// d[k] += sum over planes i with i%ks==k of dot(plane i of s1, plane i of s2).
__global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    // loop over all the image planes
    for (auto i : grid_stride_range_y(0, n))
    {
        // sum all the elements in the i-th image plane
        float temp = 0;
        for (auto j : grid_stride_range(i*bs, (i+1)*bs))
            temp += s1[j]*s2[j];
        auto k = i%ks;
        // and store the sum into d[k]
        warp_reduce_atomic_add(d[k], temp);
    }
}
void multiply_conv (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    // Channel-wise multiply.  Two shapes are supported: dest shaped like src1
    // (scale every channel plane of src1 by src2's per-channel scalar), or
    // dest a per-channel vector (sum of elementwise products per channel).
    if (have_same_dimensions(dest,src1))
    {
        DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k());
        if (dest.size() == 0)
            return;
        const auto plane = src1.nr()*src1.nc();
        if (add_to)
            launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()),
                dest.device(), src1.device(), src1.size(), src2.device(), plane, src1.k());
        else
            launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
                dest.device(), src1.device(), src1.size(), src2.device(), plane, src1.k());
    }
    else
    {
        DLIB_CASSERT(have_same_dimensions(src1,src2));
        DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k());
        if (dest.size() == 0)
            return;
        const auto bs = src1.nr()*src1.nc();
        const auto n = src1.num_samples()*src1.k();
        if (add_to)
            launch_kernel(_cuda_multiply_conv2_add_to, max_jobs(bs,n),
                dest.device(), src1.device(), n, src2.device(), bs, src1.k());
        else
            launch_kernel(_cuda_multiply_conv2, max_jobs(bs,n),
                dest.device(), src1.device(), n, src2.device(), bs, src1.k());
    }
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_scale_channels_add_to(float* d, const float* src, size_t n, const float* scales, size_t bs)
{
    // d += src scaled per channel plane (bs elements per plane).
    for (auto idx : grid_stride_range(0, n))
        d[idx] += src[idx]*scales[idx/bs];
}
__global__ void _cuda_scale_channels(float* d, const float* src, size_t n, const float* scales, size_t bs)
{
    // d = src scaled per channel plane (bs elements per plane).
    for (auto idx : grid_stride_range(0, n))
        d[idx] = src[idx]*scales[idx/bs];
}
void scale_channels (
    bool add_to,
    tensor& dest,
    const tensor& src,
    const tensor& scales
)
{
    // Multiply every (sample,channel) plane of src by the matching scalar in
    // scales; write to dest, or accumulate when add_to is true.
    DLIB_CASSERT(have_same_dimensions(dest,src) &&
                 scales.num_samples() == src.num_samples() &&
                 scales.k()           == src.k() &&
                 scales.nr()          == 1 &&
                 scales.nc()          == 1 );
    if (dest.size() == 0)
        return;
    const auto plane = src.nr()*src.nc();
    if (add_to)
        launch_kernel(_cuda_scale_channels_add_to,max_jobs(dest.size()),
            dest.device(), src.device(), src.size(), scales.device(), plane);
    else
        launch_kernel(_cuda_scale_channels,max_jobs(dest.size()),
            dest.device_write_only(), src.device(), src.size(), scales.device(), plane);
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_mult1(float* d, const float* s1, const float* s2, size_t n)
{
    // Elementwise product d = s1 .* s2.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]*s2[idx];
}
__global__ void _cuda_mult1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    // Elementwise product accumulated into d: d += s1 .* s2.
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx]*s2[idx];
}
// Elementwise product with zero padding: for each destination element, the
// matching element of s1/s2 is used if it lies inside that tensor's
// (n,k,r,c) bounds, and 0 otherwise.
__global__ void _cuda_mult2(float* d, const float* s1, const float* s2,
                            size_t dn, size_t dk, size_t dr, size_t dc,
                            size_t s1n, size_t s1k, size_t s1r, size_t s1c,
                            size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        // decompose the flat dest index into (sample, channel, row, col)
        size_t n,k,r,c;
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        float v1 = 0;
        float v2 = 0;
        // read from s1 only when the coordinate exists in s1, else treat as 0
        if (n < s1n &&
            k < s1k &&
            r < s1r &&
            c < s1c )
        {
            v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
        }
        // likewise for s2
        if (n < s2n &&
            k < s2k &&
            r < s2r &&
            c < s2c )
        {
            v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
        }
        d[i] = v1*v2;
    }
}
// Like _cuda_mult2 (zero-padded elementwise product) but accumulates into d.
__global__ void _cuda_mult2_add_to(float* d, const float* s1, const float* s2,
                                   size_t dn, size_t dk, size_t dr, size_t dc,
                                   size_t s1n, size_t s1k, size_t s1r, size_t s1c,
                                   size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        // decompose the flat dest index into (sample, channel, row, col)
        size_t n,k,r,c;
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        float v1 = 0;
        float v2 = 0;
        // out-of-bounds coordinates contribute 0
        if (n < s1n &&
            k < s1k &&
            r < s1r &&
            c < s1c )
        {
            v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
        }
        if (n < s2n &&
            k < s2k &&
            r < s2r &&
            c < s2c )
        {
            v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
        }
        d[i] += v1*v2;
    }
}
void multiply_zero_padded (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    // Elementwise multiply where src1/src2 are treated as zero outside their
    // own bounds; dest may differ in shape from either input.
    if (dest.size() == 0)
        return;
    const bool all_same = have_same_dimensions(dest, src1) &&
                          have_same_dimensions(dest, src2);
    if (all_same)
    {
        // Fast path: plain one-to-one elementwise multiply.
        if (add_to)
            launch_kernel(_cuda_mult1_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
        else
            launch_kernel(_cuda_mult1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else if (add_to)
    {
        // General path with per-element bounds checking.
        launch_kernel(_cuda_mult2_add_to,max_jobs(dest.size()),
                      dest.device(), src1.device(), src2.device(),
                      dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
                      src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
                      src2.num_samples(), src2.k(), src2.nr(), src2.nc()
                      );
    }
    else
    {
        // General path with per-element bounds checking.
        launch_kernel(_cuda_mult2,max_jobs(dest.size()),
                      dest.device(), src1.device(), src2.device(),
                      dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
                      src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
                      src2.num_samples(), src2.k(), src2.nr(), src2.nc()
                      );
    }
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
    // Elementwise sum d = s1 + s2.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]+s2[idx];
}
// Elementwise sum with zero padding: coordinates outside a source tensor's
// (n,k,r,c) bounds contribute 0 to that element of d.
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
                           size_t dn, size_t dk, size_t dr, size_t dc,
                           size_t s1n, size_t s1k, size_t s1r, size_t s1c,
                           size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        // decompose the flat dest index into (sample, channel, row, col)
        size_t n,k,r,c;
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        float v1 = 0;
        float v2 = 0;
        // out-of-bounds coordinates contribute 0
        if (n < s1n &&
            k < s1k &&
            r < s1r &&
            c < s1c )
        {
            v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
        }
        if (n < s2n &&
            k < s2k &&
            r < s2r &&
            c < s2c )
        {
            v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
        }
        d[i] = v1+v2;
    }
}
void add (
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    // dest = src1 + src2, where sources are treated as zero outside their own
    // bounds when the shapes differ.
    if (dest.size() == 0)
        return;
    const bool all_same = have_same_dimensions(dest, src1) &&
                          have_same_dimensions(dest, src2);
    if (all_same)
    {
        // Fast path: plain elementwise add.
        launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else
    {
        // General path with per-element bounds checking.
        launch_kernel(_cuda_add2,max_jobs(dest.size()),
                      dest.device(), src1.device(), src2.device(),
                      dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
                      src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
                      src2.num_samples(), src2.k(), src2.nr(), src2.nc()
                      );
    }
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
    // d = A*s + B, elementwise.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx] + B;
}
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
    // d = A*s, elementwise (B==0 specialization).
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx];
}
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A,
    const float B
)
{
    // dest = A*src + B.  Dispatch to the add-free kernel when B is zero.
    DLIB_CASSERT(dest.size()==src.size());
    if (B != 0)
        launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
    else
        launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A
)
{
    // dest = A*src (no additive term).
    DLIB_CASSERT(dest.size()==src.size());
    const auto n = dest.size();
    launch_kernel(_cuda_affine_transform1_0,max_jobs(n),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform_rect(
    float* d,
    const float* s1,
    const float* s2,
    const float* s3,
    float A,
    float B,
    float C,
    size_t start_idx,
    size_t n,
    size_t rect_nc,
    size_t total_nc
)
{
    // d = A*s1 + B*s2 + C*s3, applied only inside a rect_nc-wide window of
    // rows whose full width is total_nc, starting at flat offset start_idx.
    for (auto i : grid_stride_range(0, n))
    {
        const size_t row = i/rect_nc;
        const size_t col = i%rect_nc;
        const size_t idx = row*total_nc + col + start_idx;
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
    }
}
// dest = A*src1 + B*src2 + C*src3, but only inside the given rectangle when
// the tensors are viewed as 2D matrices with num_samples() rows and
// size()/num_samples() columns.  Elements outside rect are left untouched.
void affine_transform(
    const rectangle& rect,
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    float A,
    float B,
    float C
)
{
    DLIB_CASSERT(dest.size() == src1.size());
    DLIB_CASSERT(dest.size() == src2.size());
    DLIB_CASSERT(dest.size() == src3.size());
    DLIB_CASSERT(dest.num_samples() == src1.num_samples());
    DLIB_CASSERT(dest.num_samples() == src2.num_samples());
    DLIB_CASSERT(dest.num_samples() == src3.num_samples());
    // rect must lie inside the matrix view of dest
    DLIB_CASSERT(rectangle(0,0, dest.size()/dest.num_samples()-1, dest.num_samples()-1).contains(rect));
    // start_idx is the flat index of rect's top-left corner; the kernel walks
    // the rect.area() window elements.
    launch_kernel(_cuda_affine_transform_rect,max_jobs(rect.area()),
        dest.device(), src1.device(), src2.device(), src3.device(), A, B, C,
        rect.left() + rect.top()*(dest.size()/dest.num_samples()),
        rect.area(),
        rect.width(),
        dest.size()/dest.num_samples());
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
    // d = A*s1 + B*s2 + C, elementwise.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C;
}
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
    // d = A*s1 + B*s2, elementwise (C==0 specialization).
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx];
}
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B,
    const float C
)
{
    // dest = A*src1 + B*src2 + C.  Use the add-free kernel when C is zero.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    if (C != 0)
        launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
    else
        launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B
)
{
    // dest = A*src1 + B*src2 (no constant term).
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    const auto n = dest.size();
    launch_kernel(_cuda_affine_transform4_0,max_jobs(n),dest.device(), src1.device(), src2.device(), n, A, B);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
    // d += scale*s, elementwise.
    for (auto idx : grid_stride_range(0, n))
        d[idx] += scale*s[idx];
}
void add_scaled(
    tensor& dest,
    const float scale,
    const tensor& src
)
{
    // dest += scale*src (axpy-style accumulate).
    DLIB_CASSERT(dest.size()==src.size());
    const auto n = dest.size();
    launch_kernel(_cuda_add_scaled,max_jobs(n),dest.device(), src.device(), n, scale);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_add_cv_to_all_columns(float beta, float* dest, float alpha, const float* src, size_t size, size_t stride)
{
    // dest = beta*dest + alpha*(column vector src broadcast over each row of
    // stride elements).
    for (auto idx : grid_stride_range(0, size))
        dest[idx] = beta*dest[idx] + alpha*src[idx/stride];
}
__global__ void _cuda_add_cv_to_all_columns_no_beta(float* dest, float alpha, const float* src, size_t size, size_t stride)
{
    // dest = alpha*(column vector src broadcast over each row); beta==0 case,
    // so dest is overwritten and never read.
    for (auto idx : grid_stride_range(0, size))
        dest[idx] = alpha*src[idx/stride];
}
void add_cv_to_all_columns(
    float beta,
    tensor& dest,
    float alpha,
    const tensor& src
)
{
    // dest = beta*dest + alpha*src, where src is one value per sample that is
    // broadcast across every column of that sample's row in dest.
    DLIB_CASSERT(dest.num_samples() == src.num_samples() && src.num_samples() == src.size());
    const auto stride = dest.size()/dest.num_samples();
    if (beta == 0)
        launch_kernel(_cuda_add_cv_to_all_columns_no_beta, max_jobs(dest.size()), dest.device(), alpha, src.device(), dest.size(), stride);
    else
        launch_kernel(_cuda_add_cv_to_all_columns, max_jobs(dest.size()), beta, dest.device(), alpha, src.device(), dest.size(), stride);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform5(
    float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
    // d = A*s1 + B*s2 + C*s3 + D, elementwise.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx] + D;
}
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C,
    const float D
)
{
    // dest = A*src1 + B*src2 + C*src3 + D.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    const auto n = dest.size();
    launch_kernel(_cuda_affine_transform5,max_jobs(n),dest.device(), src1.device(),
        src2.device(), src3.device(), n, A, B, C, D);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform_range(
    float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C
)
{
    // d = A*s1 + B*s2 + C*s3 over the index range [begin, end) only.
    for (auto idx : grid_stride_range(begin, end))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
}
void affine_transform_range(
    size_t begin,
    size_t end,
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C
)
{
    // dest = A*src1 + B*src2 + C*src3, applied only to elements in
    // [begin, end); everything else in dest is untouched.
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    DLIB_CASSERT(begin <= end && end <= dest.size());
    launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin),
        dest.device(), src1.device(),
        src2.device(), src3.device(), begin, end, A, B, C);
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
    // Per-element affine map: d = A .* s + B, where A and B match s in size.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A[idx]*s[idx] + B[idx];
}
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
    // Affine map with A and B of size bs cycled over s (sample broadcasting).
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A[idx%bs]*s[idx] + B[idx%bs];
}
// dest = A .* src + B, where A and B are tensors that either match src's
// shape exactly or have num_samples()==1 and are broadcast across samples.
void affine_transform(
    tensor& dest,
    const tensor& src,
    const tensor& A,
    const tensor& B
)
{
    DLIB_CASSERT(have_same_dimensions(dest, src));
    // A and B must agree with each other and be broadcastable over src
    DLIB_CASSERT(
          ((A.num_samples()==1 && B.num_samples()==1) ||
          (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())));
    DLIB_CASSERT(
          A.nr()==B.nr() && B.nr()==src.nr() &&
          A.nc()==B.nc() && B.nc()==src.nc() &&
          A.k() ==B.k()  && B.k()==src.k(),
          "\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr()
          <<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc()
          <<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k()
    );
    if (A.num_samples() == 1)
    {
        // broadcast the single-sample A/B across every sample of src
        launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
    }
    else
    {
        // one-to-one elementwise affine map
        launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
    }
}
// ----------------------------------------------------------------------------------------
// Adam optimizer update for parameter indices [begin, end):
//   m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
//   v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
//   s = -alpha*m/(sqrt(v) + eps);
// alpha is expected to already include the bias-correction factor (computed
// by the host-side wrapper).
__global__ void _cuda_compute_adam_update(
    size_t begin,
    size_t end,
    float* s,
    float* m,
    float* v,
    const float alpha,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const float* params,
    const float* params_grad
)
{
    // use a float literal (1e-8f) so this float-only kernel never touches a
    // double-precision constant
    const float eps = 1e-8f;
    for (auto i : grid_stride_range(begin, end))
    {
        // g is the weight-decayed gradient
        float g = (weight_decay*params[i] + params_grad[i]);
        m[i] = momentum1*m[i] + (1-momentum1)*g;
        v[i] = momentum2*v[i] + (1-momentum2)*g*g;
        s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
    }
}
// Host wrapper for the Adam update.  t is the (1-based) iteration number used
// for bias correction; the corrected step size alpha is folded into a single
// scalar before launching the kernel.
void compute_adam_update (
    size_t begin,
    size_t end,
    tensor& s,
    tensor& m,
    tensor& v,
    const float t,
    const float learning_rate,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const tensor& params,
    const tensor& params_grad
)
{
    DLIB_CASSERT(s.size() == m.size() &&
                 s.size() == v.size() &&
                 s.size() == params.size() &&
                 s.size() == params_grad.size());
    DLIB_CASSERT(begin <= end && end <= params.size());
    // bias-corrected learning rate: lr*sqrt(1-momentum2^t)/(1-momentum1^t)
    const float alpha = learning_rate*std::sqrt(1-::pow(momentum2,t))/(1-::pow(momentum1, t));
    launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
            begin, end, s.device(), m.device(), v.device(), alpha, weight_decay,
            momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
    // Per-channel affine map: each plane of bs elements uses its channel's
    // A[k] and B[k] (ks channels, repeating across samples).
    for (auto idx : grid_stride_range(0, n))
    {
        const auto ch = (idx/bs)%ks;
        d[idx] = A[ch]*s[idx] + B[ch];
    }
}
void affine_transform_conv(
    tensor& dest,
    const tensor& src,
    const tensor& A,
    const tensor& B
)
{
    // dest = A*src + B applied per channel, with one scalar A and B entry per
    // channel of src.
    DLIB_CASSERT(have_same_dimensions(dest, src));
    DLIB_CASSERT(have_same_dimensions(A, B));
    DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k());
    const auto plane = src.nr()*src.nc();
    launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
        dest.device(), src.device(), src.size(), A.device(), B.device(), plane, src.k());
}
// -----------------------------------------------------------------------------------
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
    // out[i] = sum over samples j of in[i + j*n]; i.e. reduce the gradient
    // over the sample dimension (n per-sample elements, total_n overall).
    for (auto idx : grid_stride_range(0, n))
    {
        out[idx] = in[idx];
        for (size_t j = idx+n; j < total_n; j+=n)
            out[idx] += in[j];
    }
}
void assign_bias_gradient (
    tensor& grad,
    const tensor& gradient_input
)
{
    // grad = gradient_input summed over its sample dimension (bias gradient).
    DLIB_CASSERT(
          grad.num_samples() == 1 &&
          gradient_input.k() == grad.k() &&
          gradient_input.nr() == grad.nr() &&
          gradient_input.nc() == grad.nc() &&
          gradient_input.size() > 0);
    const auto per_sample = grad.size();
    launch_kernel(_add_bias_gradient,max_jobs(per_sample),grad.device(), gradient_input.device(), per_sample, gradient_input.size());
}
// ----------------------------------------------------------------------------------------
__global__ void _set_tensor(float* out, size_t n, const float val)
{
    // Fill all n elements with val.
    for (auto idx : grid_stride_range(0, n))
        out[idx] = val;
}
void set_tensor (
    tensor& t,
    float value
)
{
    // Set every element of t to value.
    const auto n = t.size();
    launch_kernel(_set_tensor, max_jobs(n), t.device(), n, value);
}
// ----------------------------------------------------------------------------------------
__global__ void _scale_tensor(float* out, size_t n, const float val)
{
    // In-place scale of all n elements by val.
    for (auto idx : grid_stride_range(0, n))
        out[idx] *= val;
}
void scale_tensor (
    tensor& t,
    float value
)
{
    // Multiply every element of t by value, in place.
    const auto n = t.size();
    launch_kernel(_scale_tensor, max_jobs(n), t.device(), n, value);
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
    // Binarize in place: 1 where d > thresh, 0 otherwise.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = d[idx]>thresh ? 1:0;
}
void threshold (
    tensor& data,
    float thresh
)
{
    // In-place binarization of data against thresh.
    const auto n = data.size();
    launch_kernel(_cuda_threshold,max_jobs(n),data.device(), n, thresh);
}
// ------------------------------------------------------------------------------------
// Accumulates dot(a,b) into *result.  NOTE: this adds to the existing value
// at *result rather than overwriting it, so callers must initialize it.
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
    // Parallel sum everything into local temp variables.
    float temp = 0;
    for(auto i : grid_stride_range(0, n))
        temp += a[i]*b[i];
    // Then do the warp reduce add thing to merge into one output value.
    warp_reduce_atomic_add(*result, temp);
}
void dot (
    const tensor& a,
    const tensor& b,
    tensor& result,
    size_t idx
)
{
    // Accumulate dot(a,b) into result's idx-th element (the kernel adds to,
    // rather than overwrites, the existing value).
    DLIB_CASSERT(a.size() == b.size());
    DLIB_CASSERT(idx < result.size());
    const auto n = a.size();
    launch_kernel(_cuda_dot, max_jobs(n), a.device(), b.device(), n, result.device()+idx);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
    // PReLU forward: d = s where s > 0, otherwise p*s, with the single
    // learned slope p read from device memory at *pp.
    const float p = *pp;
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s[idx] > 0 ? s[idx] : p*s[idx];
}
void prelu (
    tensor& dest,
    const tensor& src,
    const tensor& param
)
{
    // PReLU activation with a single learned slope stored in param.
    const auto n = src.size();
    launch_kernel(_cuda_prelu, max_jobs(dest.size()),
                  src.device(), dest.device(), n, param.device());
}
// ----------------------------------------------------------------------------------------
// PReLU backward pass: accumulates (+=) the input gradient into out, scaled
// by the slope p for negative inputs, and reduces the slope's own gradient
// (sum of gi*s over negative inputs) into *ppgrad.
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
    const float p = *pp;
    // per-thread partial sum of the parameter gradient
    float pgrad = 0;
    for(auto i : grid_stride_range(0, n))
    {
        if (s[i] > 0)
        {
            out[i] += gi[i];
        }
        else
        {
            out[i] += p*gi[i];
            pgrad += gi[i]*s[i];
        }
    }
    // Then do the warp reduce add thing to merge into one output value.
    warp_reduce_atomic_add(*ppgrad, pgrad);
}
void prelu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input,
    const tensor& param,
    tensor& params_grad
)
{
    // PReLU backward: grad is accumulated (+=) while params_grad is reset to
    // zero first since the kernel reduces into it atomically.
    params_grad = 0;
    launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
                  grad.device(), src.device(), gradient_input.device(), grad.size(),
                  param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_leaky_relu(const float* s, float* d, size_t n, const float alpha)
{
    // Leaky ReLU forward: d = s where s > 0, otherwise alpha*s.
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s[idx] > 0 ? s[idx] : alpha * s[idx];
}
void leaky_relu(
    tensor& dest,
    const tensor &src,
    const float alpha
)
{
    // Leaky ReLU activation with fixed negative-side slope alpha.
    const auto n = src.size();
    launch_kernel(_cuda_leaky_relu, max_jobs(dest.size()),
                  src.device(), dest.device(), n, alpha);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_leaky_relu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha)
{
    // Leaky ReLU backward, overwrite variant (out may alias gi).
    for (auto idx : grid_stride_range(0, n))
        out[idx] = s[idx] > 0 ? gi[idx] : alpha * gi[idx];
}
__global__ void _cuda_leaky_relu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha)
{
    // Leaky ReLU backward, accumulate variant: out += scaled gradient.
    for (auto idx : grid_stride_range(0, n))
        out[idx] += s[idx] > 0 ? gi[idx] : alpha * gi[idx];
}
void leaky_relu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input,
    const float alpha
)
{
    // Leaky ReLU backward.  When grad aliases gradient_input the overwrite
    // kernel is used; otherwise the result is accumulated into grad.
    float* out = grad.device();
    const float* gi = gradient_input.device();
    const bool in_place = (out == gi);
    if (in_place)
    {
        launch_kernel(_cuda_leaky_relu_gradient_inplace, max_jobs(grad.size()),
                      out, src.device(), gi, grad.size(), alpha);
    }
    else
    {
        launch_kernel(_cuda_leaky_relu_gradient, max_jobs(grad.size()),
                      out, src.device(), gi, grad.size(), alpha);
    }
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_mish(const float* s, float* d, size_t n)
{
    // Mish activation, x*tanh(softplus(x)), computed via the algebraically
    // equivalent form x - 2x/(e^x*e^x + 2*e^x + 2).
    for (auto idx : grid_stride_range(0, n))
    {
        const float ex = ::exp(s[idx]);
        const float denom = 2*ex + ex*ex + 2;
        d[idx] = s[idx] - 2*s[idx]/denom;
    }
}
void mish (
    tensor& dest,
    const tensor& src
)
{
    // Elementwise Mish activation of src written into dest.
    const auto n = src.size();
    launch_kernel(_cuda_mish, max_jobs(dest.size()), src.device(), dest.device(), n);
}
// ----------------------------------------------------------------------------------------
// Derivative of the Mish activation at x.  For |x| >= 8 the derivative is
// clamped to its asymptotic values (1 for large positive x, 0 for large
// negative x) to avoid overflow in exp(x).
__device__ float mish_compute_gradient(float x)
{
    if (x >= 8)
        return 1.f;
    if (x <= -8)
        return 0.f;
    // closed form of d/dx [x*tanh(softplus(x))] expressed in e = exp(x)
    const auto e = ::exp(x);
    const auto delta = 2*e + e*e + 2;
    const auto omega = 4*(x + 1) + 4*e*e + e*e*e + e*(4*x + 6);
    return e*omega/(delta*delta);
}
__global__ void _cuda_mish_gradient_inplace(float* out, const float* s, const float* gi, size_t n)
{
    // Mish backward, overwrite variant (out may alias gi).
    for (auto idx : grid_stride_range(0, n))
        out[idx] = gi[idx]*mish_compute_gradient(s[idx]);
}
__global__ void _cuda_mish_gradient(float* out, const float* s, const float* gi, size_t n)
{
    // Mish backward, accumulate variant: out += gi * mish'(s).
    for (auto idx : grid_stride_range(0, n))
        out[idx] += gi[idx]*mish_compute_gradient(s[idx]);
}
void mish_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input
)
{
    // Mish backward.  Overwrites grad when it aliases gradient_input,
    // otherwise accumulates into it.
    float* out = grad.device();
    const float* gi = gradient_input.device();
    const bool in_place = (out == gi);
    if (in_place)
        launch_kernel(_cuda_mish_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size());
    else
        launch_kernel(_cuda_mish_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size());
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_gelu(const float* s, float* d, size_t n)
{
    // GELU activation: d = s * Phi(s), with Phi the standard normal CDF
    // (CUDA's normcdf).
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s[idx] * normcdf(s[idx]);
}
void gelu (
    tensor& dest,
    const tensor& src
)
{
    // Elementwise GELU activation of src written into dest.
    const auto n = src.size();
    launch_kernel(_cuda_gelu, max_jobs(dest.size()), src.device(), dest.device(), n);
}
// ----------------------------------------------------------------------------------------
// Derivative of exact GELU at x: d/dx [x*Phi(x)] = Phi(x) + x*phi(x), where
// Phi is the standard normal CDF and phi its density (1/sqrt(2*pi))*exp(-x^2/2).
__device__ float gelu_compute_gradient(float x)
{
    // beta = 1/sqrt(2*pi), the normalization constant of the normal density
    const float beta = 1.0f / CUDART_SQRT_2PI;
    const float cdf = normcdf(x);
    const float pdf = beta*::exp(-0.5f*x*x);
    return cdf + x * pdf;
}
__global__ void _cuda_gelu_gradient_inplace(float* out, const float* s, const float* gi, size_t n)
{
    // GELU backward, overwrite variant (out may alias gi).
    for (auto idx : grid_stride_range(0, n))
        out[idx] = gi[idx]*gelu_compute_gradient(s[idx]);
}
__global__ void _cuda_gelu_gradient(float* out, const float* s, const float* gi, size_t n)
{
    // GELU backward, accumulate variant: out += gi * gelu'(s).
    for (auto idx : grid_stride_range(0, n))
        out[idx] += gi[idx]*gelu_compute_gradient(s[idx]);
}
void gelu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input
)
{
    // GELU backward.  Overwrites grad when it aliases gradient_input,
    // otherwise accumulates into it.
    float* out = grad.device();
    const float* gi = gradient_input.device();
    const bool in_place = (out == gi);
    if (in_place)
        launch_kernel(_cuda_gelu_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size());
    else
        launch_kernel(_cuda_gelu_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size());
}
// ----------------------------------------------------------------------------------------
// Bilinear image resize over densely packed channel planes.  Each output
// element interpolates the 4 nearest source pixels; x_scale/y_scale map
// destination coordinates to source coordinates.  dchan_size/schan_size are
// elements per channel plane and dnc is the destination row width.
__global__ void _cuda_resize_bilinear(size_t dsize, size_t dchan_size, size_t dnc, float* d,
                                      size_t schan_size, int snr, int snc, const float* s,
                                      const float x_scale, const float y_scale)
{
    for(auto i : grid_stride_range(0, dsize))
    {
        // position within the current destination channel plane
        const int idx = i%dchan_size;
        const int channel = i/dchan_size;
        const int sidx = channel*schan_size;
        const int r = idx/dnc;
        const int c = idx%dnc;

        // source-space coordinates and the 2x2 neighborhood around them
        // (clamped at the bottom/right edges)
        const float y = r*y_scale;
        const int top = static_cast<int>(::floor(y));
        const int bottom = ::min(top+1, snr-1);
        const float tb_frac = y - top;

        const float x = c*x_scale;
        const int left = static_cast<int>(::floor(x));
        const int right = ::min(left+1, snc-1);
        const float lr_frac = x - left;

        float tl = s[sidx+top*snc+left];
        float tr = s[sidx+top*snc+right];
        float bl = s[sidx+bottom*snc+left];
        float br = s[sidx+bottom*snc+right];

        // weighted average of the 4 neighbors
        float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
                     tb_frac*((1-lr_frac)*bl + lr_frac*br);

        d[i] = temp;
    }
}
// Like _cuda_resize_bilinear but for tensors whose rows/channels are not
// densely packed: explicit row strides and a strided destination channel
// size are used to compute the actual memory offsets.
__global__ void _cuda_resize_bilinear_strided(size_t dsize, size_t dchan_size, size_t dnc, float* d,
                                              size_t schan_size, int snr, int snc, const float* s,
                                              const float x_scale, const float y_scale,
                                              size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
    for(auto i : grid_stride_range(0, dsize))
    {
        // logical position within the destination channel plane
        const int idx = i%dchan_size;
        const int channel = i/dchan_size;
        const int sidx = channel*schan_size;
        const int r = idx/dnc;
        const int c = idx%dnc;
        // physical (strided) destination offset
        const int didx = channel*dest_chan_size_strided + r*dest_row_stride+c;

        // source-space coordinates and clamped 2x2 neighborhood
        const float y = r*y_scale;
        const int top = static_cast<int>(::floor(y));
        const int bottom = ::min(top+1, snr-1);
        const float tb_frac = y - top;

        const float x = c*x_scale;
        const int left = static_cast<int>(::floor(x));
        const int right = ::min(left+1, snc-1);
        const float lr_frac = x - left;

        float tl = s[sidx+top*src_row_stride+left];
        float tr = s[sidx+top*src_row_stride+right];
        float bl = s[sidx+bottom*src_row_stride+left];
        float br = s[sidx+bottom*src_row_stride+right];

        // weighted average of the 4 neighbors
        float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
                     tb_frac*((1-lr_frac)*bl + lr_frac*br);

        d[didx] = temp;
    }
}
// Bilinearly resizes src into dest.  The explicit stride parameters let the
// caller operate on sub-windows of larger buffers; when the strides equal the
// tensors' own dimensions the faster packed kernel is used.
void resize_bilinear (
tensor& dest,
long dest_row_stride,
long dest_channel_stride,
const tensor& src,
long src_row_stride,
long src_channel_stride
)
{
DLIB_CASSERT(is_same_object(dest, src)==false);
DLIB_CASSERT(dest.num_samples() == src.num_samples());
DLIB_CASSERT(dest.k() == src.k());
if (dest.size() == 0 || src.size() == 0)
return;
// map destination pixel coords to source coords; max() guards against
// division by zero for 1-pixel-wide/tall outputs.
const float x_scale = (src.nc()-1)/(float)std::max<long>((dest.nc()-1),1);
const float y_scale = (src.nr()-1)/(float)std::max<long>((dest.nr()-1),1);
// strides matching the logical dimensions means both tensors are packed
if (dest.nc() == dest_row_stride && dest.nr()*dest.nc()==dest_channel_stride &&
src.nc() == src_row_stride && src.nr()*src.nc()==src_channel_stride)
{
launch_kernel(_cuda_resize_bilinear,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src.nr()*src.nc(), src.nr(), src.nc(), src.device(),
x_scale, y_scale);
}
else
{
// strided kernel: pass logical plane size for index decoding plus the
// physical strides for addressing.
launch_kernel(_cuda_resize_bilinear_strided,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src_channel_stride, src.nr(), src.nc(), src.device(),
x_scale, y_scale, dest_row_stride, src_row_stride, dest_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Backward pass of bilinear resize for packed tensors.  Each destination
// gradient element d[i] is scattered back onto the four source pixels that
// produced it, weighted by the same interpolation coefficients.  atomicAdd is
// required because neighboring destination pixels share source pixels.
// Note: this *accumulates* into s; the caller owns any required zeroing.
__global__ void _cuda_resize_bilinear_gradient(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
const float tmp = d[i];
// logical (channel, row, column) of the destination gradient element
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// locate the four source pixels used by the forward interpolation
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
// scatter the gradient using the forward pass's bilinear weights
atomicAdd(s+sidx+top*snc+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*snc+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*snc+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*snc+right, tmp*(tb_frac)*(lr_frac));
}
}
// Strided variant of the bilinear-resize backward pass.  Index decoding uses
// the logical plane dimensions (dchan_size, dnc) while all actual memory
// accesses use the physical strides.  Accumulates into s via atomicAdd.
__global__ void _cuda_resize_bilinear_gradient_strided(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
const int idx = i%dchan_size;
const int channel = i/dchan_size;
// physical offset of the destination gradient element
const int didx = channel*dest_chan_size_strided;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
const float tmp = d[didx + r*dest_row_stride+c];
// locate the four source pixels used by the forward interpolation
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
// scatter the gradient using the forward pass's bilinear weights
atomicAdd(s+sidx+top*src_row_stride+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*src_row_stride+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+right, tmp*(tb_frac)*(lr_frac));
}
}
// Accumulates the gradient of resize_bilinear into grad.  Note the role
// reversal relative to the forward call: gradient_input plays the "d" (read)
// side of the kernels and grad plays the "s" (written) side.
void resize_bilinear_gradient (
tensor& grad,
long grad_row_stride,
long grad_channel_stride,
const tensor& gradient_input,
long gradient_input_row_stride,
long gradient_input_channel_stride
)
{
DLIB_CASSERT(is_same_object(grad, gradient_input)==false);
DLIB_CASSERT(gradient_input.num_samples() == grad.num_samples());
DLIB_CASSERT(gradient_input.k() == grad.k());
if (grad.size() == 0 || gradient_input.size() == 0)
return;
// same coordinate mapping as the forward pass (grad has the source shape)
const float x_scale = (grad.nc()-1)/(float)std::max<long>((gradient_input.nc()-1),1);
const float y_scale = (grad.nr()-1)/(float)std::max<long>((gradient_input.nr()-1),1);
// use the fast packed kernel when both tensors are densely laid out
if (grad.nc() == grad_row_stride && grad.nr()*grad.nc()==grad_channel_stride &&
gradient_input.nc() == gradient_input_row_stride && gradient_input.nr()*gradient_input.nc()==gradient_input_channel_stride)
{
launch_kernel(_cuda_resize_bilinear_gradient,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad.nr()*grad.nc(), grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale);
}
else
{
launch_kernel(_cuda_resize_bilinear_gradient_strided,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad_channel_stride, grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale, gradient_input_row_stride, grad_row_stride, gradient_input_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Accumulating strided block copy: both buffers are viewed as a sequence of
// blocks of block_size contiguous floats, spaced dest_stride/src_stride
// apart; element j of each src block is added to element j of the matching
// dest block.  size is the total number of elements processed.
__global__ void _cuda_copy_tensor_add_to (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for (auto i : grid_stride_range(0, size))
    {
        const size_t block_idx = i/block_size;
        const size_t offset = i%block_size;
        dest[block_idx*dest_stride + offset] += src[block_idx*src_stride + offset];
    }
}
// Overwriting strided block copy: identical addressing to
// _cuda_copy_tensor_add_to but assigns instead of accumulating.
__global__ void _cuda_copy_tensor (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for (auto i : grid_stride_range(0, size))
    {
        const size_t block_idx = i/block_size;
        const size_t offset = i%block_size;
        dest[block_idx*dest_stride + offset] = src[block_idx*src_stride + offset];
    }
}
// Copies (or, when add_to is true, accumulates) count_k channels from src,
// starting at channel src_k_offset, into dest starting at channel
// dest_k_offset.  Both tensors must agree on num_samples(), nr() and nc().
void copy_tensor(
    bool add_to,
    tensor& dest,
    size_t dest_k_offset,
    const tensor& src,
    size_t src_k_offset,
    size_t count_k
)
{
    // floats per sample in each tensor (used as the per-sample stride)
    const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k());
    const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k());
    // contiguous floats actually copied per sample
    const size_t block_size = count_k * dest.nc() * dest.nr();

    DLIB_CASSERT(dest.num_samples() == src.num_samples() &&
        dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size");
    DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor");
    DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor");

    // advance each base pointer to the first channel of interest of sample 0
    // (fixed stray double semicolon that was on the src_p line)
    float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr();
    const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();

    if (add_to)
    {
        launch_kernel(_cuda_copy_tensor_add_to, max_jobs(dest.size()),
            dest_p, block_size*dest.num_samples(),
            src_p, dest_sample_size, src_sample_size, block_size);
    }
    else
    {
        launch_kernel(_cuda_copy_tensor, max_jobs(dest.size()),
            dest_p, block_size*dest.num_samples(),
            src_p, dest_sample_size, src_sample_size, block_size);
    }
}
// ----------------------------------------------------------------------------------------
// Numerically stable log(1+exp(x)) for device code, using the usual
// piecewise approximation: exp(x) when x is very negative (log1p(e^x)~e^x),
// the direct formula in the middle range, x+exp(-x) for large x, and just x
// once exp(-x) underflows relative to x.
__device__ float cuda_log1pexp(float x)
{
    if (x <= -18)
        return ::exp(x);
    if (x <= 9)
        return std::log1pf(::exp(x));
    if (x <= 16)
        return x + expf(-x);
    return x;
}
// Per-pixel binary log loss and gradient.  On entry g holds
// sigmoid(out_data); on exit it holds the scaled loss gradient.  truth
// values: >0 means positive label, <0 means negative label (with |y| acting
// as a per-pixel weight), ==0 means "ignore this pixel".  The unscaled loss
// sum is accumulated into *loss_out (caller zeroes it and applies `scale`).
__global__ void _cuda_compute_loss_binary_log_per_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale)
{
float loss = 0;
for(auto i : grid_stride_range(0, n))
{
const float y = truth[i];
if (y > 0.f)
{
// -log(sigmoid(out)) written via the stable log1pexp form
const float temp = cuda_log1pexp(-out_data[i]);
loss += y*temp;
g[i] = y*scale*(g[i]-1);
}
else if (y < 0.f)
{
// -log(1-sigmoid(out)); |y| weights the contribution
const float temp = -(-out_data[i]-cuda_log1pexp(-out_data[i]));
loss += -y*temp;
g[i] = -y*scale*g[i];
}
else
{
// y == 0 marks pixels excluded from the loss
g[i] = 0.f;
}
}
// combine each thread's partial sum into the single output scalar
warp_reduce_atomic_add(*loss_out, loss);
}
// ----------------------------------------------------------------------------------------
// Logarithm clamped away from zero: returns log(max(x, epsilon)) so callers
// never take the log of zero (or a vanishingly small number).
__device__ float cuda_safe_log(float x, float epsilon = 1e-10)
{
    return ::log(x >= epsilon ? x : epsilon);
}
// Per-pixel multiclass log loss.  On entry g holds softmax probabilities laid
// out as (sample, k, row, col); truth holds one uint16 class label per pixel.
// i is decoded into its class plane k and its pixel index within the truth
// array.  Pixels labeled label_to_ignore contribute nothing.  The unscaled
// loss sum goes into *loss_out (caller zeroes it and applies `scale`).
__global__ void _cuda_compute_loss_multiclass_log_per_pixel(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, uint16_t label_to_ignore, const float scale)
{
float loss = 0;
for(auto i : grid_stride_range(0, n))
{
// class plane this element belongs to, and the pixel's index in truth
const size_t k = (i/plane_size)%nk;
const size_t idx = (i%plane_size) + plane_size*(i/sample_size);
const size_t y = truth[idx];
if (k == y)
{
// correct-class plane: cross-entropy term and softmax gradient p-1
loss -= cuda_safe_log(g[i]);
g[i] = scale*(g[i] - 1);
}
else if (y == label_to_ignore)
{
g[i] = 0.f;
}
else
{
g[i] = scale*g[i];
}
}
warp_reduce_atomic_add(*loss_out, loss);
}
// Weighted variant of the per-pixel multiclass log loss: each pixel carries a
// float weight (weights has one entry per pixel, indexed like truth) that
// scales both its loss contribution and its gradient.  There is no
// label_to_ignore here; a weight of zero plays that role.
__global__ void _cuda_compute_loss_multiclass_log_per_pixel_weighted(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, const float* weights, const float scale)
{
float loss = 0;
for(auto i : grid_stride_range(0, n))
{
// class plane this element belongs to, and the pixel's index in truth
const size_t k = (i/plane_size)%nk;
const size_t idx = (i%plane_size) + plane_size*(i/sample_size);
const size_t y = truth[idx];
const float weight = weights[idx];
if (k == y)
{
loss -= weight*cuda_safe_log(g[i]);
g[i] = weight*scale*(g[i] - 1);
}
else
{
g[i] = weight*scale*g[i];
}
}
warp_reduce_atomic_add(*loss_out, loss);
}
// ----------------------------------------------------------------------------------------
// Mean squared error loss: sums (truth-output)^2 into *loss_out (caller
// zeroes it and applies `scale`) and writes the scaled gradient
// (output-truth)*scale into g.
__global__ void _cuda_compute_loss_mean_squared_per_channel_and_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale)
{
    float loss = 0;
    for (auto i : grid_stride_range(0, n))
    {
        const float diff = out_data[i] - truth[i];
        loss += diff * diff;
        g[i] = diff * scale;
    }
    // fold each thread's partial sum into the single output scalar
    warp_reduce_atomic_add(*loss_out, loss);
}
// ----------------------------------------------------------------------------------------
// Host driver for the per-pixel binary log loss: zeroes the device loss
// accumulator, fills `gradient` with sigmoid(subnetwork_output), runs the
// kernel (which converts gradient in place and sums the loss), then copies
// the scalar back and applies the averaging scale.
void compute_loss_binary_log_per_pixel::
do_work(
cuda_data_ptr<float> loss_work_buffer,
cuda_data_ptr<const float> truth_buffer,
const tensor& subnetwork_output,
tensor& gradient,
double& loss
)
{
CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float)));
sigmoid(gradient, subnetwork_output);
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc());
launch_kernel(_cuda_compute_loss_binary_log_per_pixel, max_jobs(gradient.size()),
loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale);
float floss;
// blocking copy of the accumulated (unscaled) loss back to the host
dlib::cuda::memcpy(&floss, loss_work_buffer);
loss = scale*floss;
}
// Host driver for the per-pixel multiclass log loss: softmaxes the network
// output into `gradient`, runs the kernel (in-place gradient conversion plus
// loss accumulation), then averages.  Pixels labeled uint16 max are ignored.
void compute_loss_multiclass_log_per_pixel::
do_work(
cuda_data_ptr<float> loss_work_buffer,
cuda_data_ptr<const uint16_t> truth_buffer,
const tensor& subnetwork_output,
tensor& gradient,
double& loss
)
{
CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float)));
softmax(gradient, subnetwork_output);
static const uint16_t label_to_ignore = std::numeric_limits<uint16_t>::max();
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc());
launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel, max_jobs(gradient.size()),
loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), label_to_ignore, scale);
float floss;
dlib::cuda::memcpy(&floss, loss_work_buffer);
loss = scale*floss;
}
// Host driver for the weighted per-pixel multiclass log loss; identical to
// the unweighted version except each pixel's contribution is scaled by the
// corresponding entry of weights_buffer.
void compute_loss_multiclass_log_per_pixel_weighted::
do_work(
cuda_data_ptr<float> loss_work_buffer,
cuda_data_ptr<const uint16_t> truth_buffer,
cuda_data_ptr<const float> weights_buffer,
const tensor& subnetwork_output,
tensor& gradient,
double& loss
)
{
CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float)));
softmax(gradient, subnetwork_output);
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc());
launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel_weighted, max_jobs(gradient.size()),
loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), weights_buffer.data(), scale);
float floss;
dlib::cuda::memcpy(&floss, loss_work_buffer);
loss = scale*floss;
}
// Host driver for the per-channel-and-pixel MSE loss.  Unlike the log losses
// above, the averaging scale also divides by k() since every channel is a
// regression target.
void compute_loss_mean_squared_per_channel_and_pixel::
do_work(
cuda_data_ptr<float> loss_work_buffer,
cuda_data_ptr<const float> truth_buffer,
const tensor& subnetwork_output,
tensor& gradient,
double& loss
)
{
CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float)));
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.k() * subnetwork_output.nr() * subnetwork_output.nc());
launch_kernel(_cuda_compute_loss_mean_squared_per_channel_and_pixel , max_jobs(gradient.size()),
loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale);
float floss;
dlib::cuda::memcpy(&floss, loss_work_buffer);
loss = scale*floss;
}
// ----------------------------------------------------------------------------------------
}
}
| d69583f446ed33ef9bf7b3902ebb0394b2c3f548.cu | // Copyright (C) 2015 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
#include "cudnn_dlibapi.h"
#include <math_constants.h>
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Makes the given CUDA device the current one for this host thread.
void set_device (
int dev
)
{
CHECK_CUDA(cudaSetDevice(dev));
}
// Returns the CUDA device currently selected for this host thread.
int get_device (
)
{
    int current_device = 0;
    CHECK_CUDA(cudaGetDevice(&current_device));
    return current_device;
}
// Returns the human readable name of the given CUDA device.
std::string get_device_name (
int device
)
{
    cudaDeviceProp properties;
    CHECK_CUDA(cudaGetDeviceProperties(&properties, device));
    return std::string(properties.name);
}
// Switches the calling host thread to blocking synchronization: it will
// sleep instead of spin-waiting when synchronizing with the GPU.
void set_current_device_blocking_sync(
)
{
CHECK_CUDA(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
}
// Returns the number of CUDA capable devices visible to the runtime.
int get_num_devices (
)
{
    int device_count = 0;
    CHECK_CUDA(cudaGetDeviceCount(&device_count));
    return device_count;
}
// Returns true if device_id can directly access memory on peer_device_id.
bool can_access_peer (int device_id, int peer_device_id)
{
    int result = 0;
    CHECK_CUDA(cudaDeviceCanAccessPeer(&result, device_id, peer_device_id));
    return result != 0;
}
// Convenience overload: checks peer access between the devices that own the
// two tensors' allocations.
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
return can_access_peer(device.device_id(), peer_device.device_id());
}
// Blocks until device dev has finished all previously issued work.
// raii_set_device temporarily makes dev the current device for this scope.
void device_synchronize (int dev)
{
raii_set_device set_dev(dev);
CHECK_CUDA(cudaDeviceSynchronize());
}
// Convenience overload: synchronize the device that owns the given tensor.
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }
// Enables direct peer access from device_id to peer_device_id for the
// lifetime of this object.  If access was already enabled by someone else we
// leave it that way (call_disable stays false so the destructor won't turn
// off access we don't own).
enable_peer_access::
enable_peer_access(
int device_id,
int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
raii_set_device set_dev(device_id);
auto err = cudaDeviceEnablePeerAccess(peer_device_id, 0);
if (err == cudaSuccess)
{
// we enabled it, so we are responsible for disabling it later
call_disable = true;
}
else if (err == cudaErrorPeerAccessAlreadyEnabled)
{
// call cudaGetLastError() to dispose of this error since we don't
// care.
auto err2 = cudaGetLastError();
// the sticky error must match what we just handled; anything else is real
if (err2 != cudaErrorPeerAccessAlreadyEnabled)
CHECK_CUDA(err2);
}
else
{
CHECK_CUDA(err);
}
}
// Disables peer access, but only if this object was the one that enabled it.
// noexcept(false) because CHECK_CUDA may throw on failure.
enable_peer_access::
~enable_peer_access() noexcept(false)
{
if (call_disable)
{
raii_set_device set_dev(device_id);
CHECK_CUDA(cudaDeviceDisablePeerAccess(peer_device_id));
}
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// Computes invnorms[i] = 1/sqrt(eps + sum_j data[i][j]^2) for each of the nr
// rows of the nr x nc matrix `data`.  Three phases separated by barriers:
// seed each row's accumulator with eps, accumulate squared sums via
// warp-reduced atomic adds, then invert.  NOTE(review): the __syncthreads()
// here only order the phases within one block; correctness appears to rely
// on the 2-D launch produced by max_jobs covering rows consistently.
__global__ void _cuda_inverse_norms(float* invnorms, const float* data, size_t nr, size_t nc, const float eps)
{
// initialize invnorms before we begin.
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
invnorms[i] = eps;
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
{
auto p = data + i*nc;
float temp = 0;
// each thread sums a strided slice of row i
for (auto j : grid_stride_range(0, nc))
temp += p[j]*p[j];
// and store the sum into invnorms[i]
warp_reduce_atomic_add(invnorms[i], temp);
}
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
invnorms[i] = 1.0/std::sqrt(invnorms[i]);
}
// Fills invnorms with the inverse L2 norm (regularized by eps) of each
// sample of `data`, treating each sample as one flat row.
void inverse_norms (
resizable_tensor& invnorms,
const tensor& data,
const double eps
)
{
invnorms.set_size(data.num_samples());
// 2-D launch: x strides over columns within a row, y strides over rows
launch_kernel(_cuda_inverse_norms, max_jobs(data.size()/data.num_samples(), data.num_samples()),
invnorms.device(), data.device(), data.num_samples(), data.size()/data.num_samples(), eps);
}
// ----------------------------------------------------------------------------------------
// out[i] = dot(lhs row i, rhs row i) for an nr x nc row-major layout.
// First zeroes out[], then accumulates per-thread partial dot products into
// it with warp-reduced atomic adds.
__global__ void _cuda_dot_prods(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc)
{
// initialize out before we begin.
for (auto i : grid_stride_range_y(0, nr))
for (auto j : grid_stride_range(0, 1))
out[i] = 0;
__syncthreads();
for (auto i : grid_stride_range_y(0, nr))
{
auto l = lhs + i*nc;
auto r = rhs + i*nc;
float temp = 0;
// each thread handles a strided slice of row i
for (auto j : grid_stride_range(0, nc))
temp += l[j]*r[j];
// and store the sum into out[i]
warp_reduce_atomic_add(out[i], temp);
}
}
// Accumulating variant of _cuda_dot_prods: adds each row's dot product onto
// whatever is already stored in out[i] (no zeroing phase).
__global__ void _cuda_dot_prods_add_to(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc)
{
for (auto i : grid_stride_range_y(0, nr))
{
auto l = lhs + i*nc;
auto r = rhs + i*nc;
float temp = 0;
for (auto j : grid_stride_range(0, nc))
temp += l[j]*r[j];
// and store the sum into out[i]
warp_reduce_atomic_add(out[i], temp);
}
}
// Resizes out to one entry per sample and fills it with the per-sample dot
// products of lhs and rhs (each sample treated as one flat row).
void dot_prods (
resizable_tensor& out,
const tensor& lhs,
const tensor& rhs
)
{
DLIB_CASSERT(have_same_dimensions(lhs,rhs));
out.set_size(lhs.num_samples());
if (out.size() == 0)
return;
const auto nr = lhs.num_samples();
const auto nc = lhs.size()/lhs.num_samples();
launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc);
}
// Per-sample dot products into a caller-sized `out`; when add_to is true the
// results are accumulated onto out's existing contents.
void dot_prods (
bool add_to,
tensor& out,
const tensor& lhs,
const tensor& rhs
)
{
DLIB_CASSERT(have_same_dimensions(lhs,rhs));
DLIB_CASSERT(out.k() == 1 && out.nr() == 1 && out.nc() == 1);
DLIB_CASSERT(out.size() == lhs.num_samples());
const auto nr = lhs.num_samples();
const auto nc = lhs.size()/lhs.num_samples();
if (add_to)
launch_kernel(_cuda_dot_prods_add_to, max_jobs(nc,nr), out.device(), lhs.device(), rhs.device(), nr, nc);
else
launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc);
}
// ----------------------------------------------------------------------------------------
// out = m with every column j scaled by v[j]; m is nr x nc row-major and v
// has nc entries (j%nc picks the column of flat index j).
__global__ void _cuda_scale_columns(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto idx : grid_stride_range(0, nr*nc))
        out[idx] = m[idx]*v[idx%nc];
}
// out = m with each column scaled by the matching entry of v.  Each sample of
// m is treated as one row; no dimension checks are performed here.
void scale_columns (
tensor& out,
const tensor& m,
const tensor& v
)
{
launch_kernel(_cuda_scale_columns, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// out = m with every row i scaled by v[i]; m is nr x nc row-major and v has
// nr entries (j/nc picks the row of flat index j).
__global__ void _cuda_scale_rows(float* out, const float* m, const float* v, size_t nr, size_t nc)
{
    for (auto idx : grid_stride_range(0, nr*nc))
        out[idx] = m[idx]*v[idx/nc];
}
// out = m with each sample (row) scaled by the matching entry of v.
void scale_rows (
tensor& out,
const tensor& m,
const tensor& v
)
{
launch_kernel(_cuda_scale_rows, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples());
}
// ----------------------------------------------------------------------------------------
// out = (m1 - m2 .* rowbroadcast(v1)) .* rowbroadcast(v2): each row i of the
// nr x nc matrices uses scalars v1[i] and v2[i].
__global__ void _cuda_scale_rows2(float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
for (auto j : grid_stride_range(0, nr*nc))
{
out[j] = (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
}
}
// Like _cuda_scale_rows2 but blends with the existing output:
// out = beta*out + (m1 - m2 .* rowbroadcast(v1)) .* rowbroadcast(v2).
__global__ void _cuda_scale_rows2_beta(const float beta, float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc)
{
for (auto j : grid_stride_range(0, nr*nc))
{
out[j] = beta*out[j] + (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc];
}
}
// out = beta*out + (m1 - m2 .* rowbroadcast(v1)) .* rowbroadcast(v2), where
// each sample is one row.  beta == 0 dispatches to a kernel that does not
// read out's (possibly uninitialized) prior contents.
void scale_rows2 (
float beta,
tensor& out,
const tensor& m1,
const tensor& m2,
const tensor& v1,
const tensor& v2
)
{
if (beta == 0)
{
launch_kernel(_cuda_scale_rows2, max_jobs(m1.size()), out.device(),
m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(),
m1.size()/m1.num_samples());
}
else
{
launch_kernel(_cuda_scale_rows2_beta, max_jobs(m1.size()), beta,
out.device(), m1.device(), m2.device(), v1.device(), v2.device(),
m1.num_samples(), m1.size()/m1.num_samples());
}
}
// ----------------------------------------------------------------------------------------
// dest[i] = exp(src[i]) for all n elements, grid-stride.
__global__ void _cuda_exp(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::exp(src[idx]);
}
// Elementwise dest = exp(src); tensors must have equal sizes.
void exp (
tensor& dest,
const tensor& src
)
{
DLIB_ASSERT(dest.size() == src.size());
launch_kernel(_cuda_exp, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// dest[i] = natural log of src[i] for all n elements, grid-stride.
__global__ void _cuda_log(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::log(src[idx]);
}
// Elementwise dest = log(src); tensors must have equal sizes.
void log (
tensor& dest,
const tensor& src
)
{
DLIB_ASSERT(dest.size() == src.size());
launch_kernel(_cuda_log, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// dest[i] = base-10 log of src[i] for all n elements, grid-stride.
__global__ void _cuda_log10(float* dest, const float* src, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        dest[idx] = ::log10(src[idx]);
}
// Elementwise dest = log10(src); tensors must have equal sizes.
void log10 (
tensor& dest,
const tensor& src
)
{
DLIB_ASSERT(dest.size() == src.size());
launch_kernel(_cuda_log10, max_jobs(src.size()), dest.device(), src.device(), src.size());
}
// -----------------------------------------------------------------------------------
// Elementwise product d = s1 .* s2, all buffers of length n.
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]*s2[idx];
}
// Multiply-with-reduction used when the destination has fewer elements than
// the sources: each output slot i sums the products of all source pairs that
// fold onto it (stepping by n up to max_size, with cyclic indexing into the
// two sources).  Overwrites d.
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = 0;
for (size_t j = i; j < max_size; j += n)
d[i] += s1[j%s1_n]*s2[j%s2_n];
}
}
// Elementwise product with cyclic (broadcast) indexing into both sources:
// used when the destination is the largest of the three tensors.
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx%s1_n]*s2[idx%s2_n];
}
// Accumulating elementwise product: d += s1 .* s2, all buffers of length n.
__global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx]*s2[idx];
}
// Accumulating variant of _cuda_multiply2: adds the folded products onto the
// existing contents of d instead of overwriting.
__global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
for (auto i : grid_stride_range(0, n))
{
for (size_t j = i; j < max_size; j += n)
d[i] += s1[j%s1_n]*s2[j%s2_n];
}
}
// Accumulating elementwise product with cyclic (broadcast) source indexing.
__global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx%s1_n]*s2[idx%s2_n];
}
// Elementwise dest = src1 .* src2 (or dest += ... when add_to), broadcasting
// over the sample dimension: every tensor must have either 1 sample or the
// common maximum MD samples, and all k/nr/nc dimensions must match.  When
// dest has fewer samples than the sources, products are summed into it.
void multiply (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
        dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
        dest.nc() == src1.nc() && src1.nc() == src2.nc() );
    const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
    DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
        (src1.num_samples()==1 || src1.num_samples()==MD) &&
        (src2.num_samples()==1 || src2.num_samples()==MD) );

    if (dest.size() == 0)
        return;

    const size_t max_size = std::max(std::max(dest.size(),src1.size()),src2.size());
    // NOTE(review): the previous revision also fetched dest.host(), src1.host()
    // and src2.host() into unused locals here.  Those calls force
    // device->host transfers/synchronization for no benefit, so they have
    // been removed; only the .device() pointers below are needed.
    if (dest.size() == src1.size() && src1.size() == src2.size())
    {
        // no broadcasting needed, straight elementwise product
        if (add_to)
            launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
        else
            launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
    }
    else if (dest.num_samples() == 1)
    {
        // dest is smaller than the sources: fold the products into it
        if (add_to)
            launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
        else
            launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
    }
    else
    {
        // dest is the largest tensor: cyclic indexing broadcasts the sources
        if (add_to)
            launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
        else
            launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
    }
}
// ------------------------------------------------------------------------------------
// Scales every element of s1 by the per-channel factor s2[k], where channel
// k is recovered from the flat index (bs elements per plane, ks channels).
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto channel = (idx/bs)%ks;
        d[idx] = s1[idx]*s2[channel];
    }
}
// Reduction form used for bias-style gradients: d gets ks per-channel sums,
// where d[k] = sum over all image planes with channel k of dot(s1 plane,
// s2 plane).  n is the number of planes (samples*ks), bs the plane size.
// Zeroes d first, then accumulates with warp-reduced atomic adds.
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// zero initialize d before we begin.
for (auto i : grid_stride_range_y(0, ks))
for (auto j : grid_stride_range(0, 1))
d[i] = 0;
__syncthreads();
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
// Accumulating variant of _cuda_multiply_conv: d += s1 scaled by the
// per-channel factor s2[k].
__global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto channel = (idx/bs)%ks;
        d[idx] += s1[idx]*s2[channel];
    }
}
// Accumulating variant of _cuda_multiply_conv2: same per-channel plane-dot
// reduction but without the zeroing phase, so sums add onto d's contents.
__global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
// Two modes, selected by the shapes: (1) dest shaped like src1 and src2 a
// per-channel vector -> scale each channel of src1 by src2; (2) dest a
// per-channel vector and src1/src2 same shape -> per-channel sums of the
// elementwise products (the transpose/reduction of mode 1).  add_to selects
// accumulate vs overwrite in both modes.
void multiply_conv (
bool add_to,
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (have_same_dimensions(dest,src1))
{
// mode 1: broadcast src2's per-channel factors over src1
DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k());
if (dest.size() == 0)
return;
if (add_to)
launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
else
{
// mode 2: reduce elementwise products down to one value per channel
DLIB_CASSERT(have_same_dimensions(src1,src2));
DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k());
if (dest.size() == 0)
return;
const auto bs = src1.nr()*src1.nc();
const auto n = src1.num_samples()*src1.k();
if (add_to)
launch_kernel(_cuda_multiply_conv2_add_to, max_jobs(bs,n),
dest.device(), src1.device(), n, src2.device(), bs, src1.k());
else
launch_kernel(_cuda_multiply_conv2, max_jobs(bs,n),
dest.device(), src1.device(), n, src2.device(), bs, src1.k());
}
}
// ------------------------------------------------------------------------------------
// d += src with each plane of bs elements scaled by its own factor
// scales[plane], where plane = flat index / bs.
__global__ void _cuda_scale_channels_add_to(float* d, const float* src, size_t n, const float* scales, size_t bs)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto plane = idx/bs;
        d[idx] += src[idx]*scales[plane];
    }
}
// d = src with each plane of bs elements scaled by its own factor
// scales[plane]; overwriting counterpart of _cuda_scale_channels_add_to.
__global__ void _cuda_scale_channels(float* d, const float* src, size_t n, const float* scales, size_t bs)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto plane = idx/bs;
        d[idx] = src[idx]*scales[plane];
    }
}
// dest = src with every (sample,channel) plane multiplied by the matching
// scalar from `scales` (which must be num_samples x k x 1 x 1); accumulates
// instead when add_to is true.
void scale_channels (
bool add_to,
tensor& dest,
const tensor& src,
const tensor& scales
)
{
DLIB_CASSERT(have_same_dimensions(dest,src) &&
scales.num_samples() == src.num_samples() &&
scales.k() == src.k() &&
scales.nr() == 1 &&
scales.nc() == 1 );
if (dest.size() == 0)
return;
if (add_to)
launch_kernel(_cuda_scale_channels_add_to,max_jobs(dest.size()),
dest.device(), src.device(), src.size(), scales.device(), src.nr()*src.nc());
else
launch_kernel(_cuda_scale_channels,max_jobs(dest.size()),
dest.device_write_only(), src.device(), src.size(), scales.device(), src.nr()*src.nc());
}
// ------------------------------------------------------------------------------------
// Elementwise product d = s1 .* s2 over n elements (fast path for
// multiply_zero_padded when all tensors share dimensions).
__global__ void _cuda_mult1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]*s2[idx];
}
// Accumulating elementwise product d += s1 .* s2 over n elements.
__global__ void _cuda_mult1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += s1[idx]*s2[idx];
}
// Zero-padded elementwise product: each destination coordinate (n,k,r,c) is
// the product of the corresponding s1 and s2 entries, with either operand
// treated as 0 wherever the coordinate falls outside that tensor's bounds.
__global__ void _cuda_mult2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
// recover the 4-D coordinate of destination element i
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
// read s1 only when the coordinate is inside its bounds
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
// likewise for s2
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1*v2;
}
}
// Accumulating variant of _cuda_mult2: the zero-padded product is added onto
// d's existing contents instead of overwriting them.
__global__ void _cuda_mult2_add_to(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
// recover the 4-D coordinate of destination element i
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] += v1*v2;
}
}
// dest = src1 .* src2 where operands of differing shapes are conceptually
// zero-padded up to dest's shape; add_to selects accumulate vs overwrite.
// Dispatches to a fast unchecked kernel when all three shapes match.
void multiply_zero_padded (
bool add_to,
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (dest.size() == 0)
return;
// Do the simple and fast version if everything has the same dimensions
if (have_same_dimensions(dest, src1) &&
have_same_dimensions(dest, src2))
{
if (add_to)
launch_kernel(_cuda_mult1_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
else
launch_kernel(_cuda_mult1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
}
else
{
if (add_to)
{
// Otherwise, do the more complex version with bounds checking.
launch_kernel(_cuda_mult2_add_to,max_jobs(dest.size()),
dest.device(), src1.device(), src2.device(),
dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
src2.num_samples(), src2.k(), src2.nr(), src2.nc()
);
}
else
{
// Otherwise, do the more complex version with bounds checking.
launch_kernel(_cuda_mult2,max_jobs(dest.size()),
dest.device(), src1.device(), src2.device(),
dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
src2.num_samples(), src2.k(), src2.nr(), src2.nc()
);
}
}
}
// ------------------------------------------------------------------------------------
// Elementwise sum d = s1 + s2 over n elements (fast path for add() when all
// tensors share dimensions).
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx]+s2[idx];
}
// Zero-padded elementwise sum: each destination coordinate (n,k,r,c) gets
// s1+s2 with either operand treated as 0 where the coordinate is outside
// that tensor's bounds.
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
// recover the 4-D coordinate of destination element i
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1+v2;
}
}
// dest = src1 + src2 with zero padding: when the dimensions differ,
// out-of-bounds source elements are treated as zeros.
void add (
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    if (dest.size() == 0)
        return;

    const bool all_same = have_same_dimensions(dest, src1) &&
                          have_same_dimensions(dest, src2);
    if (all_same)
    {
        // Fast path: plain elementwise add.
        launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else
    {
        // Mismatched dimensions: use the bounds-checking kernel.
        launch_kernel(_cuda_add2,max_jobs(dest.size()),
                      dest.device(), src1.device(), src2.device(),
                      dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
                      src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
                      src2.num_samples(), src2.k(), src2.nr(), src2.nc());
    }
}
// ------------------------------------------------------------------------------------
// d[i] = A*s[i] + B
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const float val = A*s[idx] + B;
        d[idx] = val;
    }
}
// Special case of _cuda_affine_transform1 with B == 0: d[i] = A*s[i]
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx];
}
// dest = A*src + B (elementwise).  Uses a cheaper kernel when B == 0.
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A,
    const float B
)
{
    DLIB_CASSERT(dest.size()==src.size());
    if (B == 0)
        launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
    else
        launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
}
// dest = A*src (elementwise scale, no additive term).
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A
)
{
    DLIB_CASSERT(dest.size()==src.size());
    launch_kernel(_cuda_affine_transform1_0, max_jobs(dest.size()),
                  dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
// Applies d = A*s1 + B*s2 + C*s3 only inside a rectangular sub-region of
// a row-major 2D layout.  n is the number of elements in the rectangle,
// rect_nc its width, total_nc the full row width, and start_idx the flat
// offset of the rectangle's top-left corner.
__global__ void _cuda_affine_transform_rect(
float* d,
const float* s1,
const float* s2,
const float* s3,
float A,
float B,
float C,
size_t start_idx,
size_t n,
size_t rect_nc,
size_t total_nc
)
{
for (auto i : grid_stride_range(0, n))
{
// (r,c) are coordinates within the rectangle; idx is the flat index
// into the full array.
size_t r = i/rect_nc;
size_t c = i%rect_nc;
size_t idx = r*total_nc + c + start_idx;
d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
}
}
// Applies dest = A*src1 + B*src2 + C*src3 only over the elements inside
// rect, where each tensor is viewed as a 2D matrix with num_samples()
// rows and size()/num_samples() columns.  Elements outside rect are left
// untouched.
void affine_transform(
const rectangle& rect,
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
float A,
float B,
float C
)
{
DLIB_CASSERT(dest.size() == src1.size());
DLIB_CASSERT(dest.size() == src2.size());
DLIB_CASSERT(dest.size() == src3.size());
DLIB_CASSERT(dest.num_samples() == src1.num_samples());
DLIB_CASSERT(dest.num_samples() == src2.num_samples());
DLIB_CASSERT(dest.num_samples() == src3.num_samples());
// rect must lie inside the num_samples() x (size()/num_samples()) matrix view.
DLIB_CASSERT(rectangle(0,0, dest.size()/dest.num_samples()-1, dest.num_samples()-1).contains(rect));
// start_idx = flat offset of rect's top-left corner in the matrix view.
launch_kernel(_cuda_affine_transform_rect,max_jobs(rect.area()),
dest.device(), src1.device(), src2.device(), src3.device(), A, B, C,
rect.left() + rect.top()*(dest.size()/dest.num_samples()),
rect.area(),
rect.width(),
dest.size()/dest.num_samples());
}
// ----------------------------------------------------------------------------------------
// d[i] = A*s1[i] + B*s2[i] + C
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const float val = A*s1[idx] + B*s2[idx] + C;
        d[idx] = val;
    }
}
// Special case of _cuda_affine_transform4 with C == 0: d[i] = A*s1[i] + B*s2[i]
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx];
}
// dest = A*src1 + B*src2 + C (elementwise).  Uses the cheaper kernel
// when C == 0.
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B,
    const float C
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    if (C == 0)
        launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
    else
        launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
}
// dest = A*src1 + B*src2 (elementwise, no constant term).
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    launch_kernel(_cuda_affine_transform4_0, max_jobs(dest.size()),
                  dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
// d[i] += scale*s[i]
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += scale*s[idx];
}
// dest += scale*src (elementwise accumulate).
void add_scaled(
    tensor& dest,
    const float scale,
    const tensor& src
)
{
    DLIB_CASSERT(dest.size()==src.size());
    launch_kernel(_cuda_add_scaled, max_jobs(dest.size()),
                  dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
// dest[i] = beta*dest[i] + alpha*src[i/stride], i.e. broadcasts the
// column vector src across every group of `stride` consecutive elements.
__global__ void _cuda_add_cv_to_all_columns(float beta, float* dest, float alpha, const float* src, size_t size, size_t stride)
{
    for (auto idx : grid_stride_range(0, size))
        dest[idx] = beta*dest[idx] + alpha*src[idx/stride];
}
// Special case of _cuda_add_cv_to_all_columns with beta == 0:
// dest[i] = alpha*src[i/stride] (no read of dest's prior contents).
__global__ void _cuda_add_cv_to_all_columns_no_beta(float* dest, float alpha, const float* src, size_t size, size_t stride)
{
    for (auto idx : grid_stride_range(0, size))
        dest[idx] = alpha*src[idx/stride];
}
// dest = beta*dest + alpha*src broadcast over columns, where src holds one
// value per sample of dest.  When beta == 0, dest's prior contents are
// never read (so dest may be uninitialized).
void add_cv_to_all_columns(
    float beta,
    tensor& dest,
    float alpha,
    const tensor& src
)
{
    DLIB_CASSERT(dest.num_samples() == src.num_samples() && src.num_samples() == src.size());
    const size_t stride = dest.size()/dest.num_samples();
    if (beta == 0)
        launch_kernel(_cuda_add_cv_to_all_columns_no_beta, max_jobs(dest.size()), dest.device(), alpha, src.device(), dest.size(), stride);
    else
        launch_kernel(_cuda_add_cv_to_all_columns, max_jobs(dest.size()), beta, dest.device(), alpha, src.device(), dest.size(), stride);
}
// ----------------------------------------------------------------------------------------
// d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D
__global__ void _cuda_affine_transform5(
    float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const float val = A*s1[idx] + B*s2[idx] + C*s3[idx] + D;
        d[idx] = val;
    }
}
// dest = A*src1 + B*src2 + C*src3 + D (elementwise).
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C,
    const float D
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    launch_kernel(_cuda_affine_transform5, max_jobs(dest.size()),
                  dest.device(), src1.device(), src2.device(), src3.device(),
                  dest.size(), A, B, C, D);
}
// ----------------------------------------------------------------------------------------
// d[i] = A*s1[i] + B*s2[i] + C*s3[i] for i in [begin,end).
__global__ void _cuda_affine_transform_range(
    float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C
)
{
    for (auto idx : grid_stride_range(begin, end))
    {
        const float val = A*s1[idx] + B*s2[idx] + C*s3[idx];
        d[idx] = val;
    }
}
// dest = A*src1 + B*src2 + C*src3, applied only to elements in [begin,end).
void affine_transform_range(
    size_t begin,
    size_t end,
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C
)
{
    DLIB_CASSERT(dest.size()==src1.size());
    DLIB_CASSERT(dest.size()==src2.size());
    DLIB_CASSERT(dest.size()==src3.size());
    DLIB_CASSERT(begin <= end && end <= dest.size());
    launch_kernel(_cuda_affine_transform_range, max_jobs(end-begin),
                  dest.device(), src1.device(), src2.device(), src3.device(),
                  begin, end, A, B, C);
}
// -----------------------------------------------------------------------------------
// Per-element affine transform: d[i] = A[i]*s[i] + B[i]
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A[idx]*s[idx] + B[idx];
}
// Like _cuda_affine_transform2, but A and B have only bs elements which
// are reused cyclically (broadcast across samples): d[i] = A[i%bs]*s[i] + B[i%bs]
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto j = idx%bs;
        d[idx] = A[j]*s[idx] + B[j];
    }
}
// dest = A*src + B elementwise, where A and B are tensors.  A and B must
// either have one sample each (they are then broadcast across all samples
// of src) or have the same number of samples as src.
void affine_transform(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src));
DLIB_CASSERT(
((A.num_samples()==1 && B.num_samples()==1) ||
(A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())));
DLIB_CASSERT(
A.nr()==B.nr() && B.nr()==src.nr() &&
A.nc()==B.nc() && B.nc()==src.nc() &&
A.k() ==B.k() && B.k()==src.k(),
"\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr()
<<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc()
<<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k()
);
if (A.num_samples() == 1)
{
// Broadcast case: A/B are reused for every sample of src.
launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
}
else
{
// Per-element case: A/B have the same dimensions as src.
launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
}
}
// ----------------------------------------------------------------------------------------
// Computes the Adam update step s for elements [begin,end), updating the
// first and second moment accumulators m and v in place.  alpha is the
// bias-corrected learning rate computed by the host wrapper.
__global__ void _cuda_compute_adam_update(
size_t begin,
size_t end,
float* s,
float* m,
float* v,
const float alpha,
const float weight_decay,
const float momentum1,
const float momentum2,
const float* params,
const float* params_grad
)
{
// Small constant guarding against division by zero.
const float eps = 1e-8;
// The loop is equivalent to doing this:
// m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
// v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
// s = -alpha*m/(sqrt(v) + eps);
for (auto i : grid_stride_range(begin, end))
{
// Effective gradient includes the L2 weight decay term.
float g = (weight_decay*params[i] + params_grad[i]);
m[i] = momentum1*m[i] + (1-momentum1)*g;
v[i] = momentum2*v[i] + (1-momentum2)*g*g;
s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
}
}
// Host wrapper for the Adam optimizer update.  t is the (1-based) step
// counter used for bias correction; the update is written into s for the
// range [begin,end), with m and v updated in place.
void compute_adam_update (
size_t begin,
size_t end,
tensor& s,
tensor& m,
tensor& v,
const float t,
const float learning_rate,
const float weight_decay,
const float momentum1,
const float momentum2,
const tensor& params,
const tensor& params_grad
)
{
DLIB_CASSERT(s.size() == m.size() &&
s.size() == v.size() &&
s.size() == params.size() &&
s.size() == params_grad.size());
DLIB_CASSERT(begin <= end && end <= params.size());
// Bias-corrected step size: alpha = lr*sqrt(1-momentum2^t)/(1-momentum1^t).
const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));
launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
begin, end, s.device(), m.device(), v.device(), alpha, weight_decay,
momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
// Per-channel affine transform: every element in channel k gets
// d[i] = A[k]*s[i] + B[k], where bs = elements per channel plane (nr*nc)
// and ks = number of channels.
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] = A[chan]*s[idx] + B[chan];
    }
}
// dest = A*src + B where A and B hold one coefficient per channel of src
// (as used by batch-norm style layers).
void affine_transform_conv(
    tensor& dest,
    const tensor& src,
    const tensor& A,
    const tensor& B
)
{
    DLIB_CASSERT(have_same_dimensions(dest, src));
    DLIB_CASSERT(have_same_dimensions(A, B));
    DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k());
    launch_kernel(_cuda_affine_transform_conv, max_jobs(dest.size()),
                  dest.device(), src.device(), src.size(),
                  A.device(), B.device(), src.nr()*src.nc(), src.k());
}
// -----------------------------------------------------------------------------------
// Sums the per-sample gradients into a single bias gradient: thread i
// sums in[i], in[i+n], in[i+2n], ... up to total_n and stores the sum in
// out[i].
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
    for (auto i : grid_stride_range(0, n))
    {
        float sum = in[i];
        for (size_t j = i+n; j < total_n; j+=n)
            sum += in[j];
        out[i] = sum;
    }
}
// grad = sum over samples of gradient_input.  grad must have exactly one
// sample; its k/nr/nc dimensions must match gradient_input's.
void assign_bias_gradient (
    tensor& grad,
    const tensor& gradient_input
)
{
    DLIB_CASSERT(
        grad.num_samples() == 1 &&
        gradient_input.k() == grad.k() &&
        gradient_input.nr() == grad.nr() &&
        gradient_input.nc() == grad.nc() &&
        gradient_input.size() > 0);
    launch_kernel(_add_bias_gradient, max_jobs(grad.size()),
                  grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
}
// ----------------------------------------------------------------------------------------
// Fills out[0..n) with val.
__global__ void _set_tensor(float* out, size_t n, const float val)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] = val;
}
// Sets every element of t to value.
void set_tensor (
    tensor& t,
    float value
)
{
    launch_kernel(_set_tensor, max_jobs(t.size()), t.device(), t.size(), value);
}
// ----------------------------------------------------------------------------------------
// Multiplies out[0..n) by val in place.
__global__ void _scale_tensor(float* out, size_t n, const float val)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] *= val;
}
// Scales every element of t by value, in place.
void scale_tensor (
    tensor& t,
    float value
)
{
    launch_kernel(_scale_tensor, max_jobs(t.size()), t.device(), t.size(), value);
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// Binarizes d in place: 1 where d[i] > thresh, otherwise 0.
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
    for (auto idx : grid_stride_range(0, n))
    {
        if (d[idx] > thresh)
            d[idx] = 1;
        else
            d[idx] = 0;
    }
}
// In-place binarization of data against thresh (see _cuda_threshold).
void threshold (
    tensor& data,
    float thresh
)
{
    launch_kernel(_cuda_threshold, max_jobs(data.size()), data.device(), data.size(), thresh);
}
// ------------------------------------------------------------------------------------
// Accumulates the dot product of a and b into *result: each thread forms
// a partial sum over its grid-stride slice, then the partials are merged
// with a warp reduction followed by atomic adds.
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
    float partial = 0;
    for (auto idx : grid_stride_range(0, n))
        partial += a[idx]*b[idx];
    warp_reduce_atomic_add(*result, partial);
}
// Accumulates dot(a,b) into result[idx].  Note the kernel adds into the
// existing value rather than overwriting it.
void dot (
    const tensor& a,
    const tensor& b,
    tensor& result,
    size_t idx
)
{
    DLIB_CASSERT(a.size() == b.size());
    DLIB_CASSERT(idx < result.size());
    launch_kernel(_cuda_dot, max_jobs(a.size()),
                  a.device(), b.device(), a.size(), result.device()+idx);
}
// ----------------------------------------------------------------------------------------
// PReLU forward pass: d[i] = s[i] if s[i] > 0, otherwise p*s[i], where p
// is the single learned parameter pointed to by pp.
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
    const float p = *pp;
    for (auto idx : grid_stride_range(0, n))
        d[idx] = (s[idx] > 0) ? s[idx] : p*s[idx];
}
// PReLU activation: dest = src where src > 0, otherwise param*src.
void prelu (
    tensor& dest,
    const tensor& src,
    const tensor& param
)
{
    launch_kernel(_cuda_prelu, max_jobs(dest.size()),
                  src.device(), dest.device(), src.size(), param.device());
}
// ----------------------------------------------------------------------------------------
// PReLU backward pass.  Accumulates (+=) the input gradient gi into out,
// scaled by p where the forward input s was <= 0, and accumulates the
// gradient with respect to the parameter p into *ppgrad.
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
const float p = *pp;
// Per-thread partial of the parameter gradient.
float pgrad = 0;
for(auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
{
out[i] += gi[i];
}
else
{
out[i] += p*gi[i];
// d(p*s)/dp = s, so the parameter gradient picks up gi*s here.
pgrad += gi[i]*s[i];
}
}
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*ppgrad, pgrad);
}
// PReLU backward pass: accumulates the data gradient into grad and writes
// the parameter gradient into params_grad (zeroed here first, since the
// kernel accumulates into it atomically).
void prelu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input,
    const tensor& param,
    tensor& params_grad
)
{
    params_grad = 0;
    launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
                  grad.device(), src.device(), gradient_input.device(), grad.size(),
                  param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
// Leaky ReLU forward pass: d[i] = s[i] if s[i] > 0, otherwise alpha*s[i].
__global__ void _cuda_leaky_relu(const float* s, float* d, size_t n, const float alpha)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = (s[idx] > 0) ? s[idx] : alpha * s[idx];
}
// Leaky ReLU activation with slope alpha on the negative side.
void leaky_relu(
    tensor& dest,
    const tensor &src,
    const float alpha
)
{
    launch_kernel(_cuda_leaky_relu, max_jobs(dest.size()),
                  src.device(), dest.device(), src.size(), alpha);
}
// ----------------------------------------------------------------------------------------
// Leaky ReLU backward pass, assignment form (out may alias gi):
// out[i] = gi[i] scaled by 1 or alpha depending on the sign of s[i].
__global__ void _cuda_leaky_relu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] = (s[idx] > 0) ? gi[idx] : alpha * gi[idx];
}
// Leaky ReLU backward pass, accumulating form:
// out[i] += gi[i] scaled by 1 or alpha depending on the sign of s[i].
__global__ void _cuda_leaky_relu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] += (s[idx] > 0) ? gi[idx] : alpha * gi[idx];
}
// Leaky ReLU backward pass.  If grad aliases gradient_input the gradient
// is assigned in place; otherwise it is accumulated into grad.
void leaky_relu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input,
    const float alpha
)
{
    float* out = grad.device();
    const float* gi = gradient_input.device();
    const bool in_place = (out == gi);
    if (in_place)
        launch_kernel(_cuda_leaky_relu_gradient_inplace, max_jobs(grad.size()),
                      out, src.device(), gi, grad.size(), alpha);
    else
        launch_kernel(_cuda_leaky_relu_gradient, max_jobs(grad.size()),
                      out, src.device(), gi, grad.size(), alpha);
}
// ----------------------------------------------------------------------------------------
// Mish forward pass: x*tanh(softplus(x)) computed via the algebraically
// equivalent form x - 2x/(e^2x... ) — specifically d = s - 2s/(e*e + 2e + 2)
// with e = exp(s).
__global__ void _cuda_mish(const float* s, float* d, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto e = std::exp(s[idx]);
        const auto delta = e*e + 2*e + 2;
        d[idx] = s[idx] - 2*s[idx]/delta;
    }
}
// Mish activation: dest = src*tanh(softplus(src)).
void mish (
    tensor& dest,
    const tensor& src
)
{
    launch_kernel(_cuda_mish, max_jobs(dest.size()), src.device(), dest.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// Derivative of the mish activation at x.  Saturates to 1 for x >= 8 and
// 0 for x <= -8 to avoid overflow in exp(x).
__device__ float mish_compute_gradient(float x)
{
if (x >= 8)
return 1.f;
if (x <= -8)
return 0.f;
const auto e = std::exp(x);
const auto delta = 2*e + e*e + 2;
const auto omega = 4*(x + 1) + 4*e*e + e*e*e + e*(4*x + 6);
// Closed-form mish derivative: e*omega/delta^2.
return e*omega/(delta*delta);
}
// Mish backward pass, assignment form (out may alias gi).
__global__ void _cuda_mish_gradient_inplace(float* out, const float* s, const float* gi, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] = gi[idx]*mish_compute_gradient(s[idx]);
}
// Mish backward pass, accumulating form.
__global__ void _cuda_mish_gradient(float* out, const float* s, const float* gi, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] += gi[idx]*mish_compute_gradient(s[idx]);
}
// Mish backward pass.  Assigns when grad aliases gradient_input,
// accumulates otherwise.
void mish_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input
)
{
    float* out = grad.device();
    const float* gi = gradient_input.device();
    const bool in_place = (out == gi);
    if (in_place)
        launch_kernel(_cuda_mish_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size());
    else
        launch_kernel(_cuda_mish_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size());
}
// ----------------------------------------------------------------------------------------
// GELU forward pass: d[i] = s[i]*Phi(s[i]) using the exact normal CDF.
__global__ void _cuda_gelu(const float* s, float* d, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s[idx] * normcdf(s[idx]);
}
// GELU activation: dest = src*Phi(src).
void gelu (
    tensor& dest,
    const tensor& src
)
{
    launch_kernel(_cuda_gelu, max_jobs(dest.size()), src.device(), dest.device(), src.size());
}
// ----------------------------------------------------------------------------------------
// Derivative of GELU at x: d/dx [x*Phi(x)] = Phi(x) + x*phi(x), where
// phi is the standard normal pdf exp(-x^2/2)/sqrt(2*pi).
__device__ float gelu_compute_gradient(float x)
{
const float beta = 1.0f / CUDART_SQRT_2PI;
const float cdf = normcdf(x);
const float pdf = beta*std::exp(-0.5f*x*x);
return cdf + x * pdf;
}
// GELU backward pass, assignment form (out may alias gi).
__global__ void _cuda_gelu_gradient_inplace(float* out, const float* s, const float* gi, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] = gi[idx]*gelu_compute_gradient(s[idx]);
}
// GELU backward pass, accumulating form.
__global__ void _cuda_gelu_gradient(float* out, const float* s, const float* gi, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        out[idx] += gi[idx]*gelu_compute_gradient(s[idx]);
}
// GELU backward pass.  Assigns when grad aliases gradient_input,
// accumulates otherwise.
void gelu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input
)
{
    float* out = grad.device();
    const float* gi = gradient_input.device();
    const bool in_place = (out == gi);
    if (in_place)
        launch_kernel(_cuda_gelu_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size());
    else
        launch_kernel(_cuda_gelu_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size());
}
// ----------------------------------------------------------------------------------------
// Bilinearly resamples each channel of s (snr rows x snc cols) into d.
// dsize = total output elements, dchan_size = output elements per channel,
// dnc = output columns; x_scale/y_scale map output pixel coords to source
// coords.  Assumes contiguous (unstrided) layouts for both tensors.
__global__ void _cuda_resize_bilinear(size_t dsize, size_t dchan_size, size_t dnc, float* d,
size_t schan_size, int snr, int snc, const float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
// Decompose the flat output index into channel and (r,c) within it.
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Source-space location of this output pixel; clamp the lower-right
// neighbors to stay inside the source image.
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
// Bilinear blend of the 4 neighboring source pixels.
float tl = s[sidx+top*snc+left];
float tr = s[sidx+top*snc+right];
float bl = s[sidx+bottom*snc+left];
float br = s[sidx+bottom*snc+right];
float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
tb_frac*((1-lr_frac)*bl + lr_frac*br);
d[i] = temp;
}
}
// Strided variant of _cuda_resize_bilinear: rows and channels of both
// tensors may be padded, so explicit row strides and a strided dest
// channel size are used for addressing.  schan_size here is the SOURCE
// channel stride (see the caller).
__global__ void _cuda_resize_bilinear_strided(size_t dsize, size_t dchan_size, size_t dnc, float* d,
size_t schan_size, int snr, int snc, const float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
// Logical (channel, r, c) coordinates of output element i.
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Physical (strided) destination index.
const int didx = channel*dest_chan_size_strided + r*dest_row_stride+c;
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
// Bilinear blend of the 4 neighboring source pixels (strided rows).
float tl = s[sidx+top*src_row_stride+left];
float tr = s[sidx+top*src_row_stride+right];
float bl = s[sidx+bottom*src_row_stride+left];
float br = s[sidx+bottom*src_row_stride+right];
float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) +
tb_frac*((1-lr_frac)*bl + lr_frac*br);
d[didx] = temp;
}
}
// Bilinearly resizes each channel plane of src into dest.  The stride
// arguments allow both tensors to be padded views; when the strides match
// the natural contiguous layout, a simpler kernel without stride
// arithmetic is used.
void resize_bilinear (
tensor& dest,
long dest_row_stride,
long dest_channel_stride,
const tensor& src,
long src_row_stride,
long src_channel_stride
)
{
DLIB_CASSERT(is_same_object(dest, src)==false);
DLIB_CASSERT(dest.num_samples() == src.num_samples());
DLIB_CASSERT(dest.k() == src.k());
if (dest.size() == 0 || src.size() == 0)
return;
// Map output pixel coordinates to source coordinates; the max() guards
// against division by zero for 1-pixel-wide/tall outputs.
const float x_scale = (src.nc()-1)/(float)std::max<long>((dest.nc()-1),1);
const float y_scale = (src.nr()-1)/(float)std::max<long>((dest.nr()-1),1);
if (dest.nc() == dest_row_stride && dest.nr()*dest.nc()==dest_channel_stride &&
src.nc() == src_row_stride && src.nr()*src.nc()==src_channel_stride)
{
// Contiguous layouts: use the unstrided kernel.
launch_kernel(_cuda_resize_bilinear,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src.nr()*src.nc(), src.nr(), src.nc(), src.device(),
x_scale, y_scale);
}
else
{
// Padded layouts: pass the explicit strides.
launch_kernel(_cuda_resize_bilinear_strided,
dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(),
src_channel_stride, src.nr(), src.nc(), src.device(),
x_scale, y_scale, dest_row_stride, src_row_stride, dest_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Backward pass of bilinear resize for contiguous layouts: scatters each
// upstream gradient value d[i] back onto the 4 source pixels it was
// interpolated from, weighted by the same bilinear coefficients.
// atomicAdd is required because neighboring output pixels can map to the
// same source pixel.
__global__ void _cuda_resize_bilinear_gradient(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale)
{
for(auto i : grid_stride_range(0, dsize))
{
const float tmp = d[i];
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Same neighbor/weight computation as the forward kernel.
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
atomicAdd(s+sidx+top*snc+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*snc+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*snc+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*snc+right, tmp*(tb_frac)*(lr_frac));
}
}
// Strided variant of _cuda_resize_bilinear_gradient: rows/channels of
// both tensors may be padded, so explicit row strides and a strided dest
// channel size are used for addressing.
__global__ void _cuda_resize_bilinear_gradient_strided(size_t dsize, size_t dchan_size, size_t dnc, const float* d,
size_t schan_size, int snr, int snc, float* s,
const float x_scale, const float y_scale,
size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided
)
{
for(auto i : grid_stride_range(0, dsize))
{
// Logical (channel, r, c) coordinates of gradient-input element i.
const int idx = i%dchan_size;
const int channel = i/dchan_size;
const int didx = channel*dest_chan_size_strided;
const int sidx = channel*schan_size;
const int r = idx/dnc;
const int c = idx%dnc;
// Read the upstream gradient from its strided location.
const float tmp = d[didx + r*dest_row_stride+c];
const float y = r*y_scale;
const int top = static_cast<int>(::floor(y));
const int bottom = ::min(top+1, snr-1);
const float tb_frac = y - top;
const float x = c*x_scale;
const int left = static_cast<int>(::floor(x));
const int right = ::min(left+1, snc-1);
const float lr_frac = x - left;
// Scatter onto the 4 contributing source pixels (strided rows).
atomicAdd(s+sidx+top*src_row_stride+left, tmp*(1-tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+top*src_row_stride+right, tmp*(1-tb_frac)*(lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+left, tmp*(tb_frac)*(1-lr_frac));
atomicAdd(s+sidx+bottom*src_row_stride+right, tmp*(tb_frac)*(lr_frac));
}
}
// Backward pass of resize_bilinear: accumulates gradient_input (the
// gradient of the resized output) into grad (the gradient of the resize
// input), using the same contiguous/strided kernel split as the forward
// function.
void resize_bilinear_gradient (
tensor& grad,
long grad_row_stride,
long grad_channel_stride,
const tensor& gradient_input,
long gradient_input_row_stride,
long gradient_input_channel_stride
)
{
DLIB_CASSERT(is_same_object(grad, gradient_input)==false);
DLIB_CASSERT(gradient_input.num_samples() == grad.num_samples());
DLIB_CASSERT(gradient_input.k() == grad.k());
if (grad.size() == 0 || gradient_input.size() == 0)
return;
// Scale factors mirror the forward pass (grad plays the role of src).
const float x_scale = (grad.nc()-1)/(float)std::max<long>((gradient_input.nc()-1),1);
const float y_scale = (grad.nr()-1)/(float)std::max<long>((gradient_input.nr()-1),1);
if (grad.nc() == grad_row_stride && grad.nr()*grad.nc()==grad_channel_stride &&
gradient_input.nc() == gradient_input_row_stride && gradient_input.nr()*gradient_input.nc()==gradient_input_channel_stride)
{
// Contiguous layouts: use the unstrided kernel.
launch_kernel(_cuda_resize_bilinear_gradient,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad.nr()*grad.nc(), grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale);
}
else
{
// Padded layouts: pass the explicit strides.
launch_kernel(_cuda_resize_bilinear_gradient_strided,
gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(),
grad_channel_stride, grad.nr(), grad.nc(), grad.device(),
x_scale, y_scale, gradient_input_row_stride, grad_row_stride, gradient_input_channel_stride);
}
}
// ----------------------------------------------------------------------------------------
// Accumulates size elements from src into dest, where the data is laid
// out as consecutive blocks of block_size floats and successive blocks
// are dest_stride / src_stride apart in their respective arrays.
__global__ void _cuda_copy_tensor_add_to (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for (auto i : grid_stride_range(0, size))
    {
        const size_t which_block = i/block_size;
        const size_t offset = i%block_size;
        dest[which_block*dest_stride + offset] += src[which_block*src_stride + offset];
    }
}
// Same block-strided layout as _cuda_copy_tensor_add_to, but assigns
// rather than accumulates.
__global__ void _cuda_copy_tensor (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size)
{
    for (auto i : grid_stride_range(0, size))
    {
        const size_t which_block = i/block_size;
        const size_t offset = i%block_size;
        dest[which_block*dest_stride + offset] = src[which_block*src_stride + offset];
    }
}
// Copies (or accumulates, when add_to is true) count_k channels from src
// starting at channel src_k_offset into dest starting at channel
// dest_k_offset, for every sample.  nr/nc of both tensors must match.
void copy_tensor(
bool add_to,
tensor& dest,
size_t dest_k_offset,
const tensor& src,
size_t src_k_offset,
size_t count_k
)
{
// Per-sample strides in each tensor, and the number of contiguous
// floats copied per sample (count_k whole channel planes).
const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k());
const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k());
const size_t block_size = count_k * dest.nc() * dest.nr();
DLIB_CASSERT(dest.num_samples() == src.num_samples() &&
dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size");
DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor");
DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor");
// Advance both base pointers to the first channel of interest.
float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr();
const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();;
if (add_to)
{
launch_kernel(_cuda_copy_tensor_add_to, max_jobs(dest.size()),
dest_p, block_size*dest.num_samples(),
src_p, dest_sample_size, src_sample_size, block_size);
}
else
{
launch_kernel(_cuda_copy_tensor, max_jobs(dest.size()),
dest_p, block_size*dest.num_samples(),
src_p, dest_sample_size, src_sample_size, block_size);
}
}
// ----------------------------------------------------------------------------------------
// Numerically stable computation of log(1+exp(x)) (softplus), split into
// regimes to avoid overflow/underflow of exp():
//   x <= -18       : log1p(e^x) ~= e^x
//   -18 < x <= 9   : evaluate log1p(exp(x)) directly
//   9 < x <= 16    : log(1+e^x) = x + log(1+e^-x) ~= x + e^-x
//   x > 16         : ~= x
// Note: use std::log1p/std::exp (the C++ float overloads) throughout —
// std::log1pf is not a standard name and the original mixed std::exp
// with the global expf.
__device__ float cuda_log1pexp(float x)
{
    if (x <= -18)
        return std::exp(x);
    else if (-18 < x && x <= 9)
        return std::log1p(std::exp(x));
    else if (9 < x && x <= 16)
        return x + std::exp(-x);
    else
        return x;
}
// Per-pixel binary log loss and gradient.  On entry g must contain
// sigmoid(out_data) (the caller, compute_loss_binary_log_per_pixel::
// do_work, fills it via sigmoid()).  truth holds +1/-1 style targets
// scaled by a per-pixel weight; a value of exactly 0 means "ignore this
// pixel".  The total loss is accumulated into *loss_out.
__global__ void _cuda_compute_loss_binary_log_per_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale)
{
float loss = 0;
for(auto i : grid_stride_range(0, n))
{
const float y = truth[i];
if (y > 0.f)
{
// Positive target: loss = y * -log(sigmoid(out)) = y*log1pexp(-out).
const float temp = cuda_log1pexp(-out_data[i]);
loss += y*temp;
g[i] = y*scale*(g[i]-1);
}
else if (y < 0.f)
{
// Negative target: loss = -y * -log(1-sigmoid(out)).
const float temp = -(-out_data[i]-cuda_log1pexp(-out_data[i]));
loss += -y*temp;
g[i] = -y*scale*g[i];
}
else
{
// y == 0 marks a pixel to be ignored: zero gradient, no loss.
g[i] = 0.f;
}
}
warp_reduce_atomic_add(*loss_out, loss);
}
// ----------------------------------------------------------------------------------------
// Returns log(max(x, epsilon)), preventing log of zero or of a very
// small number.
__device__ float cuda_safe_log(float x, float epsilon = 1e-10)
{
    return (x >= epsilon) ? ::log(x) : ::log(epsilon);
}
// Per-pixel multiclass log loss and gradient.  On entry g must contain
// softmax of the network output (the caller fills it via softmax()).
// truth holds one uint16_t class label per pixel (one plane per sample);
// pixels labeled label_to_ignore contribute no loss and a zero gradient.
__global__ void _cuda_compute_loss_multiclass_log_per_pixel(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, uint16_t label_to_ignore, const float scale)
{
float loss = 0;
for(auto i : grid_stride_range(0, n))
{
// k = which class channel this element belongs to; idx = the
// corresponding pixel location inside the label planes.
const size_t k = (i/plane_size)%nk;
const size_t idx = (i%plane_size) + plane_size*(i/sample_size);
const size_t y = truth[idx];
if (k == y)
{
// True-class channel: -log(p) loss and p-1 gradient.
loss -= cuda_safe_log(g[i]);
g[i] = scale*(g[i] - 1);
}
else if (y == label_to_ignore)
{
g[i] = 0.f;
}
else
{
g[i] = scale*g[i];
}
}
warp_reduce_atomic_add(*loss_out, loss);
}
// Weighted variant of _cuda_compute_loss_multiclass_log_per_pixel: each
// pixel's loss and gradient is multiplied by weights[idx].  There is no
// ignore label here; a zero weight zeroes the contribution instead.
__global__ void _cuda_compute_loss_multiclass_log_per_pixel_weighted(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, const float* weights, const float scale)
{
float loss = 0;
for(auto i : grid_stride_range(0, n))
{
// k = class channel; idx = pixel location in the label/weight planes.
const size_t k = (i/plane_size)%nk;
const size_t idx = (i%plane_size) + plane_size*(i/sample_size);
const size_t y = truth[idx];
const float weight = weights[idx];
if (k == y)
{
loss -= weight*cuda_safe_log(g[i]);
g[i] = weight*scale*(g[i] - 1);
}
else
{
g[i] = weight*scale*g[i];
}
}
warp_reduce_atomic_add(*loss_out, loss);
}
// ----------------------------------------------------------------------------------------
// Per-element squared-error loss and gradient: loss += (truth-out)^2 and
// g = -(truth-out)*scale.  The total is accumulated into *loss_out.
__global__ void _cuda_compute_loss_mean_squared_per_channel_and_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale)
{
float loss = 0;
for (auto i : grid_stride_range(0, n))
{
const float y = truth[i];
const float temp = y - out_data[i];
loss += temp * temp;
g[i] = -temp * scale;
}
warp_reduce_atomic_add(*loss_out, loss);
}
// ----------------------------------------------------------------------------------------
// Computes the per-pixel binary log loss and its gradient on the GPU.
// gradient is first filled with sigmoid(subnetwork_output), then the
// kernel converts it to the loss gradient in place; the scalar loss is
// accumulated into loss_work_buffer and copied back to the host.
void compute_loss_binary_log_per_pixel::
do_work(
cuda_data_ptr<float> loss_work_buffer,
cuda_data_ptr<const float> truth_buffer,
const tensor& subnetwork_output,
tensor& gradient,
double& loss
)
{
// Zero the device accumulator the kernel atomically adds into.
CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float)));
sigmoid(gradient, subnetwork_output);
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc());
launch_kernel(_cuda_compute_loss_binary_log_per_pixel, max_jobs(gradient.size()),
loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale);
float floss;
dlib::cuda::memcpy(&floss, loss_work_buffer);
loss = scale*floss;
}
// Computes the per-pixel multiclass log loss and its gradient on the GPU.
// gradient is first filled with softmax(subnetwork_output), then the
// kernel converts it to the loss gradient in place.  Pixels labeled
// uint16_t max are ignored.
void compute_loss_multiclass_log_per_pixel::
do_work(
cuda_data_ptr<float> loss_work_buffer,
cuda_data_ptr<const uint16_t> truth_buffer,
const tensor& subnetwork_output,
tensor& gradient,
double& loss
)
{
// Zero the device accumulator the kernel atomically adds into.
CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float)));
softmax(gradient, subnetwork_output);
static const uint16_t label_to_ignore = std::numeric_limits<uint16_t>::max();
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc());
launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel, max_jobs(gradient.size()),
loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), label_to_ignore, scale);
float floss;
dlib::cuda::memcpy(&floss, loss_work_buffer);
loss = scale*floss;
}
        // Weighted variant of the per-pixel multiclass log loss: weights_buffer
        // supplies one multiplier per pixel that scales both the loss contribution
        // and the gradient.
        void compute_loss_multiclass_log_per_pixel_weighted::
        do_work(
            cuda_data_ptr<float> loss_work_buffer,
            cuda_data_ptr<const uint16_t> truth_buffer,
            cuda_data_ptr<const float> weights_buffer,
            const tensor& subnetwork_output,
            tensor& gradient,
            double& loss
        )
        {
            // Zero the accumulator before the kernel atomically adds into it.
            CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float)));
            // gradient temporarily holds softmax(output); the kernel rewrites it in place.
            softmax(gradient, subnetwork_output);
            // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
            const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc());
            launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel_weighted, max_jobs(gradient.size()),
                          loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), weights_buffer.data(), scale);
            float floss;
            // Blocking device-to-host copy of the accumulated loss.
            dlib::cuda::memcpy(&floss, loss_work_buffer);
            loss = scale*floss;
        }
        // Computes mean-squared-error loss and gradient over every output element.
        // Unlike the per-pixel losses above, the average here also runs over the
        // channel dimension k().
        void compute_loss_mean_squared_per_channel_and_pixel::
        do_work(
            cuda_data_ptr<float> loss_work_buffer,
            cuda_data_ptr<const float> truth_buffer,
            const tensor& subnetwork_output,
            tensor& gradient,
            double& loss
        )
        {
            // Zero the accumulator before the kernel atomically adds into it.
            CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float)));
            // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
            const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.k() * subnetwork_output.nr() * subnetwork_output.nc());
            launch_kernel(_cuda_compute_loss_mean_squared_per_channel_and_pixel , max_jobs(gradient.size()),
                          loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale);
            float floss;
            // Blocking device-to-host copy of the accumulated loss.
            dlib::cuda::memcpy(&floss, loss_work_buffer);
            loss = scale*floss;
        }
// ----------------------------------------------------------------------------------------
}
}
|
3b10eb4442a7534922240f00bd8750eced9b9411.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/operators/math/blas.h"
namespace paddle {
namespace operators {
// Permutes src laid out as [batch, head, seq, size_per_head] into dst laid
// out as [batch, seq, head, size_per_head].
// Launch: grid = batch*head*seq blocks (one per row), block = size_per_head
// threads.  NOTE(review): the batch_size parameter is unused — the batch
// count is implied by the grid size; confirm before removing it.
template <typename T>
__global__ void transpose(T *src, T *dst, const int batch_size,
                          const int seq_len, const int head_num,
                          const int size_per_head) {
  // Decode (batch, head, seq) coordinates from the flat block index.
  int batch_id = blockIdx.x / (head_num * seq_len);
  int seq_id = blockIdx.x % seq_len;
  int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
  dst[batch_id * (head_num * seq_len * size_per_head) +
      seq_id * head_num * size_per_head + head_id * size_per_head +
      threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
// Element-wise addition, specialized for the scalar and vectorized payload
// types used by TransposeQkvKernel (float, float2, float4).
template <typename T>
inline __device__ T add_func(T a, T b);
template <>
__device__ float add_func<float>(float a, float b) {
  return a + b;
}
template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
  float2 c;
  c.x = a.x + b.x;
  c.y = a.y + b.y;
  return c;
}
template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
  float4 c;
  c.x = a.x + b.x;
  c.y = a.y + b.y;
  c.z = a.z + b.z;
  c.w = a.w + b.w;
  return c;
}
// Fused bias-add + transpose for packed QKV activations.
// Launch (see TransQKVWithBias): grid = (S, B, 3), block = (H, N, 1), where
// m in [0,3) selects Q/K/V, H may be head_size/4 or /2 when T is a vector type.
// NOTE(review): the "Bias: 3xSxB" comment below looks wrong — bias_offset
// only depends on (m, n), i.e. bias laid out as [3, N, H]; confirm with caller.
template <typename T>
__global__ void TransposeQkvKernel(const int H, const T *input, const T *bias,
                                   T *output) {
  // Input: BxSx3xNxH
  // Bias: 3xSxB
  // Output: 3xBxNxSxH
  int n = threadIdx.y;
  int s = blockIdx.x;
  int b = blockIdx.y;
  int m = blockIdx.z;
  const int N = blockDim.y;
  const int S = gridDim.x;
  const int B = gridDim.y;
  const int NH = N * H;
  const int NHS = NH * S;
  // Flat offsets into the [B,S,3,N,H] input and the [3,B,N,S,H] output.
  const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
  const int bias_offset = m * NH + n * H;
  const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
  const int i = threadIdx.x;
  output[out_offset + i] =
      add_func(input[in_offset + i], bias[bias_offset + i]);
}
// Host launcher for TransposeQkvKernel: reshapes BxSx3xNxH (+ bias 3xNxH)
// into 3xBxNxSxH.  Picks the widest vector type (float4/float2/float) that
// the head size and scratch size allow, to maximize memory throughput.
void TransQKVWithBias(const int batch, const int seq_len, const int head_size,
                      const int head_num, const float *input, const float *bias,
                      float *output, hipStream_t stream) {
  // BxSx3xNxH + 3xNxH -> 3xBxNxSxH
  int scratch_size = batch * head_num * seq_len * seq_len;
  const dim3 grid(seq_len, batch, 3);
  // scratch % 4 == 0 to ensure the alignment
  if (head_size % 4 == 0 && scratch_size % 4 == 0) {
    // 16-byte vectorized path: each thread moves a float4.
    const int h = head_size / 4;
    const float4 *input4 = reinterpret_cast<const float4 *>(input);
    const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
    float4 *output4 = reinterpret_cast<float4 *>(output);
    const dim3 block(h, head_num, 1);
    // limit h * head_num to max block size(1024).
    PADDLE_ENFORCE_LE(h * head_num, 1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num, head_size, 1024 * 4));
    hipLaunchKernelGGL(( TransposeQkvKernel<float4>), dim3(grid), dim3(block), 0, stream, h, input4, bias4,
                                                          output4);
  } else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
    // 8-byte vectorized path.
    const int h = head_size / 2;
    const float2 *input2 = reinterpret_cast<const float2 *>(input);
    const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
    float2 *output2 = reinterpret_cast<float2 *>(output);
    const dim3 block(h, head_num, 1);
    // limit h * head_num to max block size(1024).
    PADDLE_ENFORCE_LE(h * head_num, 1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num, head_size, 1024 * 2));
    hipLaunchKernelGGL(( TransposeQkvKernel<float2>), dim3(grid), dim3(block), 0, stream, h, input2, bias2,
                                                          output2);
  } else {
    // Scalar fallback: one float per thread.
    const dim3 block(head_size, head_num, 1);
    // limit head_size * head_num to max block size(1024).
    PADDLE_ENFORCE_LE(head_size * head_num, 1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num, head_size, 1024));
    hipLaunchKernelGGL(( TransposeQkvKernel<float>), dim3(grid), dim3(block), 0, stream, head_size, input,
                                                         bias, output);
  }
}
// Fused multi-head attention op kernel:
//   1) one GEMM producing packed QKV: (B*S, hidden) x (hidden, 3*N*H),
//   2) bias-add + transpose to 3xBxNxSxH (TransQKVWithBias),
//   3) batched QK^T softmax V (MultiHeadGPUComputeFunctor),
//   4) transpose back to (B, S, N*H).
// NOTE(review): the locals input_d and hidden are computed but never used.
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    using Tensor = framework::Tensor;
    auto *input = context.Input<framework::Tensor>("Input");
    auto *w = context.Input<framework::Tensor>("W");
    auto *bias = context.Input<framework::Tensor>("Bias");
    auto &bias_qk = detail::Ref(context.Input<framework::Tensor>("BiasQK"),
                                "Cannot find QK");
    auto *input_d = input->data<T>();
    auto *w_d = w->data<T>();
    auto *bias_d = bias->data<T>();
    auto *bias_qk_d = bias_qk.data<T>();
    // alpha is the attention score scaling (typically 1/sqrt(head_size)).
    T scale = static_cast<T>(context.Attr<float>("alpha"));
    int head_number = context.Attr<int>("head_number");
    // compute q*k with eltadd
    auto &device_ctx = context.template device_context<DeviceContext>();
    // should be (B * S * hidden)
    auto input_dims = input->dims();
    // shouble be (hidden * 3 * all_head_size)
    auto w_dims = w->dims();
    int batch = input_dims[0];
    int seq_len = input_dims[1];
    int hidden = input_dims[2];
    int all_head_size = w_dims[2];
    int head_size = all_head_size / head_number;
    auto *out = context.Output<framework::Tensor>("Out");
    out->Resize({batch, seq_len, all_head_size});
    auto *output_d = out->mutable_data<T>(context.GetPlace());
    // (B*S, hidden)
    const Tensor input_matrix =
        framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
    // (hidden, 3 * all_head_size)
    const Tensor w_matrix =
        framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
    Tensor temp_out_tensor;
    auto temp_out_dims =
        framework::make_ddim({batch, seq_len, 3, head_number, head_size});
    temp_out_tensor.Resize({batch * seq_len, framework::product(temp_out_dims) /
                                                 (batch * seq_len)});
    auto *temp_out_data = temp_out_tensor.mutable_data<T>(context.GetPlace());
    // (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
    auto blas = math::GetBlas<platform::CUDADeviceContext, T>(device_ctx);
    blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);
    // temp_out_tensor.Resize(temp_out_dims);
    Tensor multihead_temp_tensor;
    // B * head_number * S * S * 1 + B * S * 3 * N * H
    int scratch_size = batch * head_number * seq_len * seq_len * 1;
    multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
    auto *multihead_temp_data =
        multihead_temp_tensor.mutable_data<T>(context.GetPlace());
    // qkptr: attention score scratch; tptr: transposed QKV activations.
    auto *qkptr = multihead_temp_data;
    auto *tptr = multihead_temp_data + scratch_size;
    auto stream = device_ctx.stream();
    // Do the transpose with bias.
    // BxSx3xNxH => tptr: 3xBxNxSxH.
    TransQKVWithBias(batch, seq_len, head_size, head_number, temp_out_data,
                     bias_d, tptr, stream);
    math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
    multihead_compute_func(device_ctx, batch, seq_len, head_number, head_size,
                           qkptr, bias_qk_d, tptr, scale, T(0.0));
    // Final transpose back: BxNxSxH -> BxSxNxH (one block per row of H).
    int grid = batch * head_number * seq_len;
    int block = head_size;
    hipLaunchKernelGGL(( transpose<T>), dim3(grid), dim3(block), 0, stream, tptr, output_d, batch, seq_len,
                                                 head_number, head_size);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register the float GPU kernel for the "multihead_matmul" operator.
REGISTER_OP_CUDA_KERNEL(
    multihead_matmul,
    ops::MultiHeadMatMulV2Kernel<paddle::platform::CUDADeviceContext, float>);
| 3b10eb4442a7534922240f00bd8750eced9b9411.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/operators/math/blas.h"
namespace paddle {
namespace operators {
// Permutes src laid out as [batch, head, seq, size_per_head] into dst laid
// out as [batch, seq, head, size_per_head].
// Launch: grid = batch*head*seq blocks (one per row), block = size_per_head
// threads.  NOTE(review): the batch_size parameter is unused — the batch
// count is implied by the grid size; confirm before removing it.
template <typename T>
__global__ void transpose(T *src, T *dst, const int batch_size,
                          const int seq_len, const int head_num,
                          const int size_per_head) {
  // Decode (batch, head, seq) coordinates from the flat block index.
  int batch_id = blockIdx.x / (head_num * seq_len);
  int seq_id = blockIdx.x % seq_len;
  int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
  dst[batch_id * (head_num * seq_len * size_per_head) +
      seq_id * head_num * size_per_head + head_id * size_per_head +
      threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
// Element-wise addition, specialized for the scalar and vectorized payload
// types used by TransposeQkvKernel (float, float2, float4).
template <typename T>
inline __device__ T add_func(T a, T b);
template <>
__device__ float add_func<float>(float a, float b) {
  return a + b;
}
template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
  float2 c;
  c.x = a.x + b.x;
  c.y = a.y + b.y;
  return c;
}
template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
  float4 c;
  c.x = a.x + b.x;
  c.y = a.y + b.y;
  c.z = a.z + b.z;
  c.w = a.w + b.w;
  return c;
}
// Fused bias-add + transpose for packed QKV activations.
// Launch (see TransQKVWithBias): grid = (S, B, 3), block = (H, N, 1), where
// m in [0,3) selects Q/K/V, H may be head_size/4 or /2 when T is a vector type.
// NOTE(review): the "Bias: 3xSxB" comment below looks wrong — bias_offset
// only depends on (m, n), i.e. bias laid out as [3, N, H]; confirm with caller.
template <typename T>
__global__ void TransposeQkvKernel(const int H, const T *input, const T *bias,
                                   T *output) {
  // Input: BxSx3xNxH
  // Bias: 3xSxB
  // Output: 3xBxNxSxH
  int n = threadIdx.y;
  int s = blockIdx.x;
  int b = blockIdx.y;
  int m = blockIdx.z;
  const int N = blockDim.y;
  const int S = gridDim.x;
  const int B = gridDim.y;
  const int NH = N * H;
  const int NHS = NH * S;
  // Flat offsets into the [B,S,3,N,H] input and the [3,B,N,S,H] output.
  const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
  const int bias_offset = m * NH + n * H;
  const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
  const int i = threadIdx.x;
  output[out_offset + i] =
      add_func(input[in_offset + i], bias[bias_offset + i]);
}
// Host launcher for TransposeQkvKernel: reshapes BxSx3xNxH (+ bias 3xNxH)
// into 3xBxNxSxH.  Picks the widest vector type (float4/float2/float) that
// the head size and scratch size allow, to maximize memory throughput.
void TransQKVWithBias(const int batch, const int seq_len, const int head_size,
                      const int head_num, const float *input, const float *bias,
                      float *output, cudaStream_t stream) {
  // BxSx3xNxH + 3xNxH -> 3xBxNxSxH
  int scratch_size = batch * head_num * seq_len * seq_len;
  const dim3 grid(seq_len, batch, 3);
  // scratch % 4 == 0 to ensure the alignment
  if (head_size % 4 == 0 && scratch_size % 4 == 0) {
    // 16-byte vectorized path: each thread moves a float4.
    const int h = head_size / 4;
    const float4 *input4 = reinterpret_cast<const float4 *>(input);
    const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
    float4 *output4 = reinterpret_cast<float4 *>(output);
    const dim3 block(h, head_num, 1);
    // limit h * head_num to max block size(1024).
    PADDLE_ENFORCE_LE(h * head_num, 1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num, head_size, 1024 * 4));
    TransposeQkvKernel<float4><<<grid, block, 0, stream>>>(h, input4, bias4,
                                                           output4);
  } else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
    // 8-byte vectorized path.
    const int h = head_size / 2;
    const float2 *input2 = reinterpret_cast<const float2 *>(input);
    const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
    float2 *output2 = reinterpret_cast<float2 *>(output);
    const dim3 block(h, head_num, 1);
    // limit h * head_num to max block size(1024).
    PADDLE_ENFORCE_LE(h * head_num, 1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num, head_size, 1024 * 2));
    TransposeQkvKernel<float2><<<grid, block, 0, stream>>>(h, input2, bias2,
                                                           output2);
  } else {
    // Scalar fallback: one float per thread.
    const dim3 block(head_size, head_num, 1);
    // limit head_size * head_num to max block size(1024).
    PADDLE_ENFORCE_LE(head_size * head_num, 1024,
                      platform::errors::InvalidArgument(
                          "head_num (%d) * head_size (%d) should <= %d",
                          head_num, head_size, 1024));
    TransposeQkvKernel<float><<<grid, block, 0, stream>>>(head_size, input,
                                                          bias, output);
  }
}
// Fused multi-head attention op kernel:
//   1) one GEMM producing packed QKV: (B*S, hidden) x (hidden, 3*N*H),
//   2) bias-add + transpose to 3xBxNxSxH (TransQKVWithBias),
//   3) batched QK^T softmax V (MultiHeadGPUComputeFunctor),
//   4) transpose back to (B, S, N*H).
// NOTE(review): the locals input_d and hidden are computed but never used.
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    using Tensor = framework::Tensor;
    auto *input = context.Input<framework::Tensor>("Input");
    auto *w = context.Input<framework::Tensor>("W");
    auto *bias = context.Input<framework::Tensor>("Bias");
    auto &bias_qk = detail::Ref(context.Input<framework::Tensor>("BiasQK"),
                                "Cannot find QK");
    auto *input_d = input->data<T>();
    auto *w_d = w->data<T>();
    auto *bias_d = bias->data<T>();
    auto *bias_qk_d = bias_qk.data<T>();
    // alpha is the attention score scaling (typically 1/sqrt(head_size)).
    T scale = static_cast<T>(context.Attr<float>("alpha"));
    int head_number = context.Attr<int>("head_number");
    // compute q*k with eltadd
    auto &device_ctx = context.template device_context<DeviceContext>();
    // should be (B * S * hidden)
    auto input_dims = input->dims();
    // shouble be (hidden * 3 * all_head_size)
    auto w_dims = w->dims();
    int batch = input_dims[0];
    int seq_len = input_dims[1];
    int hidden = input_dims[2];
    int all_head_size = w_dims[2];
    int head_size = all_head_size / head_number;
    auto *out = context.Output<framework::Tensor>("Out");
    out->Resize({batch, seq_len, all_head_size});
    auto *output_d = out->mutable_data<T>(context.GetPlace());
    // (B*S, hidden)
    const Tensor input_matrix =
        framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
    // (hidden, 3 * all_head_size)
    const Tensor w_matrix =
        framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
    Tensor temp_out_tensor;
    auto temp_out_dims =
        framework::make_ddim({batch, seq_len, 3, head_number, head_size});
    temp_out_tensor.Resize({batch * seq_len, framework::product(temp_out_dims) /
                                                 (batch * seq_len)});
    auto *temp_out_data = temp_out_tensor.mutable_data<T>(context.GetPlace());
    // (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
    auto blas = math::GetBlas<platform::CUDADeviceContext, T>(device_ctx);
    blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);
    // temp_out_tensor.Resize(temp_out_dims);
    Tensor multihead_temp_tensor;
    // B * head_number * S * S * 1 + B * S * 3 * N * H
    int scratch_size = batch * head_number * seq_len * seq_len * 1;
    multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
    auto *multihead_temp_data =
        multihead_temp_tensor.mutable_data<T>(context.GetPlace());
    // qkptr: attention score scratch; tptr: transposed QKV activations.
    auto *qkptr = multihead_temp_data;
    auto *tptr = multihead_temp_data + scratch_size;
    auto stream = device_ctx.stream();
    // Do the transpose with bias.
    // BxSx3xNxH => tptr: 3xBxNxSxH.
    TransQKVWithBias(batch, seq_len, head_size, head_number, temp_out_data,
                     bias_d, tptr, stream);
    math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
    multihead_compute_func(device_ctx, batch, seq_len, head_number, head_size,
                           qkptr, bias_qk_d, tptr, scale, T(0.0));
    // Final transpose back: BxNxSxH -> BxSxNxH (one block per row of H).
    int grid = batch * head_number * seq_len;
    int block = head_size;
    transpose<T><<<grid, block, 0, stream>>>(tptr, output_d, batch, seq_len,
                                             head_number, head_size);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register the float GPU kernel for the "multihead_matmul" operator.
REGISTER_OP_CUDA_KERNEL(
    multihead_matmul,
    ops::MultiHeadMatMulV2Kernel<paddle::platform::CUDADeviceContext, float>);
|
44e435acffb8c3f00e1216e2e95ee812ac0a6abe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaradiusoutlierremoval.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a HIP API error with file/line context and, when abort is true,
// terminates the process using the error code as exit status.  Intended to
// be invoked through the gpuErrchk() macro above.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
   if (code != hipSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
//difference: radius = abs((cuda_in[row]*(0.5-cx)/fx)-(cuda_in[row]*((window*2 + 1)+0.5-cx)/fx));
__global__
void cudaROR(float* cuda_in, float* cuda_out, int window, int numOfNeighbours,int cols, int rows, float fx, float cx){
    // Radius outlier removal over a row-major depth image (rows x cols).
    // A pixel survives only if at least numOfNeighbours other pixels inside
    // the (2*window+1)^2 neighbourhood lie within a depth radius derived
    // from the camera intrinsics fx/cx; otherwise it is zeroed.
    int row = blockIdx.x * blockDim.x + threadIdx.x;  // flat pixel index
    int sizeofData = rows * cols;
    // Bug fix: was `row > sizeofData`, which let row == sizeofData through
    // and read/wrote one element past the end of the buffers.
    if (row >= sizeofData) return;
    // Depths below this threshold are treated as invalid and cleared.
    if(cuda_in[row]<0.0061){
        cuda_out[row] = 0;
        return;
    }
    int actual_grid_col = row%cols;
    int actual_grid_row = row/cols;
    // Radius scales with depth: metric width of a (2*window+1)-pixel span
    // back-projected through fx at this pixel's depth.
    double radius = abs((cuda_in[row]*(0.5-cx)/fx)-(cuda_in[row]*((window*2 + 1)+0.5-cx)/fx));
    // Clamp the search window to valid indices.  Bug fix: the clamp used
    // rows/cols with inclusive loops below, indexing one row/column past
    // the image border.
    int start_x = actual_grid_row-window<0?0:actual_grid_row-window,
        start_y = actual_grid_col-window<0?0:actual_grid_col-window;
    int des_x = actual_grid_row+window>rows-1?rows-1:actual_grid_row+window,
        des_y = actual_grid_col+window>cols-1?cols-1:actual_grid_col+window;
    int counter = 0;
    // Count neighbours whose depth difference lies within the radius.
    for(int i=start_x;i<=des_x;i++){
        for(int j=start_y;j<=des_y;j++){
            if(cuda_in[i*cols+j]!=0){
                double _diff = (cuda_in[i*cols+j] - cuda_in[row]);
                double mag = _diff*_diff;
                if(mag<radius*radius){
                    counter++;
                }
            }
        }
    }
    // +1 because the pixel always counts itself as its own neighbour.
    if(counter<numOfNeighbours+1){
        cuda_out[row] = 0;
    }
    else{
        cuda_out[row] = cuda_in[row];
    }
}
//difference: radius = window
__global__
void cudaROR(float* cuda_in, float* cuda_out, int window, int numOfNeighbours,int cols, int rows){
    // Fixed-radius variant of the outlier-removal kernel: the depth radius
    // equals `window`.  NOTE(review): window is a pixel count while depths
    // are metric — confirm the intended units with the caller.
    int row = blockIdx.x * blockDim.x + threadIdx.x;  // flat pixel index
    int sizeofData = rows * cols;
    // Bug fix: was `row > sizeofData`, which let row == sizeofData through
    // and read/wrote one element past the end of the buffers.
    if (row >= sizeofData) return;
    // Depths below this threshold are treated as invalid and cleared.
    if(cuda_in[row]<0.0061){
        cuda_out[row] = 0;
        return;
    }
    int actual_grid_col = row%cols;
    int actual_grid_row = row/cols;
    float radius = window;
    // Clamp the search window to valid indices.  Bug fix: the clamp used
    // rows/cols with inclusive loops below, indexing one row/column past
    // the image border.
    int start_x = actual_grid_row-window<0?0:actual_grid_row-window,
        start_y = actual_grid_col-window<0?0:actual_grid_col-window;
    int des_x = actual_grid_row+window>rows-1?rows-1:actual_grid_row+window,
        des_y = actual_grid_col+window>cols-1?cols-1:actual_grid_col+window;
    int counter = 0;
    // Count neighbours whose depth difference lies within the radius.
    for(int i=start_x;i<=des_x;i++){
        for(int j=start_y;j<=des_y;j++){
            if(cuda_in[i*cols+j]!=0){
                double _diff = (cuda_in[i*cols+j] - cuda_in[row]);
                double mag = _diff*_diff;
                if(mag<radius*radius){
                    counter++;
                }
            }
        }
    }
    // +1 because the pixel always counts itself as its own neighbour.
    if(counter<numOfNeighbours+1){
        cuda_out[row] = 0;
    }
    else{
        cuda_out[row] = cuda_in[row];
    }
}
/**************************************************************************************************
* Allocation of memory
* rows, cols - size of input depth map
* ************************************************************************************************/
cudaRadiusOutlierRemoval::cudaRadiusOutlierRemoval(int rows, int cols)
{
    // Pre-allocate the device input/output buffers for a rows x cols depth map.
    // Bug fix: the buffers store floats but were sized with sizeof(double),
    // allocating twice the memory actually used by the memcpys and kernels.
    this->rows = rows;
    this->cols = cols;
    gpuErrchk( hipMalloc((void **) &depth, rows*cols*sizeof(float)) );
    gpuErrchk( hipMalloc((void **) &out, rows*cols*sizeof(float)) );
}
cudaRadiusOutlierRemoval::~cudaRadiusOutlierRemoval()
{
    // Release the device buffers allocated by the constructor.
    gpuErrchk( hipFree(this->depth) );
    gpuErrchk( hipFree(this->out) );
}
/*******************************************************************************/
/*Inputs:
* depths_mat - depth map (types: CV_32F, CV_64F, CV_16U, CV_8U)
* window - size of window in pixels: radius = (window*2 + 1)
* n_neighbours - limit of neigbours for depth pixel
* use_intrinsics - use intrinsic paremeters fx and cx to calculate radius based in the depth
********************************************************************************/
// Runs the radius-outlier-removal kernel on depth_mat in place: the matrix
// is normalised to CV_32F (integer types scaled into [0,1]) for the GPU
// pass, filtered, then converted back to its original depth type.
void cudaRadiusOutlierRemoval::filter(cv::Mat &depth_mat, int window, int n_neighbours, bool use_intrinsic,float fx, float cx){
    assert(depth_mat.depth() == CV_32F || depth_mat.depth() == CV_64F || depth_mat.depth() == CV_16U || depth_mat.depth() == CV_8U);
    // Remember the original element type so we can convert back at the end.
    int type = depth_mat.depth();
    if(depth_mat.depth() == CV_16U){
        depth_mat.convertTo(depth_mat,CV_32F,1.0/(pow(2,16)-1),0);
    }
    else if(depth_mat.depth() == CV_8U){
        depth_mat.convertTo(depth_mat,CV_32F,1.0/(pow(2,8)-1),0);
    }
    else if(depth_mat.depth() == CV_64F){
        depth_mat.convertTo(depth_mat,CV_32F);
    }
    gpuErrchk( hipMemcpy(depth, (float*)depth_mat.data ,this->rows*this->cols*sizeof(float),hipMemcpyHostToDevice) );
    // 1-D launch, one thread per pixel.
    int N = this->rows*this->cols;
    int BLOCK_SIZE = N<1024?N:1024;
    dim3 dimBlock(BLOCK_SIZE,1);
    dim3 dimGrid(N/BLOCK_SIZE + 1,1);
    //core
    // NOTE(review): no hipGetLastError() after the launches — a kernel
    // failure would only surface at the following memcpy.
    if(use_intrinsic){
        hipLaunchKernelGGL(( cudaROR), dim3(dimGrid),dim3(dimBlock) , 0, 0, depth,out,window,n_neighbours,cols,rows,fx,cx);
        hipDeviceSynchronize();
    }
    else{
        hipLaunchKernelGGL(( cudaROR), dim3(dimGrid),dim3(dimBlock) , 0, 0, depth,out,window,n_neighbours,cols,rows);
        hipDeviceSynchronize();
    }
    gpuErrchk( hipMemcpy((float*)depth_mat.data, out ,this->rows*this->cols*sizeof(float),hipMemcpyDeviceToHost) );
    // Undo the normalisation, restoring the caller's element type.
    if(type == CV_16U){
        depth_mat.convertTo(depth_mat,CV_16U,pow(2,16)-1,0);
    }
    else if(type == CV_8U){
        depth_mat.convertTo(depth_mat,CV_8U,pow(2,8)-1,0);
    }
    else if(type == CV_64F){
        depth_mat.convertTo(depth_mat,CV_64F);
    }
}
| 44e435acffb8c3f00e1216e2e95ee812ac0a6abe.cu | #include "cudaradiusoutlierremoval.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//difference: radius = abs((cuda_in[row]*(0.5-cx)/fx)-(cuda_in[row]*((window*2 + 1)+0.5-cx)/fx));
__global__
void cudaROR(float* cuda_in, float* cuda_out, int window, int numOfNeighbours,int cols, int rows, float fx, float cx){
    // Radius outlier removal over a row-major depth image (rows x cols).
    // A pixel survives only if at least numOfNeighbours other pixels inside
    // the (2*window+1)^2 neighbourhood lie within a depth radius derived
    // from the camera intrinsics fx/cx; otherwise it is zeroed.
    int row = blockIdx.x * blockDim.x + threadIdx.x;  // flat pixel index
    int sizeofData = rows * cols;
    // Bug fix: was `row > sizeofData`, which let row == sizeofData through
    // and read/wrote one element past the end of the buffers.
    if (row >= sizeofData) return;
    // Depths below this threshold are treated as invalid and cleared.
    if(cuda_in[row]<0.0061){
        cuda_out[row] = 0;
        return;
    }
    int actual_grid_col = row%cols;
    int actual_grid_row = row/cols;
    // Radius scales with depth: metric width of a (2*window+1)-pixel span
    // back-projected through fx at this pixel's depth.
    double radius = abs((cuda_in[row]*(0.5-cx)/fx)-(cuda_in[row]*((window*2 + 1)+0.5-cx)/fx));
    // Clamp the search window to valid indices.  Bug fix: the clamp used
    // rows/cols with inclusive loops below, indexing one row/column past
    // the image border.
    int start_x = actual_grid_row-window<0?0:actual_grid_row-window,
        start_y = actual_grid_col-window<0?0:actual_grid_col-window;
    int des_x = actual_grid_row+window>rows-1?rows-1:actual_grid_row+window,
        des_y = actual_grid_col+window>cols-1?cols-1:actual_grid_col+window;
    int counter = 0;
    // Count neighbours whose depth difference lies within the radius.
    for(int i=start_x;i<=des_x;i++){
        for(int j=start_y;j<=des_y;j++){
            if(cuda_in[i*cols+j]!=0){
                double _diff = (cuda_in[i*cols+j] - cuda_in[row]);
                double mag = _diff*_diff;
                if(mag<radius*radius){
                    counter++;
                }
            }
        }
    }
    // +1 because the pixel always counts itself as its own neighbour.
    if(counter<numOfNeighbours+1){
        cuda_out[row] = 0;
    }
    else{
        cuda_out[row] = cuda_in[row];
    }
}
//difference: radius = window
__global__
void cudaROR(float* cuda_in, float* cuda_out, int window, int numOfNeighbours,int cols, int rows){
    // Fixed-radius variant of the outlier-removal kernel: the depth radius
    // equals `window`.  NOTE(review): window is a pixel count while depths
    // are metric — confirm the intended units with the caller.
    int row = blockIdx.x * blockDim.x + threadIdx.x;  // flat pixel index
    int sizeofData = rows * cols;
    // Bug fix: was `row > sizeofData`, which let row == sizeofData through
    // and read/wrote one element past the end of the buffers.
    if (row >= sizeofData) return;
    // Depths below this threshold are treated as invalid and cleared.
    if(cuda_in[row]<0.0061){
        cuda_out[row] = 0;
        return;
    }
    int actual_grid_col = row%cols;
    int actual_grid_row = row/cols;
    float radius = window;
    // Clamp the search window to valid indices.  Bug fix: the clamp used
    // rows/cols with inclusive loops below, indexing one row/column past
    // the image border.
    int start_x = actual_grid_row-window<0?0:actual_grid_row-window,
        start_y = actual_grid_col-window<0?0:actual_grid_col-window;
    int des_x = actual_grid_row+window>rows-1?rows-1:actual_grid_row+window,
        des_y = actual_grid_col+window>cols-1?cols-1:actual_grid_col+window;
    int counter = 0;
    // Count neighbours whose depth difference lies within the radius.
    for(int i=start_x;i<=des_x;i++){
        for(int j=start_y;j<=des_y;j++){
            if(cuda_in[i*cols+j]!=0){
                double _diff = (cuda_in[i*cols+j] - cuda_in[row]);
                double mag = _diff*_diff;
                if(mag<radius*radius){
                    counter++;
                }
            }
        }
    }
    // +1 because the pixel always counts itself as its own neighbour.
    if(counter<numOfNeighbours+1){
        cuda_out[row] = 0;
    }
    else{
        cuda_out[row] = cuda_in[row];
    }
}
/**************************************************************************************************
* Allocation of memory
* rows, cols - size of input depth map
* ************************************************************************************************/
cudaRadiusOutlierRemoval::cudaRadiusOutlierRemoval(int rows, int cols)
{
    // Pre-allocate the device input/output buffers for a rows x cols depth map.
    // Bug fix: the buffers store floats but were sized with sizeof(double),
    // allocating twice the memory actually used by the memcpys and kernels.
    this->rows = rows;
    this->cols = cols;
    gpuErrchk( cudaMalloc((void **) &depth, rows*cols*sizeof(float)) );
    gpuErrchk( cudaMalloc((void **) &out, rows*cols*sizeof(float)) );
}
cudaRadiusOutlierRemoval::~cudaRadiusOutlierRemoval()
{
    // Release the device buffers allocated by the constructor.
    gpuErrchk( cudaFree(this->depth) );
    gpuErrchk( cudaFree(this->out) );
}
/*******************************************************************************/
/*Inputs:
* depths_mat - depth map (types: CV_32F, CV_64F, CV_16U, CV_8U)
* window - size of window in pixels: radius = (window*2 + 1)
* n_neighbours - limit of neigbours for depth pixel
* use_intrinsics - use intrinsic paremeters fx and cx to calculate radius based in the depth
********************************************************************************/
// Runs the radius-outlier-removal kernel on depth_mat in place: the matrix
// is normalised to CV_32F (integer types scaled into [0,1]) for the GPU
// pass, filtered, then converted back to its original depth type.
void cudaRadiusOutlierRemoval::filter(cv::Mat &depth_mat, int window, int n_neighbours, bool use_intrinsic,float fx, float cx){
    assert(depth_mat.depth() == CV_32F || depth_mat.depth() == CV_64F || depth_mat.depth() == CV_16U || depth_mat.depth() == CV_8U);
    // Remember the original element type so we can convert back at the end.
    int type = depth_mat.depth();
    if(depth_mat.depth() == CV_16U){
        depth_mat.convertTo(depth_mat,CV_32F,1.0/(pow(2,16)-1),0);
    }
    else if(depth_mat.depth() == CV_8U){
        depth_mat.convertTo(depth_mat,CV_32F,1.0/(pow(2,8)-1),0);
    }
    else if(depth_mat.depth() == CV_64F){
        depth_mat.convertTo(depth_mat,CV_32F);
    }
    gpuErrchk( cudaMemcpy(depth, (float*)depth_mat.data ,this->rows*this->cols*sizeof(float),cudaMemcpyHostToDevice) );
    // 1-D launch, one thread per pixel.
    int N = this->rows*this->cols;
    int BLOCK_SIZE = N<1024?N:1024;
    dim3 dimBlock(BLOCK_SIZE,1);
    dim3 dimGrid(N/BLOCK_SIZE + 1,1);
    //core
    // NOTE(review): no cudaGetLastError() after the launches — a kernel
    // failure would only surface at the following memcpy.
    if(use_intrinsic){
        cudaROR<<< dimGrid,dimBlock >>>(depth,out,window,n_neighbours,cols,rows,fx,cx);
        cudaDeviceSynchronize();
    }
    else{
        cudaROR<<< dimGrid,dimBlock >>>(depth,out,window,n_neighbours,cols,rows);
        cudaDeviceSynchronize();
    }
    gpuErrchk( cudaMemcpy((float*)depth_mat.data, out ,this->rows*this->cols*sizeof(float),cudaMemcpyDeviceToHost) );
    // Undo the normalisation, restoring the caller's element type.
    if(type == CV_16U){
        depth_mat.convertTo(depth_mat,CV_16U,pow(2,16)-1,0);
    }
    else if(type == CV_8U){
        depth_mat.convertTo(depth_mat,CV_8U,pow(2,8)-1,0);
    }
    else if(type == CV_64F){
        depth_mat.convertTo(depth_mat,CV_64F);
    }
}
|
e473ad028e1ae7ebc7ee0f98f8342bcf0418ac24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
/* Round x up to the nearest power of two (x itself when already a power of
   two).  Smears the highest set bit of x-1 into every lower bit with shifts
   of 1,2,4,8,16, then adds one to carry into the next power of two.
   Note: nextPow2(0) wraps to 0, matching the original behavior. */
unsigned int nextPow2( unsigned int x ) {
  x -= 1;
  for (unsigned int shift = 1; shift <= 16; shift <<= 1)
    x |= x >> shift;
  return x + 1;
}
/* find out # of threads and # thread blocks for a particular kernel */
/* Pick a launch configuration for reduction kernel `whichKernel` over n
   elements.  Kernels 0-2 use one thread per element; kernels >= 3 halve the
   thread count because each thread sums two elements on its first load.
   Kernel 5 additionally caps the grid at maxBlocks so blocks loop. */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
  const bool twoPerThread = (whichKernel >= 3);

  threads = twoPerThread
      ? ((n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads)
      : ((n < maxThreads) ? nextPow2(n) : maxThreads);

  const int elemsPerBlock = twoPerThread ? threads * 2 : threads;
  blocks = (n + elemsPerBlock - 1) / elemsPerBlock;

  /* limit the total number of thread blocks for kernel 5 */
  if (whichKernel == 5 && blocks > maxBlocks)
    blocks = maxBlocks;
}
/* special type of reduction to account for floating point error */
/* Kahan (compensated) summation of data[0..n-1]: carries a running error
   term so single-precision accumulation stays accurate for large n.
   Precondition: n >= 1 (data[0] is always read). */
dtype reduce_cpu(dtype *data, int n) {
  dtype total = data[0];
  dtype compensation = (dtype)0.0;
  for (int idx = 1; idx < n; idx++)
  {
    dtype adjusted = data[idx] - compensation;
    dtype candidate = total + adjusted;
    /* recover the low-order bits lost in the addition above */
    compensation = (candidate - total) - adjusted;
    total = candidate;
  }
  return total;
}
// Reduction kernel with first-add-during-load and an unrolled final warp:
// each thread sums element i and i + n/2 into shared memory, the tree loop
// halves the active threads down to one warp, and the last 32 lanes finish
// without __syncthreads() (relying on the volatile scratch + warp lockstep).
// One partial sum per block is written to g_odata[bid].
// NOTE(review): the `if(n > 64)` / `if(n > 32)` guards below key off the
// total element count n rather than blockDim.x; for some small-n tail
// launches lanes read shared-memory slots no thread initialized.  The
// garbage never reaches scratch[0], but it would trip racecheck/initcheck —
// guards on blockDim.x would be cleaner.  Confirm before relying on this.
__global__ void
kernel4(dtype *g_idata, dtype *g_odata, unsigned int n)
{
  __shared__  volatile dtype scratch[MAX_THREADS];

  unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
  unsigned int i = bid * blockDim.x + threadIdx.x;
  unsigned int length = n/2;

  // First add during load: fold element i + n/2 into element i.
  if(i < ( n/2 )) {
    scratch[threadIdx.x] = g_idata[i] + g_idata[i + length];
  } else {
    scratch[threadIdx.x] = 0;
  }
  __syncthreads ();

  // Tree reduction down to 64 active elements (last warp handled below).
  for(unsigned int s = blockDim.x / 2; s > 32; s = s >> 1) {
    if( threadIdx.x < s ) {
      scratch[ threadIdx.x ] += scratch[ threadIdx.x + s ];
    }
    __syncthreads ();
  }

  // Final warp: no barriers needed within a single warp (volatile scratch).
  if (threadIdx.x < 32)
  {
    if(n > 64) {
      scratch[threadIdx.x] += scratch[threadIdx.x + 32];
    }
    if(n > 32) {
      scratch[threadIdx.x] += scratch[threadIdx.x + 16];
    }
    scratch[threadIdx.x] += scratch[threadIdx.x + 8];
    scratch[threadIdx.x] += scratch[threadIdx.x + 4];
    scratch[threadIdx.x] += scratch[threadIdx.x + 2];
    scratch[threadIdx.x] += scratch[threadIdx.x + 1];
  }

  // Lane 0 publishes this block's partial sum.
  if(threadIdx.x == 0) {
    g_odata[bid] = scratch[0];
  }
}
// Driver: fills N random floats, reduces them on the GPU with kernel4
// (multi-pass, in place on d_odata until one value remains), times the GPU
// and CPU reductions, and compares the results.
// Bug fixes: the result comparison used the *integer* abs() on a float
// difference (truncating toward zero and masking real errors), and the
// host/device allocations were never released.
int
main(int argc, char** argv)
{
  int i;

  /* data structures: h_odata and h_cpu are scalar results, not pointers */
  dtype *h_idata, h_odata, h_cpu;
  dtype *d_idata, *d_odata;

  /* timer */
  struct stopwatch_t* timer = NULL;
  long double t_kernel_4, t_cpu;

  /* which kernel are we running */
  int whichKernel;

  /* number of threads and thread blocks */
  int threads, blocks;

  int N;
  if(argc > 1) {
    N = atoi (argv[1]);
    printf("N: %d\n", N);
  } else {
    N = N_;
    printf("N: %d\n", N);
  }

  /* launch configuration for the first (full-size) pass */
  whichKernel = 4;
  getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
                          blocks, threads);

  /* initialize timer */
  stopwatch_init ();
  timer = stopwatch_create ();

  /* allocate memory */
  h_idata = (dtype*) malloc (N * sizeof (dtype));
  CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
  CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));

  /* Initialize array with small random values */
  srand48(time(NULL));
  for(i = 0; i < N; i++) {
    h_idata[i] = drand48() / 100000;
  }
  CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
                               hipMemcpyHostToDevice));

  /* ================================================== */
  /* GPU kernel */
  dim3 gb(blocks, 1, 1);
  dim3 tb(threads, 1, 1);

  /* warm up: absorb one-time startup cost so the timed run is fair */
  hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
  hipDeviceSynchronize ();

  stopwatch_start (timer);

  /* first pass: reduce N inputs to `blocks` partial sums */
  hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);

  /* keep reducing the partial sums in place until one value remains */
  int s = blocks;
  while(s > 1) {
    threads = 0;
    blocks = 0;
    getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
                            blocks, threads);

    dim3 gb(blocks, 1, 1);
    dim3 tb(threads, 1, 1);

    hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
    s = (s + threads * 2 - 1) / (threads * 2);
  }
  hipDeviceSynchronize ();

  t_kernel_4 = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute unrolled GPU reduction kernel: %Lg secs\n", t_kernel_4);

  double bw = (N * sizeof(dtype)) / (t_kernel_4 * 1e9);
  fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);

  /* copy result back from GPU */
  CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
                               hipMemcpyDeviceToHost));
  /* ================================================== */

  /* ================================================== */
  /* CPU kernel (Kahan-compensated reference) */
  stopwatch_start (timer);
  h_cpu = reduce_cpu (h_idata, N);
  t_cpu = stopwatch_stop (timer);
  fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
           t_cpu);
  /* ================================================== */

  /* float-safe absolute difference: the previous code called the integer
     abs() here, which truncated the float difference toward zero */
  dtype err = h_odata - h_cpu;
  if (err < 0) err = -err;
  if(err > 1e-5) {
    fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
  } else {
    printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
  }

  /* release resources (previously leaked) */
  free (h_idata);
  CUDA_CHECK_ERROR (hipFree (d_idata));
  CUDA_CHECK_ERROR (hipFree (d_odata));

  return 0;
}
| e473ad028e1ae7ebc7ee0f98f8342bcf0418ac24.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
/* Round x up to the nearest power of two (returns x unchanged when x is
 * already a power of two; nextPow2(0) wraps around to 0). */
unsigned int nextPow2( unsigned int x ) {
    unsigned int v = x - 1;
    /* smear the highest set bit into every lower position */
    for (unsigned int shift = 1; shift <= 16; shift <<= 1) {
        v |= v >> shift;
    }
    return v + 1;
}
/* find out # of threads and # thread blocks for a particular kernel */
/* Choose a launch configuration for reduction kernel `whichKernel` over n
 * elements: kernels 0-2 process one element per thread, kernels >= 3 process
 * two.  Thread count is a power of two capped at maxThreads; kernel 5
 * additionally caps the block count at maxBlocks.  Results are returned via
 * the `blocks`/`threads` reference parameters. */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    const bool twoElemsPerThread = (whichKernel >= 3);
    if (twoElemsPerThread) {
        /* one thread covers a pair, so size for ceil(n/2) threads */
        threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
        blocks  = (n + threads * 2 - 1) / (threads * 2);
    } else {
        threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
        blocks  = (n + threads - 1) / threads;
    }
    if (whichKernel == 5)
        blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
/* Kahan (compensated) summation over data[0..n-1]; the compensation term c
 * recovers low-order bits lost by each floating-point add, giving a more
 * accurate reference than naive accumulation.
 * Returns 0 for empty/NULL input (the original dereferenced data[0]
 * unconditionally). */
dtype reduce_cpu(dtype *data, int n) {
    if (data == NULL || n <= 0) return (dtype)0.0;
    dtype sum = data[0];
    dtype c = (dtype)0.0;
    for (int i = 1; i < n; i++)
    {
        /* y = next value with accumulated rounding error subtracted out */
        dtype y = data[i] - c;
        dtype t = sum + y;
        /* (t - sum) - y isolates the low-order bits lost in sum + y */
        c = (t - sum) - y;
        sum = t;
    }
    return sum;
}
/*
 * Unrolled tree reduction.  Each thread loads and adds one pair of elements
 * (index i and i + n/2), so the launch needs enough blocks to cover n/2
 * threads; each block writes its partial sum to g_odata[bid].
 * scratch is volatile so the warp-synchronous tail is not broken by register
 * caching (pre-Volta idiom; NOTE(review): on Volta+ independent thread
 * scheduling this pattern needs __syncwarp() — confirm target architecture).
 * NOTE(review): length = n/2 truncates, so the last element of an odd-length
 * input is silently dropped — confirm callers only pass even n.
 */
__global__ void
kernel4(dtype *g_idata, dtype *g_odata, unsigned int n)
{
    __shared__ volatile dtype scratch[MAX_THREADS];

    unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
    unsigned int i = bid * blockDim.x + threadIdx.x;
    unsigned int length = n / 2;

    /* first addition happens during the load; out-of-range threads pad with 0 */
    if (i < (n / 2)) {
        scratch[threadIdx.x] = g_idata[i] + g_idata[i + length];
    } else {
        scratch[threadIdx.x] = 0;
    }
    __syncthreads();

    /* shared-memory tree reduction down to 64 active elements */
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (threadIdx.x < s) {
            scratch[threadIdx.x] += scratch[threadIdx.x + s];
        }
        __syncthreads();
    }

    /* Warp-synchronous tail.  BUG FIX: the original guarded the first two
     * steps on the element count (`if (n > 64)`, `if (n > 32)`) and left the
     * last four unconditional, so a block launched with blockDim.x < 16
     * (e.g. the final pass of a multi-pass reduction) read uninitialized
     * shared memory beyond scratch[blockDim.x - 1].  The initialized region
     * is bounded by blockDim.x, so guard on that instead. */
    if (threadIdx.x < 32)
    {
        if (blockDim.x >= 64) scratch[threadIdx.x] += scratch[threadIdx.x + 32];
        if (blockDim.x >= 32) scratch[threadIdx.x] += scratch[threadIdx.x + 16];
        if (blockDim.x >= 16) scratch[threadIdx.x] += scratch[threadIdx.x + 8];
        if (blockDim.x >= 8)  scratch[threadIdx.x] += scratch[threadIdx.x + 4];
        if (blockDim.x >= 4)  scratch[threadIdx.x] += scratch[threadIdx.x + 2];
        if (blockDim.x >= 2)  scratch[threadIdx.x] += scratch[threadIdx.x + 1];
    }

    /* one partial sum per block */
    if (threadIdx.x == 0) {
        g_odata[bid] = scratch[0];
    }
}
/*
 * Benchmark driver: reduces N floats on the GPU with the unrolled kernel4
 * (repeatedly, until one value remains) and validates against a compensated
 * CPU reduction.  Exit code is 0; pass/fail is reported on stdout/stderr.
 */
int
main(int argc, char** argv)
{
    int i;
    /* host scalars/arrays and device buffers */
    dtype *h_idata, h_odata, h_cpu;
    dtype *d_idata, *d_odata;
    /* timer */
    struct stopwatch_t* timer = NULL;
    long double t_kernel_4, t_cpu;
    /* which kernel are we running */
    int whichKernel;
    /* number of threads and thread blocks */
    int threads, blocks;
    int N;

    /* problem size: argv[1] if given, else the compiled-in default */
    if (argc > 1) {
        N = atoi(argv[1]);
    } else {
        N = N_;
    }
    printf("N: %d\n", N);

    /* unrolled kernel */
    whichKernel = 4;
    getNumBlocksAndThreads(whichKernel, N, MAX_BLOCKS, MAX_THREADS,
                           blocks, threads);

    /* initialize timer */
    stopwatch_init();
    timer = stopwatch_create();

    /* allocate memory */
    h_idata = (dtype*) malloc(N * sizeof(dtype));
    if (h_idata == NULL) {
        fprintf(stderr, "malloc of %d elements failed\n", N);
        return 1;
    }
    CUDA_CHECK_ERROR(cudaMalloc(&d_idata, N * sizeof(dtype)));
    CUDA_CHECK_ERROR(cudaMalloc(&d_odata, blocks * sizeof(dtype)));

    /* initialize array with small random values */
    srand48(time(NULL));
    for (i = 0; i < N; i++) {
        h_idata[i] = drand48() / 100000;
    }
    CUDA_CHECK_ERROR(cudaMemcpy(d_idata, h_idata, N * sizeof(dtype),
                                cudaMemcpyHostToDevice));

    /* ================================================== */
    /* GPU kernel */
    dim3 gb(blocks, 1, 1);
    dim3 tb(threads, 1, 1);

    /* warm up.  BUG FIX: cudaThreadSynchronize() is deprecated — use
       cudaDeviceSynchronize(), and check its return code. */
    kernel4<<<gb, tb>>>(d_idata, d_odata, N);
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());

    stopwatch_start(timer);
    /* execute kernel, then keep reducing the per-block partial sums */
    kernel4<<<gb, tb>>>(d_idata, d_odata, N);
    int s = blocks;
    while (s > 1) {
        threads = 0;
        blocks = 0;
        getNumBlocksAndThreads(whichKernel, s, MAX_BLOCKS, MAX_THREADS,
                               blocks, threads);
        dim3 gb2(blocks, 1, 1);
        dim3 tb2(threads, 1, 1);
        /* NOTE(review): this pass reduces d_odata in place; block 0 reads the
           index range other blocks concurrently overwrite, which is a race
           whenever more than one block is launched here — confirm; a
           ping-pong buffer would be safer. */
        kernel4<<<gb2, tb2>>>(d_odata, d_odata, s);
        s = (s + threads * 2 - 1) / (threads * 2);
    }
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());
    t_kernel_4 = stopwatch_stop(timer);
    fprintf(stdout, "Time to execute unrolled GPU reduction kernel: %Lg secs\n", t_kernel_4);
    double bw = (N * sizeof(dtype)) / (t_kernel_4 * 1e9);
    fprintf(stdout, "Effective bandwidth: %.2lf GB/s\n", bw);

    /* copy result back from GPU */
    CUDA_CHECK_ERROR(cudaMemcpy(&h_odata, d_odata, sizeof(dtype),
                                cudaMemcpyDeviceToHost));

    /* ================================================== */
    /* CPU reference */
    stopwatch_start(timer);
    h_cpu = reduce_cpu(h_idata, N);
    t_cpu = stopwatch_stop(timer);
    fprintf(stdout, "Time to execute naive CPU reduction: %Lg secs\n",
            t_cpu);

    /* BUG FIX: the original compared with integer abs(), which truncates the
       float difference toward zero and reported SUCCESS for any |diff| < 1. */
    dtype diff = h_odata - h_cpu;
    if (diff < 0) diff = -diff;
    if (diff > 1e-5) {
        fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
    } else {
        printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
    }

    /* release resources (leaked in the original) */
    CUDA_CHECK_ERROR(cudaFree(d_idata));
    CUDA_CHECK_ERROR(cudaFree(d_odata));
    free(h_idata);
    return 0;
}
|
4f3eee7a7a89dc6b482cbd948c72ba511e688a4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#define RADIUS 3
#define BLOCK_SIZE_X 8
#define BLOCK_SIZE_Y 8
/*******************/
/* iDivUp FUNCTION */
/*******************/
/* Integer ceiling division: smallest count of size-b chunks covering a items. */
int iDivUp(int a, int b)
{
    int q = a / b;
    if (a % b != 0)
        q += 1;
    return q;
}
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/* Report a failed HIP API call (file/line supplied by the gpuErrchk macro at
 * the call site) and, unless abort is false, terminate with the error code. */
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/**********/
/* KERNEL */
/**********/
/*
 * Row-wise moving sum of width (2*RADIUS + 1) over an M x N array.
 * Launch layout: 2-D grid of BLOCK_SIZE_X x BLOCK_SIZE_Y blocks covering the
 * array (grid is ceil-divided, so padding threads exist).  Each block stages
 * its tile plus RADIUS-wide left/right halos in shared memory, zero-padding
 * out-of-range positions.  The embedded printf is debug output.
 */
__global__ void moving_average(unsigned int *in, unsigned int *out, unsigned int M, unsigned int N) {
    __shared__ unsigned int temp[BLOCK_SIZE_Y][BLOCK_SIZE_X + 2 * RADIUS];

    unsigned int gindexx = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int gindexy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int gindex  = gindexy * N + gindexx;
    unsigned int lindexx = threadIdx.x + RADIUS;
    unsigned int lindexy = threadIdx.y;

    // --- Read input elements into shared memory (zero for out-of-range threads)
    temp[lindexy][lindexx] = ((gindexx < N)&&(gindexy < M))? in[gindex] : 0;
    // --- First RADIUS threads of the row also load the left and right halos
    if (threadIdx.x < RADIUS) {
        temp[lindexy][threadIdx.x] = ((gindexx >= RADIUS)&&(gindexx < (N + RADIUS))&&(gindexy < M)) ? in[gindex - RADIUS] : 0;
        temp[lindexy][threadIdx.x + (RADIUS + min(BLOCK_SIZE_X, N - blockIdx.x * BLOCK_SIZE_X))] = (((gindexx + min(BLOCK_SIZE_X, N - blockIdx.x * BLOCK_SIZE_X)) < N)&&(gindexy < M))? in[gindexy * N + gindexx + min(BLOCK_SIZE_X, N - blockIdx.x * BLOCK_SIZE_X)] : 0;
        if ((threadIdx.y == 0)&&(gindexy < M)&&((gindexx + BLOCK_SIZE_X) < N)&&(gindexy < M)) printf("Inside 2 - tidx = %i; bidx = %i; tidy = %i; bidy = %i; lindexx = %i; temp = %i\n", threadIdx.x, blockIdx.x, threadIdx.y, blockIdx.y, threadIdx.x + (RADIUS + BLOCK_SIZE_X), temp[lindexy][threadIdx.x + (RADIUS + BLOCK_SIZE_X)]);
    }
    __syncthreads();

    // --- Apply the stencil
    unsigned int result = 0;
    for (int offset = -RADIUS ; offset <= RADIUS ; offset++) {
        result += temp[lindexy][lindexx + offset];
    }

    // --- Store the result.
    // BUG FIX: padding threads previously wrote out[] out of bounds because
    // the store was unguarded (the grid is ceil-divided over M x N).  The
    // guard lives here — not as an early return — so every thread still
    // reaches __syncthreads() above.
    if ((gindexx < N) && (gindexy < M)) {
        out[gindexy * N + gindexx] = result;
    }
}
/********/
/* MAIN */
/********/
/* Driver: fills an M x N device array with a constant, runs the stencil
 * kernel once, and prints the full result array. */
int main() {
    const unsigned int nRows     = 2;
    const unsigned int nCols     = 4 + 2 * RADIUS;
    const unsigned int fillValue = 3;

    thrust::device_vector<unsigned int> devIn(nRows * nCols, fillValue);
    thrust::device_vector<unsigned int> devOut(nRows * nCols);

    dim3 gridDims(iDivUp(nCols, BLOCK_SIZE_X), iDivUp(nRows, BLOCK_SIZE_Y));
    dim3 blockDims(BLOCK_SIZE_X, BLOCK_SIZE_Y);

    hipLaunchKernelGGL(( moving_average), dim3(gridDims), dim3(blockDims), 0, 0, thrust::raw_pointer_cast(devIn.data()), thrust::raw_pointer_cast(devOut.data()), nRows, nCols);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());

    thrust::host_vector<unsigned int> hostOut = devOut;
    for (int j = 0; j < nRows; j++) {
        for (int i = 0; i < nCols; i++)
            printf("Element j = %i; i = %i; h_out = %i\n", j, i, hostOut[nCols * j + i]);
    }
    return 0;
}
| 4f3eee7a7a89dc6b482cbd948c72ba511e688a4d.cu | #include <thrust/device_vector.h>
#define RADIUS 3
#define BLOCK_SIZE_X 8
#define BLOCK_SIZE_Y 8
/*******************/
/* iDivUp FUNCTION */
/*******************/
/* Integer ceiling division: smallest count of size-b chunks covering a items. */
int iDivUp(int a, int b)
{
    int q = a / b;
    if (a % b != 0)
        q += 1;
    return q;
}
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/* Report a failed CUDA API call (file/line supplied by the gpuErrchk macro at
 * the call site) and, unless abort is false, terminate with the error code. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/**********/
/* KERNEL */
/**********/
/*
 * Row-wise moving sum of width (2*RADIUS + 1) over an M x N array.
 * Launch layout: 2-D grid of BLOCK_SIZE_X x BLOCK_SIZE_Y blocks covering the
 * array (grid is ceil-divided, so padding threads exist).  Each block stages
 * its tile plus RADIUS-wide left/right halos in shared memory, zero-padding
 * out-of-range positions.  The embedded printf is debug output.
 */
__global__ void moving_average(unsigned int *in, unsigned int *out, unsigned int M, unsigned int N) {
    __shared__ unsigned int temp[BLOCK_SIZE_Y][BLOCK_SIZE_X + 2 * RADIUS];

    unsigned int gindexx = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int gindexy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int gindex  = gindexy * N + gindexx;
    unsigned int lindexx = threadIdx.x + RADIUS;
    unsigned int lindexy = threadIdx.y;

    // --- Read input elements into shared memory (zero for out-of-range threads)
    temp[lindexy][lindexx] = ((gindexx < N)&&(gindexy < M))? in[gindex] : 0;
    // --- First RADIUS threads of the row also load the left and right halos
    if (threadIdx.x < RADIUS) {
        temp[lindexy][threadIdx.x] = ((gindexx >= RADIUS)&&(gindexx < (N + RADIUS))&&(gindexy < M)) ? in[gindex - RADIUS] : 0;
        temp[lindexy][threadIdx.x + (RADIUS + min(BLOCK_SIZE_X, N - blockIdx.x * BLOCK_SIZE_X))] = (((gindexx + min(BLOCK_SIZE_X, N - blockIdx.x * BLOCK_SIZE_X)) < N)&&(gindexy < M))? in[gindexy * N + gindexx + min(BLOCK_SIZE_X, N - blockIdx.x * BLOCK_SIZE_X)] : 0;
        if ((threadIdx.y == 0)&&(gindexy < M)&&((gindexx + BLOCK_SIZE_X) < N)&&(gindexy < M)) printf("Inside 2 - tidx = %i; bidx = %i; tidy = %i; bidy = %i; lindexx = %i; temp = %i\n", threadIdx.x, blockIdx.x, threadIdx.y, blockIdx.y, threadIdx.x + (RADIUS + BLOCK_SIZE_X), temp[lindexy][threadIdx.x + (RADIUS + BLOCK_SIZE_X)]);
    }
    __syncthreads();

    // --- Apply the stencil
    unsigned int result = 0;
    for (int offset = -RADIUS ; offset <= RADIUS ; offset++) {
        result += temp[lindexy][lindexx + offset];
    }

    // --- Store the result.
    // BUG FIX: padding threads previously wrote out[] out of bounds because
    // the store was unguarded (the grid is ceil-divided over M x N).  The
    // guard lives here — not as an early return — so every thread still
    // reaches __syncthreads() above.
    if ((gindexx < N) && (gindexy < M)) {
        out[gindexy * N + gindexx] = result;
    }
}
/********/
/* MAIN */
/********/
/* Driver: fills an M x N device array with a constant, runs the stencil
 * kernel once, and prints the full result array. */
int main() {
    const unsigned int nRows     = 2;
    const unsigned int nCols     = 4 + 2 * RADIUS;
    const unsigned int fillValue = 3;

    thrust::device_vector<unsigned int> devIn(nRows * nCols, fillValue);
    thrust::device_vector<unsigned int> devOut(nRows * nCols);

    dim3 gridDims(iDivUp(nCols, BLOCK_SIZE_X), iDivUp(nRows, BLOCK_SIZE_Y));
    dim3 blockDims(BLOCK_SIZE_X, BLOCK_SIZE_Y);

    moving_average<<<gridDims, blockDims>>>(thrust::raw_pointer_cast(devIn.data()), thrust::raw_pointer_cast(devOut.data()), nRows, nCols);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    thrust::host_vector<unsigned int> hostOut = devOut;
    for (int j = 0; j < nRows; j++) {
        for (int i = 0; i < nCols; i++)
            printf("Element j = %i; i = %i; h_out = %i\n", j, i, hostOut[nCols * j + i]);
    }
    return 0;
}
|
a4c73640336d0ae123fb992e628f5d80d9359d88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlarft_kernels.cu, normal z -> d, Wed Jan 2 14:18:51 2019
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define dgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ double shared_data[];
/******************************************************************************/
// Compute one scalar of a T-factor row: thread block `thblk` (0..step) forms
// the BLOCK_SIZE-thread dot product V(:,thblk)' * V(:,step) (the caller offsets
// v/c/T to the step-th reflector), or writes the diagonal/tau value when
// thblk == step.  Requires blockDim.x == BLOCK_SIZE (the wrappers below launch
// it that way).
static __device__
void dlarft_gemvcolwise_device(
int m, double *v, double *tau,
double *c, int ldc, double *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
double *dc = c + blockIdx.x * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
// strided partial dot product, then a block-wide reduction into sum[0]
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_D_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
// -tau scaling is deferred to the trmv stage in the gemm-based path
*(T+thblk) = MAGMA_D_CONJ(sum[0]);
#else
tmp = - MAGMA_D_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_D_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_D_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
// diagonal entry T(step,step)
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_D_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_D_ZERO;
}
}
/******************************************************************************/
// Kernel wrapper: shifts v/tau/T to the step-th reflector before the device call.
__global__
void dlarft_gemvcolwise_kernel( int m, double *v, int ldv, double *tau,
double *T, int ldt, int step )
{
dlarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the matrix within the batch.
__global__
void dlarft_gemvcolwise_kernel_batched( int m, double **v_array, int ldv, double **tau_array,
double **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
dlarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
/******************************************************************************/
// Host wrapper: launches step+1 blocks of BLOCK_SIZE threads on the queue's stream.
extern "C"
void magmablas_dlarft_gemvcolwise(
magma_int_t m, magma_int_t step,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( dlarft_gemvcolwise_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v, ldv, tau, T, ldt, step);
}
/******************************************************************************/
// Host wrapper, batched: grid.z spans the batch.
extern "C"
void magmablas_dlarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
double **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( dlarft_gemvcolwise_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v_array, ldv, tau_array, T_array, ldt, step);
}
/******************************************************************************/
// dgemv(y=alpha*A*x) interface: T/W=tau*v*x,
// Row-wise T update: thread column ty (0..i-1) computes the dot product of
// V(:,ty) with x = V(:,i) using dgemv_bs threads along rows, reduced through
// the caller-provided shared buffer sdata (needs dgemv_bs * blockDim.y
// doubles; the wrappers below allocate dgemv_bs*(i+1)).  W receives the
// negated (and, in the non-gemm path, tau-scaled) results; T_ptr[0] gets tau.
static __device__ void
dlarft_gemvrowwise_device(
int m, int i,
double *tau,
double *v_ptr, int ldv,
double *x_ptr, int incx,
double *T_ptr, int ldt,
double *W, double* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
// nothing above the diagonal for the first column
if (i <= 0) return;
double res = MAGMA_D_ZERO;
v_ptr += ldv * ty;
if (tx < dgemv_bs)
{
// strided partial dot product over the m rows
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
// reduce each column's partials to sdata[ty*dgemv_bs + 0]
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0] * (*tau);
}
#endif
}
/******************************************************************************/
// T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
// T(i,i) = tau(i)
// Kernel wrapper: W aliases column i of T; sdata is the dynamic shared buffer.
__global__ void
dlarft_gemvrowwise_kernel(
int m, int i,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
double *W = T +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the matrix within the batch.
__global__ void
dlarft_gemvrowwise_kernel_batched(
int m, int i,
double **tau_array,
double **v_array, int ldv,
double **T_array, int ldt)
{
int batchid = blockIdx.z;
double *W = T_array[batchid] +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
// Host wrapper: one block of (dgemv_bs x max(i,1)) threads, dynamic shmem.
extern "C"
void magmablas_dlarft_gemvrowwise(
magma_int_t m, magma_int_t i,
double *tau,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(dgemv_bs, max(i,1), 1);
size_t shmem = sizeof(double)*dgemv_bs*(i+1);
hipLaunchKernelGGL(( dlarft_gemvrowwise_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Host wrapper, batched: grid.z spans the batch.
extern "C"
void magmablas_dlarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
double **tau_array,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(dgemv_bs, max(i,1), 1);
size_t shmem = sizeof(double)*dgemv_bs*(i+1);
/* dgemvrowwise used a bigger shared memory and has more data reuse and performs better
*/
hipLaunchKernelGGL(( dlarft_gemvrowwise_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
/*
loop_inside
*/
// Build columns 1..k-1 of T sequentially inside a single thread block:
// iteration i computes V(i:n,1:i-1)' * V(i:n,i) with a (dgemv_bs x i) thread
// tile and writes it to column i of T.  Needs blockDim = (dgemv_bs, >= k)
// and dynamic shared memory for dgemv_bs * blockDim.y doubles (the wrappers
// below allocate dgemv_bs*(k+1)).
static __device__ void
dlarft_gemv_loop_inside_device(
int n, int k,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
double *sdata = (double*)shared_data;
double res;
// write the first element: T(0,0) = tau[0]
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
double *v_ptr = v;
v_ptr += i;
double *x_ptr = v_ptr + i * ldv;
res = MAGMA_D_ZERO;
// column ty (< i) accumulates a strided partial dot product over m rows
if (tx < dgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
// reduce each column's partials into sdata[ty*dgemv_bs + 0]
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
// gemm path: store un-scaled dot products; -tau scaling happens in trmv
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * dgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * dgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
/******************************************************************************/
// Kernel wrapper around the loop device routine.
__global__ void
dlarft_gemv_loop_inside_kernel(
int n, int k,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
dlarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the matrix within the batch.
__global__ void
dlarft_gemv_loop_inside_kernel_batched(
int n, int k,
double **tau_array,
double **v_array, int ldv,
double **T_array, int ldt)
{
int batchid = blockIdx.z;
dlarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
/******************************************************************************/
// Host wrapper: one block of (dgemv_bs x max(k,1)) threads, dynamic shmem.
extern "C"
void magmablas_dlarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
double *tau,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(dgemv_bs, max(k,1), 1);
size_t shmem = sizeof(double) * (dgemv_bs*(k+1));
hipLaunchKernelGGL(( dlarft_gemv_loop_inside_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Host wrapper, batched: grid.z spans the batch.
extern "C"
void magmablas_dlarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
double **tau_array,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(dgemv_bs, max(k,1), 1);
size_t shmem = sizeof(double) * (dgemv_bs*(k+1));
hipLaunchKernelGGL(( dlarft_gemv_loop_inside_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
// Apply a sequence of triangular matrix-vector products to finish the last k
// columns of an n x n T entirely in shared memory.  One thread per row
// (blockDim.x == n, from the wrapper below which launches max(m,1) threads
// and sizeof(double)*m*m shared bytes — the "sm32x32" name suggests n <= 32;
// TODO confirm).  Here `n` is the matrix order and `k` the trailing column
// count; the wrapper's (m, n) map to this routine's (n, k).
static __device__ void
dlarft_dtrmv_sm32x32_device(
int n, int k, double *tau,
double *Tin, int ldtin, double *Tout, int ldtout )
{
int tx = threadIdx.x;
double *sdata = (double*)shared_data;
double res;
// this routine apply a sequence of trmv to update k column of the triangular
// T starting at n-k to n where T is of size n by n and where the first n-k
// columns of T are supposed updated previously.
// So the routine load all of T nxn to the shared memory
// and apply the sequence of trmv.
// to update a certain column i, threads go in horizontal fashion where
// every thread read one row and do it gemv(dot) to generate
// one element of the column of T then move to the next column
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
// gemm path: fold the deferred -tau scaling in while loading, and place
// tau on the diagonal
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_D_ZERO;
if (tx < i)
{
// row tx of the strictly-upper part times column i
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k column of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
/******************************************************************************/
// Kernel wrapper around the shared-memory trmv routine.
__global__ void
dlarft_dtrmv_sm32x32_kernel(
int n, int k, double *tau,
double *Tin, int ldtin, double *Tout, int ldtout )
{
dlarft_dtrmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the matrix within the batch.
__global__ void
dlarft_dtrmv_sm32x32_kernel_batched(
int n, int k, double **tau_array,
double **Tin_array, int ldtin, double **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
dlarft_dtrmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
/******************************************************************************/
// Host wrapper: one block, m threads (one per row), m*m doubles of shmem.
extern "C"
void magmablas_dlarft_dtrmv_sm32x32(
magma_int_t m, magma_int_t n,
double *tau,
double *Tin, magma_int_t ldtin,
double *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*m);
hipLaunchKernelGGL(( dlarft_dtrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Host wrapper, batched: grid.z spans the batch.
extern "C"
void magmablas_dlarft_dtrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
double **tau_array,
double **Tin_array, magma_int_t ldtin,
double **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*m);
hipLaunchKernelGGL(( dlarft_dtrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
/******************************************************************************/
// Recursive-T update: multiplies the m x n rectangular block Trec by the
// n x n triangular block Ttri (column by column) and scales by -tau, entirely
// in shared memory with one thread per row (wrapper launches max(m,1)
// threads and m*n doubles of shmem).
// NOTE(review): sdata is indexed with leading dimension n while holding m
// rows; this only tiles cleanly when m == n — confirm against call sites.
static __device__ void
dlarft_recdtrmv_sm32x32_device(
int m, int n, double *tau,
double *Trec, int ldtrec, double *Ttri, int ldttri)
{
int tx = threadIdx.x;
double *sdata = (double*)shared_data;
double res;
// to update a certain column i, threads go in horizontal fashion where
// every thread read one row and do it gemv(dot) to generate
// one element of the column of T then move to the next column
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_D_ZERO;
// row tx of the already-updated columns times column i of Ttri
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
__syncthreads(); // to be removed (leftover debugging barrier)
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
// write back the updated block of k column of T multiplying by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
/******************************************************************************/
// Kernel wrapper around the recursive trmv routine.
__global__ void
dlarft_recdtrmv_sm32x32_kernel(
int m, int n, double *tau,
double *Trec, int ldtrec, double *Ttri, int ldttri)
{
dlarft_recdtrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the matrix within the batch.
__global__ void
dlarft_recdtrmv_sm32x32_kernel_batched(
int m, int n, double **tau_array,
double **Trec_array, int ldtrec, double **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
dlarft_recdtrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
/******************************************************************************/
// Host wrapper: one block, m threads (one per row), m*n doubles of shmem.
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32(
magma_int_t m, magma_int_t n,
double *tau,
double *Trec, magma_int_t ldtrec,
double *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*n);
hipLaunchKernelGGL(( dlarft_recdtrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Host wrapper, batched: grid.z spans the batch.
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
double **tau_array,
double **Trec_array, magma_int_t ldtrec,
double **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*n);
hipLaunchKernelGGL(( dlarft_recdtrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
| a4c73640336d0ae123fb992e628f5d80d9359d88.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlarft_kernels.cu, normal z -> d, Wed Jan 2 14:18:51 2019
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define dgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ double shared_data[];
/******************************************************************************/
// Compute one scalar of a T-factor row: thread block `thblk` (0..step) forms
// the BLOCK_SIZE-thread dot product V(:,thblk)' * V(:,step) (the caller offsets
// v/c/T to the step-th reflector), or writes the diagonal/tau value when
// thblk == step.  Requires blockDim.x == BLOCK_SIZE (the wrappers below launch
// it that way).
static __device__
void dlarft_gemvcolwise_device(
int m, double *v, double *tau,
double *c, int ldc, double *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
double *dc = c + blockIdx.x * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
// strided partial dot product, then a block-wide reduction into sum[0]
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_D_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
// -tau scaling is deferred to the trmv stage in the gemm-based path
*(T+thblk) = MAGMA_D_CONJ(sum[0]);
#else
tmp = - MAGMA_D_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_D_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_D_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
// diagonal entry T(step,step)
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_D_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_D_ZERO;
}
}
/******************************************************************************/
// Kernel wrapper: shifts v/tau/T to the step-th reflector before the device call.
__global__
void dlarft_gemvcolwise_kernel( int m, double *v, int ldv, double *tau,
double *T, int ldt, int step )
{
dlarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the matrix within the batch.
__global__
void dlarft_gemvcolwise_kernel_batched( int m, double **v_array, int ldv, double **tau_array,
double **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
dlarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
/******************************************************************************/
// Host wrapper: launches step+1 blocks of BLOCK_SIZE threads on the queue's stream.
extern "C"
void magmablas_dlarft_gemvcolwise(
magma_int_t m, magma_int_t step,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
dlarft_gemvcolwise_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v, ldv, tau, T, ldt, step);
}
/******************************************************************************/
// Host wrapper, batched: grid.z spans the batch.
extern "C"
void magmablas_dlarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
double **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
dlarft_gemvcolwise_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v_array, ldv, tau_array, T_array, ldt, step);
}
/******************************************************************************/
// dgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
dlarft_gemvrowwise_device(
int m, int i,
double *tau,
double *v_ptr, int ldv,
double *x_ptr, int incx,
double *T_ptr, int ldt,
double *W, double* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
double res = MAGMA_D_ZERO;
v_ptr += ldv * ty;
if (tx < dgemv_bs)
{
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0] * (*tau);
}
#endif
}
/******************************************************************************/
// T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
// T(i,i) = tau(i)
__global__ void
dlarft_gemvrowwise_kernel(
int m, int i,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
double *W = T +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
// Batched variant: blockIdx.z selects the problem within the batch.
__global__ void
dlarft_gemvrowwise_kernel_batched(
    int m, int i,
    double **tau_array,
    double **v_array, int ldv,
    double **T_array, int ldt)
{
    int batchid = blockIdx.z;
    double *W = T_array[batchid] +i*ldt;     // output: column i of this T
    double *sdata = (double*)shared_data;    // dynamic shared workspace
    dlarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
                              T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_gemvrowwise(
    magma_int_t m, magma_int_t i,
    double *tau,
    double *v, magma_int_t ldv,
    double *T, magma_int_t ldt,
    double *W,
    magma_queue_t queue )
{
    // One block of (dgemv_bs x max(i,1)) threads; the shared workspace
    // holds one dgemv_bs-wide row of partials per column (+1 spare).
    dim3 blocks(1);
    dim3 block_dim(dgemv_bs, max(i,1), 1);
    size_t shared_bytes = sizeof(double)*dgemv_bs*(i+1);
    dlarft_gemvrowwise_kernel
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (m, i, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_gemvrowwise_batched(
    magma_int_t m, magma_int_t i,
    double **tau_array,
    double **v_array, magma_int_t ldv,
    double **T_array, magma_int_t ldt,
    magma_int_t batchCount, magma_queue_t queue)
{
    // One z-slice per problem; otherwise identical launch geometry to the
    // non-batched version.  The row-wise variant uses a larger shared
    // buffer, giving more data reuse and better performance.
    dim3 blocks(1, 1, batchCount);
    dim3 block_dim(dgemv_bs, max(i,1), 1);
    size_t shared_bytes = sizeof(double)*dgemv_bs*(i+1);
    dlarft_gemvrowwise_kernel_batched
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (m, i, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
/*
loop_inside
*/
// Builds columns 1..k-1 of the dlarft T factor in a single kernel: for
// each i, column i of T receives V(i:n,0:i-1)' * V(i:n,i), computed as a
// row-wise gemv with partials staged in shared memory.  Expects
// blockDim = (dgemv_bs, >= k) and at least blockDim.y * dgemv_bs doubles
// of dynamic shared memory.  With use_gemm_larft the raw dot products are
// stored (negation/tau scaling happen later in the trmv stage); otherwise
// -tau(i)*dot and the diagonal tau(i) are written directly.
static __device__ void
dlarft_gemv_loop_inside_device(
    int n, int k,
    double *tau,
    double *v, int ldv,
    double *T, int ldt)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int incx = 1;
    double *sdata = (double*)shared_data;   // dynamic shared workspace
    double res;
    // write the first element: T(0,0) = tau(0)
    if (tx == 0 && ty == 0)
    {
        T[0] = tau[0];
    }
    for (int i=1; i < k; i++)
    {
        int m = n-i;                         // active reflector length
        double *v_ptr = v;
        v_ptr += i;                          // V(i:n, ...)
        double *x_ptr = v_ptr + i * ldv;     // column i of V
        res = MAGMA_D_ZERO;
        if (tx < dgemv_bs && ty < i)
        {
            v_ptr += ldv * ty;               // column ty for this thread row
            for (int s=tx; s < m; s += dgemv_bs)
            {
                res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
            }
            sdata[ty * dgemv_bs + tx] = res;
        }
        __syncthreads();
        magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
        __syncthreads();
#if defined (use_gemm_larft)
        if (tx < i && ty == 0)
        {
            T[i* ldt + tx] = sdata[tx * dgemv_bs + 0];
        }
        // not needed since it is overwritten in trmv
        /*
        if (tx == i && ty == 0)
        {
            T[i * ldt + i] = tau[i];
        }
        */
#else
        if (tx < i && ty == 0)
        {
            T[i* ldt + tx] = -sdata[tx * dgemv_bs + 0] * (tau[i]);
        }
        if (tx == i && ty == 0)
        {
            T[i * ldt + i] = tau[i];
        }
#endif
        v_ptr -= i;   // undo the per-iteration offset (v_ptr is re-derived anyway)
    } // end of loop k
}
/******************************************************************************/
// Thin kernel wrapper around dlarft_gemv_loop_inside_device for the
// single-matrix case.
__global__ void
dlarft_gemv_loop_inside_kernel(
    int n, int k,
    double *tau,
    double *v, int ldv,
    double *T, int ldt)
{
    dlarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Batched wrapper: blockIdx.z selects the problem within the batch.
__global__ void
dlarft_gemv_loop_inside_kernel_batched(
    int n, int k,
    double **tau_array,
    double **v_array, int ldv,
    double **T_array, int ldt)
{
    int batchid = blockIdx.z;
    dlarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_gemv_loop_inside(
    magma_int_t n, magma_int_t k,
    double *tau,
    double *v, magma_int_t ldv,
    double *T, magma_int_t ldt,
    magma_queue_t queue )
{
    // blockDim = (dgemv_bs, max(k,1)); the shared workspace holds one
    // dgemv_bs-wide row of partials per column (+1 spare row).
    dim3 blocks(1);
    dim3 block_dim(dgemv_bs, max(k,1), 1);
    size_t shared_bytes = sizeof(double) * (dgemv_bs*(k+1));
    dlarft_gemv_loop_inside_kernel
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_gemv_loop_inside_batched(
    magma_int_t n, magma_int_t k,
    double **tau_array,
    double **v_array, magma_int_t ldv,
    double **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
    // One z-slice per problem; otherwise identical launch geometry to the
    // non-batched wrapper.
    dim3 blocks(1, 1, batchCount);
    dim3 block_dim(dgemv_bs, max(k,1), 1);
    size_t shared_bytes = sizeof(double) * (dgemv_bs*(k+1));
    dlarft_gemv_loop_inside_kernel_batched
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (n, k, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
// Apply a sequence of trmv updates to the last k columns of the n x n
// triangular factor T (columns 0..n-k-1 are assumed already final).
// Requires blockDim.x == n (one thread per row) and n*n doubles of
// dynamic shared memory.
static __device__ void
dlarft_dtrmv_sm32x32_device(
    int n, int k, double *tau,
    double *Tin, int ldtin, double *Tout, int ldtout )
{
    int tx = threadIdx.x;
    double *sdata = (double*)shared_data;
    double res;
    // this routine apply a sequence of trmv to update k column of the triangular
    // T starting at n-k to n where T is of size n by n and where the first n-k
    // columns of T are supposed updated previously.
    // So the routine load all of T nxn to the shared memory
    // and apply the sequence of trmv.
    // to update a certain column i, threads go in horizontal fashion where
    // every thread read one row and do it gemv(dot) to generate
    // one element of the column of T then move to the next column
    // read T into shared
    for (int s=0; s < n-k; s++)
    {
        sdata[tx + s*n] = Tin[tx + s * ldtin];
    }
#if defined(use_gemm_larft)
    // Fold the -tau scaling that the gemv stage deferred into the columns
    // being updated; the diagonal entry becomes tau itself.
    for (int s=n-k; s < n; s++)
    {
        if (tx == s)
            sdata[tx + s*n] = tau[s];
        else
            sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
    }
#else
    for (int s=n-k; s < n; s++)
    {
        sdata[tx + s*n] = Tin[tx + s * ldtin];
    }
#endif
    // perform trmv
    for (int i=n-k; i < n; i++)
    {
        __syncthreads();
        res = MAGMA_D_ZERO;
        if (tx < i)
        {
            // Row tx of the strictly-upper part times column i.
            for (int j=tx; j < i; j++)
            {
                res += sdata[tx + j * n] * sdata[j+ i * n];
            }
        }
        __syncthreads();
        if (tx < i)
        {
            sdata[tx + i * n] = res;
        }
    }
    __syncthreads();
    // write back the updated block of k column of T
    for (int s=n-k; s < n; s++)
    {
        Tout[tx + s * ldtout] = sdata[tx + s*n];
    }
}
/******************************************************************************/
// Thin kernel wrapper around dlarft_dtrmv_sm32x32_device (single matrix).
__global__ void
dlarft_dtrmv_sm32x32_kernel(
    int n, int k, double *tau,
    double *Tin, int ldtin, double *Tout, int ldtout )
{
    dlarft_dtrmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Batched wrapper: blockIdx.z selects the problem within the batch.
__global__ void
dlarft_dtrmv_sm32x32_kernel_batched(
    int n, int k, double **tau_array,
    double **Tin_array, int ldtin, double **Tout_array, int ldtout )
{
    int batchId = blockIdx.z;
    dlarft_dtrmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_dtrmv_sm32x32(
    magma_int_t m, magma_int_t n,
    double *tau,
    double *Tin, magma_int_t ldtin,
    double *Tout, magma_int_t ldtout,
    magma_queue_t queue )
{
    // One thread per row of the m x m factor; the whole matrix is staged
    // in m*m doubles of dynamic shared memory.
    dim3 blocks(1);
    dim3 block_dim(max(m,1), 1, 1);
    size_t shared_bytes = sizeof(double)*(m*m);
    dlarft_dtrmv_sm32x32_kernel
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (m, n, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_dtrmv_sm32x32_batched(
    magma_int_t m, magma_int_t n,
    double **tau_array,
    double **Tin_array, magma_int_t ldtin,
    double **Tout_array, magma_int_t ldtout,
    magma_int_t batchCount, magma_queue_t queue)
{
    // One z-slice per problem; same per-problem geometry as the
    // non-batched wrapper.
    dim3 blocks(1, 1, batchCount);
    dim3 block_dim(max(m,1), 1, 1);
    size_t shared_bytes = sizeof(double)*(m*m);
    dlarft_dtrmv_sm32x32_kernel_batched
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
/******************************************************************************/
// Recursive-blocking trmv: updates Trec (m x n) in place as
// Trec(:,i) = -tau[i] * (Trec(:,i) + Trec(:,0:i-1) * Ttri(0:i-1,i)).
// Requires blockDim.x == m.
// NOTE(review): sdata is indexed with a column stride of n while callers
// allocate m*n doubles — consistent only when m == n; confirm call sites.
static __device__ void
dlarft_recdtrmv_sm32x32_device(
    int m, int n, double *tau,
    double *Trec, int ldtrec, double *Ttri, int ldttri)
{
    int tx = threadIdx.x;
    double *sdata = (double*)shared_data;
    double res;
    // to update a certain column i, threads go in horizontal fashion where
    // every thread read one row and do it gemv(dot) to generate
    // one element of the column of T then move to the next column
    // read T into shared
    for (int s=0; s < n; s++)
    {
        sdata[tx + s*n] = Trec[tx + s * ldtrec];
    }
    __syncthreads();
    // perform sequence of n-1 gemv
    for (int i=0; i < n; i++)
    {
        res = MAGMA_D_ZERO;
        for (int j=0; j < i; j++)
        {
            res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
        }
        __syncthreads(); // to remove (translated from French "a enlever")
        sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
        __syncthreads();
    }
    // write back the updated block of k column of T multiplying by -tau
    for (int s=0; s < n; s++)
    {
        Trec[tx + s * ldtrec] = sdata[tx + s*n];
    }
}
/******************************************************************************/
// Thin kernel wrapper around dlarft_recdtrmv_sm32x32_device (single matrix).
__global__ void
dlarft_recdtrmv_sm32x32_kernel(
    int m, int n, double *tau,
    double *Trec, int ldtrec, double *Ttri, int ldttri)
{
    dlarft_recdtrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Batched wrapper: blockIdx.z selects the problem within the batch.
__global__ void
dlarft_recdtrmv_sm32x32_kernel_batched(
    int m, int n, double **tau_array,
    double **Trec_array, int ldtrec, double **Ttri_array, int ldttri)
{
    int batchId = blockIdx.z;
    dlarft_recdtrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32(
    magma_int_t m, magma_int_t n,
    double *tau,
    double *Trec, magma_int_t ldtrec,
    double *Ttri, magma_int_t ldttri,
    magma_queue_t queue )
{
    // One thread per row; Trec (m x n) is staged in m*n doubles of
    // dynamic shared memory.
    dim3 blocks(1);
    dim3 block_dim(max(m,1), 1, 1);
    size_t shared_bytes = sizeof(double)*(m*n);
    dlarft_recdtrmv_sm32x32_kernel
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32_batched(
    magma_int_t m, magma_int_t n,
    double **tau_array,
    double **Trec_array, magma_int_t ldtrec,
    double **Ttri_array, magma_int_t ldttri,
    magma_int_t batchCount, magma_queue_t queue)
{
    // One z-slice per problem; same per-problem geometry as the
    // non-batched wrapper.
    dim3 blocks(1, 1, batchCount);
    dim3 block_dim(max(m,1), 1, 1);
    size_t shared_bytes = sizeof(double)*(m*n);
    dlarft_recdtrmv_sm32x32_kernel_batched
        <<< blocks, block_dim, shared_bytes, queue->cuda_stream() >>>
        (m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
|
81ac20741db9e6e597679292bdfc96b744ff53bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "reduction-mod.cuh"
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "helper_cuda.h"
#define CUDART_INF_F __int_as_float(0x7f800000)
#define THREADS_PER_BLOCK 512U
#define MAX_BLOCK_DIM_SIZE 65535U
// True iff x is a positive power of two.
// Fix: the original (x & (x-1)) == 0 test mis-classified 0 (and INT_MIN)
// as powers of two; a power of two must be positive with one bit set.
bool isPow2(int x)
{
    return (x > 0) && ((x & (x - 1)) == 0);
}
// Conditionally replace (distA, min_index) with (distB, index).
// With dir == 1 this keeps the smaller of the two distance values (ties
// go to B); dir == 0 would invert the comparison.
__device__ void cuMinR(Distance &distA, Distance &distB, int &min_index, int index, int dir)
{
    if ((distA.value >= distB.value) == dir)
    {
        distA = distB;
        min_index = index;
    }
}
// Round x up to the next power of two (returns x itself when it already
// is one): smear the highest set bit of (x-1) into every lower position,
// then add one.
int nextPow2(int x)
{
    int v = x - 1;
    for (int shift = 1; shift <= 16; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v + 1;
}
// Pick a launch configuration for the two-elements-per-thread reduction:
// the block is sized for ceil(n/2) rounded up to a power of two, capped
// at maxThreads; the grid covers n with at most maxBlocks blocks.
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    if (n < maxThreads * 2)
        threads = nextPow2((n + 1) / 2);
    else
        threads = maxThreads;
    int needed = (n + threads * 2 - 1) / (threads * 2);
    blocks = (needed < maxBlocks) ? needed : maxBlocks;
}
// Minimum-reduction over the first n Distance records of g_dist, tracking
// the winning index.  Launch with blockDim.x == blockSize (a power of
// two).  nIsPow2 lets the paired load at i + blockSize skip its bounds
// check when n is a power of two.  On exit, thread 0 of each block SWAPS
// the block's minimum element into g_dist[blockIdx.x], so repeated
// invocations converge the global minimum toward g_dist[0].
template <int blockSize, bool nIsPow2>
__global__ void cuReduce(Distance *g_dist, int n)
{
    __shared__ Distance s_dist[blockSize];
    __shared__ int s_ind[blockSize];
    int dir = 1;   // dir == 1 makes cuMinR keep the smaller value
    Distance min_dist = {1, CUDART_INF_F};
    int min_index = 0;
    int tid = threadIdx.x;
    int i = blockIdx.x * blockSize * 2 + threadIdx.x;
    int gridSize = blockSize * 2 * gridDim.x;
    // Grid-stride accumulation; each thread folds two elements per pass.
    while (i < n)
    {
        cuMinR(min_dist, g_dist[i] , min_index, i, dir);
        if (nIsPow2 || i + blockSize < n)
        {
            cuMinR(min_dist, g_dist[i + blockSize], min_index, i + blockSize , dir);
        }
        i += gridSize;
    }
    s_dist[tid] = min_dist;
    s_ind[tid] = min_index;
    __syncthreads();
    // Shared-memory tree reduction down to 64 candidates.
    if (blockSize >= 512)
    {
        if (tid < 256)
        {
            cuMinR(min_dist, s_dist[tid + 256], min_index, s_ind[tid + 256] , dir);
            s_dist[tid] = min_dist;
            s_ind[tid] = min_index;
        }
        __syncthreads();
    }
    if (blockSize >= 256)
    {
        if (tid < 128)
        {
            cuMinR(min_dist, s_dist[tid + 128], min_index, s_ind[tid + 128] , dir);
            s_ind[tid] = min_index;
            s_dist[tid] = min_dist;
        }
        __syncthreads();
    }
    if (blockSize >= 128)
    {
        if (tid < 64)
        {
            cuMinR(min_dist, s_dist[tid + 64], min_index, s_ind[tid + 64] , dir);
            s_ind[tid] = min_index;
            s_dist[tid] = min_dist;
        }
        __syncthreads();
    }
    // Final warp: relies on implicit warp-synchronous execution through
    // volatile shared memory.  NOTE(review): this idiom is unsafe under
    // Volta+ independent thread scheduling; __syncwarp()/shuffle
    // intrinsics would be the safe replacement.
    if (tid < 32)
    {
        volatile int *v_ind = s_ind;
        volatile Distance *v_dist = s_dist;
        if (blockSize >= 64)
        {
            if ((min_dist.value >= v_dist[tid + 32].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 32];
                min_index = v_ind[tid] = v_ind[tid + 32];
            }
        }
        if (blockSize >= 32)
        {
            if ((min_dist.value >= v_dist[tid + 16].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 16];
                min_index = v_ind[tid] = v_ind[tid + 16];
            }
        }
        if (blockSize >= 16)
        {
            if ((min_dist.value >= v_dist[tid + 8].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 8];
                min_index = v_ind[tid] = v_ind[tid + 8];
            }
        }
        if (blockSize >= 8)
        {
            if ((min_dist.value >= v_dist[tid + 4].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 4];
                min_index = v_ind[tid] = v_ind[tid + 4];
            }
        }
        if (blockSize >= 4)
        {
            if ((min_dist.value >= v_dist[tid + 2].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 2];
                min_index = v_ind[tid] = v_ind[tid + 2];
            }
        }
        if (blockSize >= 2)
        {
            if ((min_dist.value >= v_dist[tid + 1].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 1];
                min_index = v_ind[tid] = v_ind[tid + 1];
            }
        }
    }
    if (tid == 0)
    {
        // Swap the block minimum (original position s_ind[0]) into slot
        // blockIdx.x so later passes reduce a compacted prefix.
        i = blockIdx.x;
        min_dist = g_dist[i];
        g_dist[i] = g_dist[s_ind[tid]];
        g_dist[s_ind[tid]] = min_dist;
    }
}
// Dispatch to the cuReduce instantiation whose compile-time blockSize
// matches `threads` (must be a power of two <= 512) and whose nIsPow2
// flag matches the input length.  Unsupported thread counts launch
// nothing.  The dynamic shared-memory size mirrors the original code;
// cuReduce itself only uses statically sized __shared__ arrays.
void reduce(int size, int threads, int blocks, Distance *g_dist)
{
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    int smemSize = (threads <= 32) ? 2 * threads * (sizeof(Distance) + sizeof(int)) : threads * (sizeof(Distance) + sizeof(int));
#define LAUNCH_CU_REDUCE(BS, POW2) \
    hipLaunchKernelGGL(( cuReduce< BS, POW2>) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, g_dist, size)
    if (isPow2(size))
    {
        switch (threads)
        {
            case 512: LAUNCH_CU_REDUCE(512, true); break;
            case 256: LAUNCH_CU_REDUCE(256, true); break;
            case 128: LAUNCH_CU_REDUCE(128, true); break;
            case  64: LAUNCH_CU_REDUCE( 64, true); break;
            case  32: LAUNCH_CU_REDUCE( 32, true); break;
            case  16: LAUNCH_CU_REDUCE( 16, true); break;
            case   8: LAUNCH_CU_REDUCE(  8, true); break;
            case   4: LAUNCH_CU_REDUCE(  4, true); break;
            case   2: LAUNCH_CU_REDUCE(  2, true); break;
            case   1: LAUNCH_CU_REDUCE(  1, true); break;
        }
    }
    else
    {
        switch (threads)
        {
            case 512: LAUNCH_CU_REDUCE(512, false); break;
            case 256: LAUNCH_CU_REDUCE(256, false); break;
            case 128: LAUNCH_CU_REDUCE(128, false); break;
            case  64: LAUNCH_CU_REDUCE( 64, false); break;
            case  32: LAUNCH_CU_REDUCE( 32, false); break;
            case  16: LAUNCH_CU_REDUCE( 16, false); break;
            case   8: LAUNCH_CU_REDUCE(  8, false); break;
            case   4: LAUNCH_CU_REDUCE(  4, false); break;
            case   2: LAUNCH_CU_REDUCE(  2, false); break;
            case   1: LAUNCH_CU_REDUCE(  1, false); break;
        }
    }
#undef LAUNCH_CU_REDUCE
}
// Repeatedly launch the block reduction until one block suffices; after
// each pass the per-block minima occupy the first numBlocks entries of
// g_dist, so the global minimum ends up in g_dist[0].
void dist_min_reduce(Distance *g_dist, int n)
{
    int numBlocks = 0;
    int numThreads = 0;
    do
    {
        getNumBlocksAndThreads(n, MAX_BLOCK_DIM_SIZE, THREADS_PER_BLOCK, numBlocks, numThreads);
        reduce(n, numThreads, numBlocks, g_dist);
        n = numBlocks;
    } while (n > 1);
}
| 81ac20741db9e6e597679292bdfc96b744ff53bc.cu | #include "reduction-mod.cuh"
#include "cuda.h"
#include "stdio.h"
#include "helper_cuda.h"
#define CUDART_INF_F __int_as_float(0x7f800000)
#define THREADS_PER_BLOCK 512U
#define MAX_BLOCK_DIM_SIZE 65535U
// True iff x is a positive power of two.
// Fix: the original (x & (x-1)) == 0 test mis-classified 0 (and INT_MIN)
// as powers of two; a power of two must be positive with one bit set.
bool isPow2(int x)
{
    return (x > 0) && ((x & (x - 1)) == 0);
}
// Conditionally replace (distA, min_index) with (distB, index).
// With dir == 1 this keeps the smaller of the two distance values (ties
// go to B); dir == 0 would invert the comparison.
__device__ void cuMinR(Distance &distA, Distance &distB, int &min_index, int index, int dir)
{
    if ((distA.value >= distB.value) == dir)
    {
        distA = distB;
        min_index = index;
    }
}
// Round x up to the next power of two (returns x itself when it already
// is one): smear the highest set bit of (x-1) into every lower position,
// then add one.
int nextPow2(int x)
{
    int v = x - 1;
    for (int shift = 1; shift <= 16; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v + 1;
}
// Pick a launch configuration for the two-elements-per-thread reduction:
// the block is sized for ceil(n/2) rounded up to a power of two, capped
// at maxThreads; the grid covers n with at most maxBlocks blocks.
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    if (n < maxThreads * 2)
        threads = nextPow2((n + 1) / 2);
    else
        threads = maxThreads;
    int needed = (n + threads * 2 - 1) / (threads * 2);
    blocks = (needed < maxBlocks) ? needed : maxBlocks;
}
// Minimum-reduction over the first n Distance records of g_dist, tracking
// the winning index.  Launch with blockDim.x == blockSize (a power of
// two).  nIsPow2 lets the paired load at i + blockSize skip its bounds
// check when n is a power of two.  On exit, thread 0 of each block SWAPS
// the block's minimum element into g_dist[blockIdx.x], so repeated
// invocations converge the global minimum toward g_dist[0].
template <int blockSize, bool nIsPow2>
__global__ void cuReduce(Distance *g_dist, int n)
{
    __shared__ Distance s_dist[blockSize];
    __shared__ int s_ind[blockSize];
    int dir = 1;   // dir == 1 makes cuMinR keep the smaller value
    Distance min_dist = {1, CUDART_INF_F};
    int min_index = 0;
    int tid = threadIdx.x;
    int i = blockIdx.x * blockSize * 2 + threadIdx.x;
    int gridSize = blockSize * 2 * gridDim.x;
    // Grid-stride accumulation; each thread folds two elements per pass.
    while (i < n)
    {
        cuMinR(min_dist, g_dist[i] , min_index, i, dir);
        if (nIsPow2 || i + blockSize < n)
        {
            cuMinR(min_dist, g_dist[i + blockSize], min_index, i + blockSize , dir);
        }
        i += gridSize;
    }
    s_dist[tid] = min_dist;
    s_ind[tid] = min_index;
    __syncthreads();
    // Shared-memory tree reduction down to 64 candidates.
    if (blockSize >= 512)
    {
        if (tid < 256)
        {
            cuMinR(min_dist, s_dist[tid + 256], min_index, s_ind[tid + 256] , dir);
            s_dist[tid] = min_dist;
            s_ind[tid] = min_index;
        }
        __syncthreads();
    }
    if (blockSize >= 256)
    {
        if (tid < 128)
        {
            cuMinR(min_dist, s_dist[tid + 128], min_index, s_ind[tid + 128] , dir);
            s_ind[tid] = min_index;
            s_dist[tid] = min_dist;
        }
        __syncthreads();
    }
    if (blockSize >= 128)
    {
        if (tid < 64)
        {
            cuMinR(min_dist, s_dist[tid + 64], min_index, s_ind[tid + 64] , dir);
            s_ind[tid] = min_index;
            s_dist[tid] = min_dist;
        }
        __syncthreads();
    }
    // Final warp: relies on implicit warp-synchronous execution through
    // volatile shared memory.  NOTE(review): this idiom is unsafe under
    // Volta+ independent thread scheduling; __syncwarp()/shuffle
    // intrinsics would be the safe replacement.
    if (tid < 32)
    {
        volatile int *v_ind = s_ind;
        volatile Distance *v_dist = s_dist;
        if (blockSize >= 64)
        {
            if ((min_dist.value >= v_dist[tid + 32].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 32];
                min_index = v_ind[tid] = v_ind[tid + 32];
            }
        }
        if (blockSize >= 32)
        {
            if ((min_dist.value >= v_dist[tid + 16].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 16];
                min_index = v_ind[tid] = v_ind[tid + 16];
            }
        }
        if (blockSize >= 16)
        {
            if ((min_dist.value >= v_dist[tid + 8].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 8];
                min_index = v_ind[tid] = v_ind[tid + 8];
            }
        }
        if (blockSize >= 8)
        {
            if ((min_dist.value >= v_dist[tid + 4].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 4];
                min_index = v_ind[tid] = v_ind[tid + 4];
            }
        }
        if (blockSize >= 4)
        {
            if ((min_dist.value >= v_dist[tid + 2].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 2];
                min_index = v_ind[tid] = v_ind[tid + 2];
            }
        }
        if (blockSize >= 2)
        {
            if ((min_dist.value >= v_dist[tid + 1].value) == dir)
            {
                min_dist = v_dist[tid] = v_dist[tid + 1];
                min_index = v_ind[tid] = v_ind[tid + 1];
            }
        }
    }
    if (tid == 0)
    {
        // Swap the block minimum (original position s_ind[0]) into slot
        // blockIdx.x so later passes reduce a compacted prefix.
        i = blockIdx.x;
        min_dist = g_dist[i];
        g_dist[i] = g_dist[s_ind[tid]];
        g_dist[s_ind[tid]] = min_dist;
    }
}
// Dispatch to the cuReduce instantiation whose compile-time blockSize
// matches `threads` (must be a power of two <= 512) and whose nIsPow2
// flag matches the input length.  Unsupported thread counts launch
// nothing.  The dynamic shared-memory size mirrors the original code;
// cuReduce itself only uses statically sized __shared__ arrays.
void reduce(int size, int threads, int blocks, Distance *g_dist)
{
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    int smemSize = (threads <= 32) ? 2 * threads * (sizeof(Distance) + sizeof(int)) : threads * (sizeof(Distance) + sizeof(int));
#define LAUNCH_CU_REDUCE(BS, POW2) \
    cuReduce< BS, POW2> <<< dimGrid, dimBlock, smemSize >>>(g_dist, size)
    if (isPow2(size))
    {
        switch (threads)
        {
            case 512: LAUNCH_CU_REDUCE(512, true); break;
            case 256: LAUNCH_CU_REDUCE(256, true); break;
            case 128: LAUNCH_CU_REDUCE(128, true); break;
            case  64: LAUNCH_CU_REDUCE( 64, true); break;
            case  32: LAUNCH_CU_REDUCE( 32, true); break;
            case  16: LAUNCH_CU_REDUCE( 16, true); break;
            case   8: LAUNCH_CU_REDUCE(  8, true); break;
            case   4: LAUNCH_CU_REDUCE(  4, true); break;
            case   2: LAUNCH_CU_REDUCE(  2, true); break;
            case   1: LAUNCH_CU_REDUCE(  1, true); break;
        }
    }
    else
    {
        switch (threads)
        {
            case 512: LAUNCH_CU_REDUCE(512, false); break;
            case 256: LAUNCH_CU_REDUCE(256, false); break;
            case 128: LAUNCH_CU_REDUCE(128, false); break;
            case  64: LAUNCH_CU_REDUCE( 64, false); break;
            case  32: LAUNCH_CU_REDUCE( 32, false); break;
            case  16: LAUNCH_CU_REDUCE( 16, false); break;
            case   8: LAUNCH_CU_REDUCE(  8, false); break;
            case   4: LAUNCH_CU_REDUCE(  4, false); break;
            case   2: LAUNCH_CU_REDUCE(  2, false); break;
            case   1: LAUNCH_CU_REDUCE(  1, false); break;
        }
    }
#undef LAUNCH_CU_REDUCE
}
// Repeatedly launch the block reduction until one block suffices; after
// each pass the per-block minima occupy the first numBlocks entries of
// g_dist, so the global minimum ends up in g_dist[0].
void dist_min_reduce(Distance *g_dist, int n)
{
    int numBlocks = 0;
    int numThreads = 0;
    do
    {
        getNumBlocksAndThreads(n, MAX_BLOCK_DIM_SIZE, THREADS_PER_BLOCK, numBlocks, numThreads);
        reduce(n, numThreads, numBlocks, g_dist);
        n = numBlocks;
    } while (n > 1);
}
|
09a6fdf10556d7d6a26a82014ac9b683852f5068.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Heat-current autocorrelation: one block per correlation lag bid in
// [0, Nc), 128 threads per block.  g_heat stores five components of
// length Nd each; per lag, each component accumulates
// sum_t g_heat[t] * g_heat[t + bid] (the x and y components also add the
// paired cross terms visible below), the 128 partials are tree-reduced in
// shared memory, and the average over the (Nd - bid) valid samples is
// written to g_hac[bid + Nc * component].
__global__ void gpu_find_hac(const int Nc, const int Nd, const double* g_heat, double* g_hac)
{
    //<<<Nc, 128>>>
    __shared__ double s_hac_xi[128];
    __shared__ double s_hac_xo[128];
    __shared__ double s_hac_yi[128];
    __shared__ double s_hac_yo[128];
    __shared__ double s_hac_z[128];
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int number_of_patches = (Nd - 1) / 128 + 1;   // ceil(Nd / 128)
    int number_of_data = Nd - bid;                // valid samples at this lag
    s_hac_xi[tid] = 0.0;
    s_hac_xo[tid] = 0.0;
    s_hac_yi[tid] = 0.0;
    s_hac_yo[tid] = 0.0;
    s_hac_z[tid] = 0.0;
    for (int patch = 0; patch < number_of_patches; ++patch) {
        int index = tid + patch * 128;
        if (index + bid < Nd) {
            s_hac_xi[tid] += g_heat[index + Nd * 0] * g_heat[index + bid + Nd * 0] +
                             g_heat[index + Nd * 0] * g_heat[index + bid + Nd * 1];
            s_hac_xo[tid] += g_heat[index + Nd * 1] * g_heat[index + bid + Nd * 1] +
                             g_heat[index + Nd * 1] * g_heat[index + bid + Nd * 0];
            s_hac_yi[tid] += g_heat[index + Nd * 2] * g_heat[index + bid + Nd * 2] +
                             g_heat[index + Nd * 2] * g_heat[index + bid + Nd * 3];
            s_hac_yo[tid] += g_heat[index + Nd * 3] * g_heat[index + bid + Nd * 3] +
                             g_heat[index + Nd * 3] * g_heat[index + bid + Nd * 2];
            s_hac_z[tid] += g_heat[index + Nd * 4] * g_heat[index + bid + Nd * 4];
        }
    }
    __syncthreads();
    // Power-of-two tree reduction of the five partial-sum arrays.
#pragma unroll
    for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) {
        if (tid < offset) {
            s_hac_xi[tid] += s_hac_xi[tid + offset];
            s_hac_xo[tid] += s_hac_xo[tid + offset];
            s_hac_yi[tid] += s_hac_yi[tid + offset];
            s_hac_yo[tid] += s_hac_yo[tid + offset];
            s_hac_z[tid] += s_hac_z[tid + offset];
        }
        __syncthreads();
    }
    if (tid == 0) {
        g_hac[bid + Nc * 0] = s_hac_xi[0] / number_of_data;
        g_hac[bid + Nc * 1] = s_hac_xo[0] / number_of_data;
        g_hac[bid + Nc * 2] = s_hac_yi[0] / number_of_data;
        g_hac[bid + Nc * 3] = s_hac_yo[0] / number_of_data;
        g_hac[bid + Nc * 4] = s_hac_z[0] / number_of_data;
    }
}
// Heat-current autocorrelation: one block per correlation lag bid in
// [0, Nc), 128 threads per block.  g_heat stores five components of
// length Nd each; per lag, each component accumulates
// sum_t g_heat[t] * g_heat[t + bid] (the x and y components also add the
// paired cross terms visible below), the 128 partials are tree-reduced in
// shared memory, and the average over the (Nd - bid) valid samples is
// written to g_hac[bid + Nc * component].
__global__ void gpu_find_hac(const int Nc, const int Nd, const double* g_heat, double* g_hac)
{
    //<<<Nc, 128>>>
    __shared__ double s_hac_xi[128];
    __shared__ double s_hac_xo[128];
    __shared__ double s_hac_yi[128];
    __shared__ double s_hac_yo[128];
    __shared__ double s_hac_z[128];
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int number_of_patches = (Nd - 1) / 128 + 1;   // ceil(Nd / 128)
    int number_of_data = Nd - bid;                // valid samples at this lag
    s_hac_xi[tid] = 0.0;
    s_hac_xo[tid] = 0.0;
    s_hac_yi[tid] = 0.0;
    s_hac_yo[tid] = 0.0;
    s_hac_z[tid] = 0.0;
    for (int patch = 0; patch < number_of_patches; ++patch) {
        int index = tid + patch * 128;
        if (index + bid < Nd) {
            s_hac_xi[tid] += g_heat[index + Nd * 0] * g_heat[index + bid + Nd * 0] +
                             g_heat[index + Nd * 0] * g_heat[index + bid + Nd * 1];
            s_hac_xo[tid] += g_heat[index + Nd * 1] * g_heat[index + bid + Nd * 1] +
                             g_heat[index + Nd * 1] * g_heat[index + bid + Nd * 0];
            s_hac_yi[tid] += g_heat[index + Nd * 2] * g_heat[index + bid + Nd * 2] +
                             g_heat[index + Nd * 2] * g_heat[index + bid + Nd * 3];
            s_hac_yo[tid] += g_heat[index + Nd * 3] * g_heat[index + bid + Nd * 3] +
                             g_heat[index + Nd * 3] * g_heat[index + bid + Nd * 2];
            s_hac_z[tid] += g_heat[index + Nd * 4] * g_heat[index + bid + Nd * 4];
        }
    }
    __syncthreads();
    // Power-of-two tree reduction of the five partial-sum arrays.
#pragma unroll
    for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) {
        if (tid < offset) {
            s_hac_xi[tid] += s_hac_xi[tid + offset];
            s_hac_xo[tid] += s_hac_xo[tid + offset];
            s_hac_yi[tid] += s_hac_yi[tid + offset];
            s_hac_yo[tid] += s_hac_yo[tid + offset];
            s_hac_z[tid] += s_hac_z[tid + offset];
        }
        __syncthreads();
    }
    if (tid == 0) {
        g_hac[bid + Nc * 0] = s_hac_xi[0] / number_of_data;
        g_hac[bid + Nc * 1] = s_hac_xo[0] / number_of_data;
        g_hac[bid + Nc * 2] = s_hac_yi[0] / number_of_data;
        g_hac[bid + Nc * 3] = s_hac_yo[0] / number_of_data;
        g_hac[bid + Nc * 4] = s_hac_z[0] / number_of_data;
    }
}
bd0bb9acbf382afc16d83d5af3ccdd8755c0643f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "compute.h"
#include "circle.h"
// Uploads the values 1..n to the device, runs the compute kernel, then
// runs the host-side circle() demo.
// Fix: the host buffer (data) and device buffer (d_a) were leaked.
int main(int argc, char **argv)
{
    int n = 3;
    int nbytes = n*sizeof(double);
    double *d_a = 0;
    hipMalloc(&d_a, nbytes);
    double *data = (double *)malloc(nbytes);
    for (int i=0; i < n; ++i)
    {
        data[i] = (double)(i+1);
    }
    hipMemcpy((void *)d_a, (void *)data, nbytes, hipMemcpyHostToDevice);
    printf("Calling kernel\n");
    // NOTE(review): the third kernel argument is nbytes (byte count), not
    // n (element count) — confirm compute()'s expected units.
    hipLaunchKernelGGL(( compute), dim3(16),dim3(16), 0, 0, d_a, d_a, nbytes);
    hipDeviceSynchronize();
    printf("done\n");
    circle();
    free(data);     // fix: host buffer was leaked
    hipFree(d_a);   // fix: device buffer was leaked
    return 0;
}
| bd0bb9acbf382afc16d83d5af3ccdd8755c0643f.cu |
#include <stdio.h>
#include <stdlib.h>
#include "compute.h"
#include "circle.h"
// Uploads the values 1..n to the device, runs the compute kernel, then
// runs the host-side circle() demo.
// Fix: the host buffer (data) and device buffer (d_a) were leaked.
int main(int argc, char **argv)
{
    int n = 3;
    int nbytes = n*sizeof(double);
    double *d_a = 0;
    cudaMalloc(&d_a, nbytes);
    double *data = (double *)malloc(nbytes);
    for (int i=0; i < n; ++i)
    {
        data[i] = (double)(i+1);
    }
    cudaMemcpy((void *)d_a, (void *)data, nbytes, cudaMemcpyHostToDevice);
    printf("Calling kernel\n");
    // NOTE(review): the third kernel argument is nbytes (byte count), not
    // n (element count) — confirm compute()'s expected units.
    compute<<<16,16>>>(d_a, d_a, nbytes);
    cudaDeviceSynchronize();
    printf("done\n");
    circle();
    free(data);      // fix: host buffer was leaked
    cudaFree(d_a);   // fix: device buffer was leaked
    return 0;
}
|
8e4cac4d382888bb9e594ecb4346e1bfea73c00a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
// Report any runtime error to stderr and abort via assert; on success the
// status is simply passed through so calls can be wrapped inline.
inline hipError_t checkCuda(hipError_t result)
{
  if (result == hipSuccess) {
    return result;
  }
  fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
  assert(result == hipSuccess);
  return result;
}
// Fill all N entries of a with num.
void initWith(float num, float *a, int N)
{
  for (float *p = a; p != a + N; ++p)
  {
    *p = num;
  }
}
// Grid-stride element-wise add: result[i] = a[i] + b[i] for i in [0, N).
// Valid for any grid size; each thread handles index, index+step, ...
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int step = gridDim.x * blockDim.x;
  while (i < N)
  {
    result[i] = a[i] + b[i];
    i += step;
  }
}
// Verify every element equals target; exit(1) on the first mismatch,
// otherwise print a success message.
void checkElementsAre(float target, float *array, int N)
{
  for (int i = 0; i < N; i++)
  {
    if (array[i] == target) continue;
    printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
    exit(1);
  }
  printf("SUCCESS! All values added correctly.\n");
}
// Adds two 2^21-element managed vectors (3 + 4) on the GPU and checks
// every result equals 7, with full error checking and cleanup.
int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  float *c;
  // Managed allocations are accessible from both host and device.
  checkCuda( hipMallocManaged(&a, size) );
  checkCuda( hipMallocManaged(&b, size) );
  checkCuda( hipMallocManaged(&c, size) );
  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);
  // 256 threads per block; enough blocks to cover N in one stride.
  const size_t blockSize = 256;
  const size_t gridSize = (N + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( addVectorsInto), dim3(gridSize), dim3(blockSize), 0, 0, c, a, b, N);
  checkCuda( hipGetLastError() );
  checkCuda( hipDeviceSynchronize() );
  checkElementsAre(7, c, N);
  checkCuda( hipFree(a) );
  checkCuda( hipFree(b) );
  checkCuda( hipFree(c) );
}
| 8e4cac4d382888bb9e594ecb4346e1bfea73c00a.cu | #include <stdio.h>
#include <assert.h>
// Report any runtime error to stderr and abort via assert; on success the
// status is simply passed through so calls can be wrapped inline.
inline cudaError_t checkCuda(cudaError_t result)
{
  if (result == cudaSuccess) {
    return result;
  }
  fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
  assert(result == cudaSuccess);
  return result;
}
// Fill all N entries of a with num.
void initWith(float num, float *a, int N)
{
  for (float *p = a; p != a + N; ++p)
  {
    *p = num;
  }
}
// Grid-stride element-wise add: result[i] = a[i] + b[i] for i in [0, N).
// Valid for any grid size; each thread handles index, index+step, ...
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int step = gridDim.x * blockDim.x;
  while (i < N)
  {
    result[i] = a[i] + b[i];
    i += step;
  }
}
// Verify every element equals target; exit(1) on the first mismatch,
// otherwise print a success message.
void checkElementsAre(float target, float *array, int N)
{
  for (int i = 0; i < N; i++)
  {
    if (array[i] == target) continue;
    printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
    exit(1);
  }
  printf("SUCCESS! All values added correctly.\n");
}
// Adds two 2^21-element managed vectors (3 + 4) on the GPU and checks
// every result equals 7, with full error checking and cleanup.
int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  float *c;
  // Managed allocations are accessible from both host and device.
  checkCuda( cudaMallocManaged(&a, size) );
  checkCuda( cudaMallocManaged(&b, size) );
  checkCuda( cudaMallocManaged(&c, size) );
  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);
  // 256 threads per block; enough blocks to cover N in one stride.
  const size_t blockSize = 256;
  const size_t gridSize = (N + blockSize - 1) / blockSize;
  addVectorsInto<<<gridSize, blockSize>>>(c, a, b, N);
  checkCuda( cudaGetLastError() );
  checkCuda( cudaDeviceSynchronize() );
  checkElementsAre(7, c, N);
  checkCuda( cudaFree(a) );
  checkCuda( cudaFree(b) );
  checkCuda( cudaFree(c) );
}
|
25deac4bea5b90854d51a9d7a58d37481704b2ba.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2016 Rory mitchell
*/
#include "gpu_builder.cuh"
#include <stdio.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <hipcub/hipcub.hpp>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <vector>
#include "hip_helpers.cuh"
#include "find_split.cuh"
#include "types_functions.cuh"
namespace xgboost {
namespace tree {
struct GPUData {
  // Buffers start unallocated; Init() must be called before use.
  GPUData() : allocated(false), n_features(0), n_instances(0) {}
bool allocated;
int n_features;
int n_instances;
GPUTrainingParam param;
CubMemory cub_mem;
thrust::device_vector<float> fvalues;
thrust::device_vector<int> foffsets;
thrust::device_vector<bst_uint> instance_id;
thrust::device_vector<int> feature_id;
thrust::device_vector<NodeIdT> node_id;
thrust::device_vector<NodeIdT> node_id_temp;
thrust::device_vector<NodeIdT> node_id_instance;
thrust::device_vector<NodeIdT> node_id_instance_temp;
thrust::device_vector<gpu_gpair> gpair;
thrust::device_vector<Node> nodes;
thrust::device_vector<Split> split_candidates;
thrust::device_vector<Item> items;
thrust::device_vector<Item> items_temp;
thrust::device_vector<gpu_gpair> node_sums;
thrust::device_vector<int> node_offsets;
thrust::device_vector<int> sort_index_in;
thrust::device_vector<int> sort_index_out;
  // Allocate and populate all device-side buffers for training: copies
  // the feature values/offsets, instance ids, feature ids and gradient
  // pairs to the GPU, sizes the per-level node buffers from max_depth,
  // and builds the fused Item array via CreateItems().
  void Init(const std::vector<float> &in_fvalues,
            const std::vector<int> &in_foffsets,
            const std::vector<bst_uint> &in_instance_id,
            const std::vector<int> &in_feature_id,
            const std::vector<bst_gpair> &in_gpair, bst_uint n_instances_in,
            bst_uint n_features_in, int max_depth, const TrainParam &param_in) {
    Timer t;   // NOTE(review): started but never read in the visible code
    n_features = n_features_in;
    n_instances = n_instances_in;
    fvalues = in_fvalues;
    foffsets = in_foffsets;
    instance_id = in_instance_id;
    feature_id = in_feature_id;
    param = GPUTrainingParam(param_in.min_child_weight, param_in.reg_lambda,
                             param_in.reg_alpha, param_in.max_delta_step);
    gpair = thrust::device_vector<gpu_gpair>(in_gpair.begin(), in_gpair.end());
    // Number of nodes in the deepest tree level.
    uint32_t max_nodes_level = 1 << max_depth;
    node_sums = thrust::device_vector<gpu_gpair>(max_nodes_level * n_features);
    node_offsets = thrust::device_vector<int>(max_nodes_level * n_features);
    node_id_instance = thrust::device_vector<NodeIdT>(n_instances, 0);
    node_id = thrust::device_vector<NodeIdT>(fvalues.size(), 0);
    node_id_temp = thrust::device_vector<NodeIdT>(fvalues.size());
    // Total nodes in a complete tree of depth max_depth.
    uint32_t max_nodes = (1 << (max_depth + 1)) - 1;
    nodes = thrust::device_vector<Node>(max_nodes);
    split_candidates =
        thrust::device_vector<Split>(max_nodes_level * n_features);
    allocated = true;
    // Init items
    items = thrust::device_vector<Item>(fvalues.size());
    items_temp = thrust::device_vector<Item>(fvalues.size());
    sort_index_in = thrust::device_vector<int>(fvalues.size());
    sort_index_out = thrust::device_vector<int>(fvalues.size());
    this->CreateItems();
  }
~GPUData() {}
// Create items array using gpair, instaoce_id, fvalue
void CreateItems() {
auto d_items = items.data();
auto d_instance_id = instance_id.data();
auto d_gpair = gpair.data();
auto d_fvalue = fvalues.data();
auto counting = thrust::make_counting_iterator<bst_uint>(0);
thrust::for_each(counting, counting + fvalues.size(),
[=] __device__(bst_uint i) {
Item item;
item.instance_id = d_instance_id[i];
item.fvalue = d_fvalue[i];
item.gpair = d_gpair[item.instance_id];
d_items[i] = item;
});
}
// Reset memory for new boosting iteration
void Reset(const std::vector<bst_gpair> &in_gpair,
const std::vector<float> &in_fvalues,
const std::vector<bst_uint> &in_instance_id) {
CHECK(allocated);
thrust::copy(in_gpair.begin(), in_gpair.end(), gpair.begin());
thrust::fill(nodes.begin(), nodes.end(), Node());
thrust::fill(node_id_instance.begin(), node_id_instance.end(), 0);
thrust::fill(node_id.begin(), node_id.end(), 0);
this->CreateItems();
}
bool IsAllocated() { return allocated; }
// Gather from node_id_instance into node_id according to instance_id
void GatherNodeId() {
// Update node_id for each item
auto d_items = items.data();
auto d_node_id = node_id.data();
auto d_node_id_instance = node_id_instance.data();
auto counting = thrust::make_counting_iterator<bst_uint>(0);
thrust::for_each(counting, counting + fvalues.size(),
[=] __device__(bst_uint i) {
Item item = d_items[i];
d_node_id[i] = d_node_id_instance[item.instance_id];
});
}
};
GPUBuilder::GPUBuilder() { gpu_data = new GPUData(); }
void GPUBuilder::Init(const TrainParam ¶m_in) { param = param_in; }
GPUBuilder::~GPUBuilder() { delete gpu_data; }
template <int ITEMS_PER_THREAD, typename OffsetT>
__global__ void update_nodeid_missing_kernel(NodeIdT *d_node_id_instance,
Node *d_nodes, const OffsetT n) {
for (auto i : grid_stride_range(OffsetT(0), n)) {
NodeIdT item_node_id = d_node_id_instance[i];
if (item_node_id < 0) {
continue;
}
Node node = d_nodes[item_node_id];
if (node.IsLeaf()) {
d_node_id_instance[i] = -1;
} else if (node.split.missing_left) {
d_node_id_instance[i] = item_node_id * 2 + 1;
} else {
d_node_id_instance[i] = item_node_id * 2 + 2;
}
}
}
__device__ void load_as_words(const int n_nodes, Node *d_nodes, Node *s_nodes) {
const int upper_range = n_nodes * (sizeof(Node) / sizeof(int));
for (auto i : block_stride_range(0, upper_range)) {
reinterpret_cast<int *>(s_nodes)[i] = reinterpret_cast<int *>(d_nodes)[i];
}
}
template <int ITEMS_PER_THREAD>
__global__ void
update_nodeid_fvalue_kernel(NodeIdT *d_node_id, NodeIdT *d_node_id_instance,
Item *d_items, Node *d_nodes, const int n_nodes,
const int *d_feature_id, const size_t n,
const int n_features, bool cache_nodes) {
// Load nodes into shared memory
extern __shared__ Node s_nodes[];
if (cache_nodes) {
load_as_words(n_nodes, d_nodes, s_nodes);
__syncthreads();
}
for (auto i : grid_stride_range(size_t(0), n)) {
Item item = d_items[i];
NodeIdT item_node_id = d_node_id[i];
if (item_node_id < 0) {
continue;
}
Node node = cache_nodes ? s_nodes[item_node_id] : d_nodes[item_node_id];
if (node.IsLeaf()) {
continue;
}
int feature_id = d_feature_id[i];
if (feature_id == node.split.findex) {
if (item.fvalue < node.split.fvalue) {
d_node_id_instance[item.instance_id] = item_node_id * 2 + 1;
} else {
d_node_id_instance[item.instance_id] = item_node_id * 2 + 2;
}
}
}
}
void GPUBuilder::UpdateNodeId(int level) {
// Update all nodes based on missing direction
{
const bst_uint n = gpu_data->node_id_instance.size();
const bst_uint ITEMS_PER_THREAD = 8;
const bst_uint BLOCK_THREADS = 256;
const bst_uint GRID_SIZE =
div_round_up(n, ITEMS_PER_THREAD * BLOCK_THREADS);
hipLaunchKernelGGL(( update_nodeid_missing_kernel<
ITEMS_PER_THREAD>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
raw(gpu_data->node_id_instance), raw(gpu_data->nodes), n);
safe_cuda(hipDeviceSynchronize());
}
// Update node based on fvalue where exists
{
const bst_uint n = gpu_data->fvalues.size();
const bst_uint ITEMS_PER_THREAD = 4;
const bst_uint BLOCK_THREADS = 256;
const bst_uint GRID_SIZE =
div_round_up(n, ITEMS_PER_THREAD * BLOCK_THREADS);
// Use smem cache version if possible
const bool cache_nodes = level < 7;
int n_nodes = (1 << (level + 1)) - 1;
int smem_size = cache_nodes ? sizeof(Node) * n_nodes : 0;
hipLaunchKernelGGL(( update_nodeid_fvalue_kernel<
ITEMS_PER_THREAD>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), smem_size, 0,
raw(gpu_data->node_id), raw(gpu_data->node_id_instance),
raw(gpu_data->items), raw(gpu_data->nodes), n_nodes,
raw(gpu_data->feature_id), gpu_data->fvalues.size(),
gpu_data->n_features, cache_nodes);
safe_cuda(hipGetLastError());
safe_cuda(hipDeviceSynchronize());
}
gpu_data->GatherNodeId();
}
void GPUBuilder::Sort(int level) {
thrust::sequence(gpu_data->sort_index_in.begin(),
gpu_data->sort_index_in.end());
if (!gpu_data->cub_mem.IsAllocated()) {
hipcub::DeviceSegmentedRadixSort::SortPairs(
gpu_data->cub_mem.d_temp_storage, gpu_data->cub_mem.temp_storage_bytes,
raw(gpu_data->node_id), raw(gpu_data->node_id_temp),
raw(gpu_data->sort_index_in), raw(gpu_data->sort_index_out),
gpu_data->fvalues.size(), gpu_data->n_features, raw(gpu_data->foffsets),
raw(gpu_data->foffsets) + 1);
gpu_data->cub_mem.Allocate();
}
hipcub::DeviceSegmentedRadixSort::SortPairs(
gpu_data->cub_mem.d_temp_storage, gpu_data->cub_mem.temp_storage_bytes,
raw(gpu_data->node_id), raw(gpu_data->node_id_temp),
raw(gpu_data->sort_index_in), raw(gpu_data->sort_index_out),
gpu_data->fvalues.size(), gpu_data->n_features, raw(gpu_data->foffsets),
raw(gpu_data->foffsets) + 1);
thrust::gather(gpu_data->sort_index_out.begin(),
gpu_data->sort_index_out.end(), gpu_data->items.begin(),
gpu_data->items_temp.begin());
thrust::copy(gpu_data->items_temp.begin(), gpu_data->items_temp.end(),
gpu_data->items.begin());
thrust::copy(gpu_data->node_id_temp.begin(), gpu_data->node_id_temp.end(),
gpu_data->node_id.begin());
}
void GPUBuilder::Update(const std::vector<bst_gpair> &gpair, DMatrix *p_fmat,
RegTree *p_tree) {
try {
Timer update;
Timer t;
this->InitData(gpair, *p_fmat, *p_tree);
t.printElapsed("init data");
this->InitFirstNode();
for (int level = 0; level < param.max_depth; level++) {
bool use_multiscan_algorithm = level < multiscan_levels;
t.reset();
if (level > 0) {
Timer update_node;
this->UpdateNodeId(level);
update_node.printElapsed("node");
}
if (level > 0 && !use_multiscan_algorithm) {
Timer s;
this->Sort(level);
s.printElapsed("sort");
}
Timer split;
find_split(raw(gpu_data->items), raw(gpu_data->split_candidates),
raw(gpu_data->node_id), raw(gpu_data->nodes),
(bst_uint)gpu_data->fvalues.size(), gpu_data->n_features,
raw(gpu_data->foffsets), raw(gpu_data->node_sums),
raw(gpu_data->node_offsets), gpu_data->param, level,
use_multiscan_algorithm);
split.printElapsed("split");
t.printElapsed("level");
}
this->CopyTree(*p_tree);
update.printElapsed("update");
} catch (thrust::system_error &e) {
std::cerr << "CUDA error: " << e.what() << std::endl;
exit(-1);
}
}
void GPUBuilder::InitData(const std::vector<bst_gpair> &gpair, DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "ColMaker: can only grow new tree";
CHECK(fmat.SingleColBlock()) << "GPUMaker: must have single column block";
if (gpu_data->IsAllocated()) {
gpu_data->Reset(gpair, fvalues, instance_id);
return;
}
Timer t;
MetaInfo info = fmat.info();
dmlc::DataIter<ColBatch> *iter = fmat.ColIterator();
std::vector<int> foffsets;
foffsets.push_back(0);
std::vector<int> feature_id;
fvalues.reserve(info.num_col * info.num_row);
instance_id.reserve(info.num_col * info.num_row);
feature_id.reserve(info.num_col * info.num_row);
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (int i = 0; i < batch.size; i++) {
const ColBatch::Inst &col = batch[i];
for (const ColBatch::Entry *it = col.data; it != col.data + col.length;
it++) {
fvalues.push_back(it->fvalue);
instance_id.push_back(it->index);
feature_id.push_back(i);
}
foffsets.push_back(fvalues.size());
}
}
t.printElapsed("dmatrix");
t.reset();
gpu_data->Init(fvalues, foffsets, instance_id, feature_id, gpair,
info.num_row, info.num_col, param.max_depth, param);
t.printElapsed("gpu init");
}
void GPUBuilder::InitFirstNode() {
// Build the root node on the CPU and copy to device
gpu_gpair sum_gradients =
thrust::reduce(gpu_data->gpair.begin(), gpu_data->gpair.end(),
gpu_gpair(0, 0), hipcub::Sum());
gpu_data->nodes[0] = Node(
sum_gradients,
CalcGain(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()),
CalcWeight(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()));
}
enum NodeType {
NODE = 0,
LEAF = 1,
UNUSED = 2,
};
// Recursively label node types
void flag_nodes(const thrust::host_vector<Node> &nodes,
std::vector<NodeType> *node_flags, int nid, NodeType type) {
if (nid >= nodes.size() || type == UNUSED) {
return;
}
const Node &n = nodes[nid];
// Current node and all children are valid
if (n.split.loss_chg > rt_eps) {
(*node_flags)[nid] = NODE;
flag_nodes(nodes, node_flags, nid * 2 + 1, NODE);
flag_nodes(nodes, node_flags, nid * 2 + 2, NODE);
} else {
// Current node is leaf, therefore is valid but all children are invalid
(*node_flags)[nid] = LEAF;
flag_nodes(nodes, node_flags, nid * 2 + 1, UNUSED);
flag_nodes(nodes, node_flags, nid * 2 + 2, UNUSED);
}
}
// Copy gpu dense representation of tree to xgboost sparse representation
void GPUBuilder::CopyTree(RegTree &tree) {
thrust::host_vector<Node> h_nodes = gpu_data->nodes;
std::vector<NodeType> node_flags(h_nodes.size(), UNUSED);
flag_nodes(h_nodes, &node_flags, 0, NODE);
int nid = 0;
for (int gpu_nid = 0; gpu_nid < h_nodes.size(); gpu_nid++) {
NodeType flag = node_flags[gpu_nid];
const Node &n = h_nodes[gpu_nid];
if (flag == NODE) {
tree.AddChilds(nid);
tree[nid].set_split(n.split.findex, n.split.fvalue, n.split.missing_left);
tree.stat(nid).loss_chg = n.split.loss_chg;
tree.stat(nid).base_weight = n.weight;
tree.stat(nid).sum_hess = n.sum_gradients.hess();
tree[tree[nid].cleft()].set_leaf(0);
tree[tree[nid].cright()].set_leaf(0);
nid++;
} else if (flag == LEAF) {
tree[nid].set_leaf(n.weight * param.learning_rate);
tree.stat(nid).sum_hess = n.sum_gradients.hess();
nid++;
}
}
}
} // namespace tree
} // namespace xgboost
| 25deac4bea5b90854d51a9d7a58d37481704b2ba.cu | /*!
* Copyright 2016 Rory mitchell
*/
#include "gpu_builder.cuh"
#include <stdio.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <cub/cub.cuh>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <vector>
#include "cuda_helpers.cuh"
#include "find_split.cuh"
#include "types_functions.cuh"
namespace xgboost {
namespace tree {
struct GPUData {
GPUData() : allocated(false), n_features(0), n_instances(0) {}
bool allocated;
int n_features;
int n_instances;
GPUTrainingParam param;
CubMemory cub_mem;
thrust::device_vector<float> fvalues;
thrust::device_vector<int> foffsets;
thrust::device_vector<bst_uint> instance_id;
thrust::device_vector<int> feature_id;
thrust::device_vector<NodeIdT> node_id;
thrust::device_vector<NodeIdT> node_id_temp;
thrust::device_vector<NodeIdT> node_id_instance;
thrust::device_vector<NodeIdT> node_id_instance_temp;
thrust::device_vector<gpu_gpair> gpair;
thrust::device_vector<Node> nodes;
thrust::device_vector<Split> split_candidates;
thrust::device_vector<Item> items;
thrust::device_vector<Item> items_temp;
thrust::device_vector<gpu_gpair> node_sums;
thrust::device_vector<int> node_offsets;
thrust::device_vector<int> sort_index_in;
thrust::device_vector<int> sort_index_out;
void Init(const std::vector<float> &in_fvalues,
const std::vector<int> &in_foffsets,
const std::vector<bst_uint> &in_instance_id,
const std::vector<int> &in_feature_id,
const std::vector<bst_gpair> &in_gpair, bst_uint n_instances_in,
bst_uint n_features_in, int max_depth, const TrainParam ¶m_in) {
Timer t;
n_features = n_features_in;
n_instances = n_instances_in;
fvalues = in_fvalues;
foffsets = in_foffsets;
instance_id = in_instance_id;
feature_id = in_feature_id;
param = GPUTrainingParam(param_in.min_child_weight, param_in.reg_lambda,
param_in.reg_alpha, param_in.max_delta_step);
gpair = thrust::device_vector<gpu_gpair>(in_gpair.begin(), in_gpair.end());
uint32_t max_nodes_level = 1 << max_depth;
node_sums = thrust::device_vector<gpu_gpair>(max_nodes_level * n_features);
node_offsets = thrust::device_vector<int>(max_nodes_level * n_features);
node_id_instance = thrust::device_vector<NodeIdT>(n_instances, 0);
node_id = thrust::device_vector<NodeIdT>(fvalues.size(), 0);
node_id_temp = thrust::device_vector<NodeIdT>(fvalues.size());
uint32_t max_nodes = (1 << (max_depth + 1)) - 1;
nodes = thrust::device_vector<Node>(max_nodes);
split_candidates =
thrust::device_vector<Split>(max_nodes_level * n_features);
allocated = true;
// Init items
items = thrust::device_vector<Item>(fvalues.size());
items_temp = thrust::device_vector<Item>(fvalues.size());
sort_index_in = thrust::device_vector<int>(fvalues.size());
sort_index_out = thrust::device_vector<int>(fvalues.size());
this->CreateItems();
}
~GPUData() {}
// Create items array using gpair, instaoce_id, fvalue
void CreateItems() {
auto d_items = items.data();
auto d_instance_id = instance_id.data();
auto d_gpair = gpair.data();
auto d_fvalue = fvalues.data();
auto counting = thrust::make_counting_iterator<bst_uint>(0);
thrust::for_each(counting, counting + fvalues.size(),
[=] __device__(bst_uint i) {
Item item;
item.instance_id = d_instance_id[i];
item.fvalue = d_fvalue[i];
item.gpair = d_gpair[item.instance_id];
d_items[i] = item;
});
}
// Reset memory for new boosting iteration
void Reset(const std::vector<bst_gpair> &in_gpair,
const std::vector<float> &in_fvalues,
const std::vector<bst_uint> &in_instance_id) {
CHECK(allocated);
thrust::copy(in_gpair.begin(), in_gpair.end(), gpair.begin());
thrust::fill(nodes.begin(), nodes.end(), Node());
thrust::fill(node_id_instance.begin(), node_id_instance.end(), 0);
thrust::fill(node_id.begin(), node_id.end(), 0);
this->CreateItems();
}
bool IsAllocated() { return allocated; }
// Gather from node_id_instance into node_id according to instance_id
void GatherNodeId() {
// Update node_id for each item
auto d_items = items.data();
auto d_node_id = node_id.data();
auto d_node_id_instance = node_id_instance.data();
auto counting = thrust::make_counting_iterator<bst_uint>(0);
thrust::for_each(counting, counting + fvalues.size(),
[=] __device__(bst_uint i) {
Item item = d_items[i];
d_node_id[i] = d_node_id_instance[item.instance_id];
});
}
};
GPUBuilder::GPUBuilder() { gpu_data = new GPUData(); }
void GPUBuilder::Init(const TrainParam ¶m_in) { param = param_in; }
GPUBuilder::~GPUBuilder() { delete gpu_data; }
template <int ITEMS_PER_THREAD, typename OffsetT>
__global__ void update_nodeid_missing_kernel(NodeIdT *d_node_id_instance,
Node *d_nodes, const OffsetT n) {
for (auto i : grid_stride_range(OffsetT(0), n)) {
NodeIdT item_node_id = d_node_id_instance[i];
if (item_node_id < 0) {
continue;
}
Node node = d_nodes[item_node_id];
if (node.IsLeaf()) {
d_node_id_instance[i] = -1;
} else if (node.split.missing_left) {
d_node_id_instance[i] = item_node_id * 2 + 1;
} else {
d_node_id_instance[i] = item_node_id * 2 + 2;
}
}
}
__device__ void load_as_words(const int n_nodes, Node *d_nodes, Node *s_nodes) {
const int upper_range = n_nodes * (sizeof(Node) / sizeof(int));
for (auto i : block_stride_range(0, upper_range)) {
reinterpret_cast<int *>(s_nodes)[i] = reinterpret_cast<int *>(d_nodes)[i];
}
}
template <int ITEMS_PER_THREAD>
__global__ void
update_nodeid_fvalue_kernel(NodeIdT *d_node_id, NodeIdT *d_node_id_instance,
Item *d_items, Node *d_nodes, const int n_nodes,
const int *d_feature_id, const size_t n,
const int n_features, bool cache_nodes) {
// Load nodes into shared memory
extern __shared__ Node s_nodes[];
if (cache_nodes) {
load_as_words(n_nodes, d_nodes, s_nodes);
__syncthreads();
}
for (auto i : grid_stride_range(size_t(0), n)) {
Item item = d_items[i];
NodeIdT item_node_id = d_node_id[i];
if (item_node_id < 0) {
continue;
}
Node node = cache_nodes ? s_nodes[item_node_id] : d_nodes[item_node_id];
if (node.IsLeaf()) {
continue;
}
int feature_id = d_feature_id[i];
if (feature_id == node.split.findex) {
if (item.fvalue < node.split.fvalue) {
d_node_id_instance[item.instance_id] = item_node_id * 2 + 1;
} else {
d_node_id_instance[item.instance_id] = item_node_id * 2 + 2;
}
}
}
}
void GPUBuilder::UpdateNodeId(int level) {
// Update all nodes based on missing direction
{
const bst_uint n = gpu_data->node_id_instance.size();
const bst_uint ITEMS_PER_THREAD = 8;
const bst_uint BLOCK_THREADS = 256;
const bst_uint GRID_SIZE =
div_round_up(n, ITEMS_PER_THREAD * BLOCK_THREADS);
update_nodeid_missing_kernel<
ITEMS_PER_THREAD><<<GRID_SIZE, BLOCK_THREADS>>>(
raw(gpu_data->node_id_instance), raw(gpu_data->nodes), n);
safe_cuda(cudaDeviceSynchronize());
}
// Update node based on fvalue where exists
{
const bst_uint n = gpu_data->fvalues.size();
const bst_uint ITEMS_PER_THREAD = 4;
const bst_uint BLOCK_THREADS = 256;
const bst_uint GRID_SIZE =
div_round_up(n, ITEMS_PER_THREAD * BLOCK_THREADS);
// Use smem cache version if possible
const bool cache_nodes = level < 7;
int n_nodes = (1 << (level + 1)) - 1;
int smem_size = cache_nodes ? sizeof(Node) * n_nodes : 0;
update_nodeid_fvalue_kernel<
ITEMS_PER_THREAD><<<GRID_SIZE, BLOCK_THREADS, smem_size>>>(
raw(gpu_data->node_id), raw(gpu_data->node_id_instance),
raw(gpu_data->items), raw(gpu_data->nodes), n_nodes,
raw(gpu_data->feature_id), gpu_data->fvalues.size(),
gpu_data->n_features, cache_nodes);
safe_cuda(cudaGetLastError());
safe_cuda(cudaDeviceSynchronize());
}
gpu_data->GatherNodeId();
}
void GPUBuilder::Sort(int level) {
thrust::sequence(gpu_data->sort_index_in.begin(),
gpu_data->sort_index_in.end());
if (!gpu_data->cub_mem.IsAllocated()) {
cub::DeviceSegmentedRadixSort::SortPairs(
gpu_data->cub_mem.d_temp_storage, gpu_data->cub_mem.temp_storage_bytes,
raw(gpu_data->node_id), raw(gpu_data->node_id_temp),
raw(gpu_data->sort_index_in), raw(gpu_data->sort_index_out),
gpu_data->fvalues.size(), gpu_data->n_features, raw(gpu_data->foffsets),
raw(gpu_data->foffsets) + 1);
gpu_data->cub_mem.Allocate();
}
cub::DeviceSegmentedRadixSort::SortPairs(
gpu_data->cub_mem.d_temp_storage, gpu_data->cub_mem.temp_storage_bytes,
raw(gpu_data->node_id), raw(gpu_data->node_id_temp),
raw(gpu_data->sort_index_in), raw(gpu_data->sort_index_out),
gpu_data->fvalues.size(), gpu_data->n_features, raw(gpu_data->foffsets),
raw(gpu_data->foffsets) + 1);
thrust::gather(gpu_data->sort_index_out.begin(),
gpu_data->sort_index_out.end(), gpu_data->items.begin(),
gpu_data->items_temp.begin());
thrust::copy(gpu_data->items_temp.begin(), gpu_data->items_temp.end(),
gpu_data->items.begin());
thrust::copy(gpu_data->node_id_temp.begin(), gpu_data->node_id_temp.end(),
gpu_data->node_id.begin());
}
void GPUBuilder::Update(const std::vector<bst_gpair> &gpair, DMatrix *p_fmat,
RegTree *p_tree) {
try {
Timer update;
Timer t;
this->InitData(gpair, *p_fmat, *p_tree);
t.printElapsed("init data");
this->InitFirstNode();
for (int level = 0; level < param.max_depth; level++) {
bool use_multiscan_algorithm = level < multiscan_levels;
t.reset();
if (level > 0) {
Timer update_node;
this->UpdateNodeId(level);
update_node.printElapsed("node");
}
if (level > 0 && !use_multiscan_algorithm) {
Timer s;
this->Sort(level);
s.printElapsed("sort");
}
Timer split;
find_split(raw(gpu_data->items), raw(gpu_data->split_candidates),
raw(gpu_data->node_id), raw(gpu_data->nodes),
(bst_uint)gpu_data->fvalues.size(), gpu_data->n_features,
raw(gpu_data->foffsets), raw(gpu_data->node_sums),
raw(gpu_data->node_offsets), gpu_data->param, level,
use_multiscan_algorithm);
split.printElapsed("split");
t.printElapsed("level");
}
this->CopyTree(*p_tree);
update.printElapsed("update");
} catch (thrust::system_error &e) {
std::cerr << "CUDA error: " << e.what() << std::endl;
exit(-1);
}
}
void GPUBuilder::InitData(const std::vector<bst_gpair> &gpair, DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "ColMaker: can only grow new tree";
CHECK(fmat.SingleColBlock()) << "GPUMaker: must have single column block";
if (gpu_data->IsAllocated()) {
gpu_data->Reset(gpair, fvalues, instance_id);
return;
}
Timer t;
MetaInfo info = fmat.info();
dmlc::DataIter<ColBatch> *iter = fmat.ColIterator();
std::vector<int> foffsets;
foffsets.push_back(0);
std::vector<int> feature_id;
fvalues.reserve(info.num_col * info.num_row);
instance_id.reserve(info.num_col * info.num_row);
feature_id.reserve(info.num_col * info.num_row);
while (iter->Next()) {
const ColBatch &batch = iter->Value();
for (int i = 0; i < batch.size; i++) {
const ColBatch::Inst &col = batch[i];
for (const ColBatch::Entry *it = col.data; it != col.data + col.length;
it++) {
fvalues.push_back(it->fvalue);
instance_id.push_back(it->index);
feature_id.push_back(i);
}
foffsets.push_back(fvalues.size());
}
}
t.printElapsed("dmatrix");
t.reset();
gpu_data->Init(fvalues, foffsets, instance_id, feature_id, gpair,
info.num_row, info.num_col, param.max_depth, param);
t.printElapsed("gpu init");
}
void GPUBuilder::InitFirstNode() {
// Build the root node on the CPU and copy to device
gpu_gpair sum_gradients =
thrust::reduce(gpu_data->gpair.begin(), gpu_data->gpair.end(),
gpu_gpair(0, 0), cub::Sum());
gpu_data->nodes[0] = Node(
sum_gradients,
CalcGain(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()),
CalcWeight(gpu_data->param, sum_gradients.grad(), sum_gradients.hess()));
}
enum NodeType {
NODE = 0,
LEAF = 1,
UNUSED = 2,
};
// Recursively label node types
void flag_nodes(const thrust::host_vector<Node> &nodes,
std::vector<NodeType> *node_flags, int nid, NodeType type) {
if (nid >= nodes.size() || type == UNUSED) {
return;
}
const Node &n = nodes[nid];
// Current node and all children are valid
if (n.split.loss_chg > rt_eps) {
(*node_flags)[nid] = NODE;
flag_nodes(nodes, node_flags, nid * 2 + 1, NODE);
flag_nodes(nodes, node_flags, nid * 2 + 2, NODE);
} else {
// Current node is leaf, therefore is valid but all children are invalid
(*node_flags)[nid] = LEAF;
flag_nodes(nodes, node_flags, nid * 2 + 1, UNUSED);
flag_nodes(nodes, node_flags, nid * 2 + 2, UNUSED);
}
}
// Copy gpu dense representation of tree to xgboost sparse representation
void GPUBuilder::CopyTree(RegTree &tree) {
thrust::host_vector<Node> h_nodes = gpu_data->nodes;
std::vector<NodeType> node_flags(h_nodes.size(), UNUSED);
flag_nodes(h_nodes, &node_flags, 0, NODE);
int nid = 0;
for (int gpu_nid = 0; gpu_nid < h_nodes.size(); gpu_nid++) {
NodeType flag = node_flags[gpu_nid];
const Node &n = h_nodes[gpu_nid];
if (flag == NODE) {
tree.AddChilds(nid);
tree[nid].set_split(n.split.findex, n.split.fvalue, n.split.missing_left);
tree.stat(nid).loss_chg = n.split.loss_chg;
tree.stat(nid).base_weight = n.weight;
tree.stat(nid).sum_hess = n.sum_gradients.hess();
tree[tree[nid].cleft()].set_leaf(0);
tree[tree[nid].cright()].set_leaf(0);
nid++;
} else if (flag == LEAF) {
tree[nid].set_leaf(n.weight * param.learning_rate);
tree.stat(nid).sum_hess = n.sum_gradients.hess();
nid++;
}
}
}
} // namespace tree
} // namespace xgboost
|
054d15192bfcb728054bfa880a65eb5408ec73ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===------------ omp_data.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
// Pointer to this team's OpenMP state object
__device__ __shared__
omptarget_nvptx_ThreadPrivateContext *omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
////////////////////////////////////////////////////////////////////////////////
// Scratchpad for teams reduction.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ void *ReductionScratchpadPtr;
////////////////////////////////////////////////////////////////////////////////
// Data sharing related variables.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ omptarget_nvptx_SharedArgs omptarget_nvptx_sharedArgs;
| 054d15192bfcb728054bfa880a65eb5408ec73ef.cu | //===------------ omp_data.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
// Pointer to this team's OpenMP state object
__device__ __shared__
omptarget_nvptx_ThreadPrivateContext *omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
////////////////////////////////////////////////////////////////////////////////
// Scratchpad for teams reduction.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ void *ReductionScratchpadPtr;
////////////////////////////////////////////////////////////////////////////////
// Data sharing related variables.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ omptarget_nvptx_SharedArgs omptarget_nvptx_sharedArgs;
|
1b69089e6e59cf116a6621a9cd53044f725f1267.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_new_pos;
glm::vec3 *dev_new_vel1;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
/**
 * Integer mixing hash (Thomas Wang style), usable on host and device.
 * Used here to derive per-thread RNG seeds from an index.
 */
__host__ __device__ unsigned int hash(unsigned int a) {
  a += 0x7ed55d16 + (a << 12);
  a ^= 0xc761c23c ^ (a >> 19);
  a += 0x165667b1 + (a << 5);
  a = (a << 9) ^ (a + 0xd3a2646c);
  a += 0xfd7046c5 + (a << 3);
  a ^= 0xb55a4f09 ^ (a >> 16);
  return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
/**
 * Generate a pseudo-random vec3 with each component in [-1, 1], seeded from
 * the (time, index) pair so every boid gets a distinct deterministic value.
 *
 * FIX: the three distribution draws were previously passed directly as
 * constructor arguments; C++ argument evaluation order is unspecified, so the
 * x/y/z assignment was compiler-dependent. Naming the intermediates makes the
 * component order deterministic.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  float x = (float)unitDistrib(rng);
  float y = (float)unitDistrib(rng);
  float z = (float)unitDistrib(rng);
  return glm::vec3(x, y, z);
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
/**
 * CUDA kernel: fill `arr` with N random positions scaled by `scale`.
 * Launch with a 1D grid covering at least N threads.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 rand = generateRandomVec3(time, index);
  arr[index] = scale * rand;  // componentwise scale of the random unit cube
}
/**
* Initialize memory, update some globals
*/
/**
 * Initialize device memory and grid parameters for N boids.
 * Allocates the position/velocity ping-pong buffers, seeds random positions
 * on the device, derives the uniform-grid dimensions, and allocates the
 * sorting and cell-range buffers used by the scattered/coherent searches.
 * All buffers allocated here are released in Boids::endSimulation.
 */
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
  hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
  hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
  // Scatter the boids randomly throughout the scene.
  hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // Grid params: a cell is twice the widest rule radius, so every neighbor of
  // a boid lies within the 3x3x3 block of cells around it.
  gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // Buffers for the uniform-grid neighbor search (2.1).
  hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
  hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
  hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
  hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
  // Reshuffled position/velocity buffers for the coherent implementation (2.3).
  hipMalloc((void**)&dev_new_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_new_pos failed!");
  hipMalloc((void**)&dev_new_vel1, N * sizeof(glm::vec3));
  // BUGFIX: this message previously reported dev_new_pos for this allocation.
  checkCUDAErrorWithLine("hipMalloc dev_new_vel1 failed!");
  hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
/**
 * Copy boid positions into the VBO (4 floats per boid, w = 1) so OpenGL can
 * draw them. Positions are normalized by -1/s_scale.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float c_scale = -1.0f / s_scale;
  glm::vec3 p = pos[index];
  float *out = vbo + 4 * index;
  out[0] = p.x * c_scale;
  out[1] = p.y * c_scale;
  out[2] = p.z * c_scale;
  out[3] = 1.0f;
}
/**
 * Copy boid velocities into the VBO (4 floats per boid, w = 1), offset by
 * +0.3 per component so they are usable as display colors.
 */
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 v = vel[index];
  float *out = vbo + 4 * index;
  out[0] = v.x + 0.3f;
  out[1] = v.y + 0.3f;
  out[2] = v.z + 0.3f;
  out[3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
/**
 * Wrapper that copies boid positions and velocities into the OpenGL VBOs.
 * FIX: the launches used raw triple-chevron syntax, inconsistent with the
 * hipLaunchKernelGGL convention used everywhere else in this hipified file;
 * also check each kernel launch individually.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  hipLaunchKernelGGL(( kernCopyPositionsToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
    numObjects, dev_pos, vbodptr_positions, scene_scale);
  checkCUDAErrorWithLine("kernCopyPositionsToVBO failed!");
  hipLaunchKernelGGL(( kernCopyVelocitiesToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
    numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
/**
 * Compute the velocity delta on boid `iSelf` from all `N` boids (brute force).
 * Rule 1 (cohesion): steer toward the perceived center of nearby boids.
 * Rule 2 (separation): steer away from boids that are too close.
 * Rule 3 (alignment): steer toward the average velocity of nearby boids.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  const glm::vec3 selfPos = pos[iSelf];
  glm::vec3 cohesionSum(0.0f, 0.0f, 0.0f);
  int cohesionCount = 0;
  glm::vec3 separationSum(0.0f, 0.0f, 0.0f);
  glm::vec3 alignmentSum(0.0f, 0.0f, 0.0f);
  int alignmentCount = 0;
  for (int i = 0; i < N; ++i) {
    if (i == iSelf) {
      continue;  // a boid never influences itself
    }
    float d = glm::distance(pos[i], selfPos);
    if (d < rule1Distance) {
      cohesionSum += pos[i];
      ++cohesionCount;
    }
    if (d < rule2Distance) {
      separationSum -= (pos[i] - selfPos);
    }
    if (d < rule3Distance) {
      alignmentSum += vel[i];
      ++alignmentCount;
    }
  }
  glm::vec3 deltaV(0.0f, 0.0f, 0.0f);
  if (cohesionCount > 0) {
    cohesionSum /= cohesionCount;
    deltaV += (cohesionSum - selfPos) * rule1Scale;
  }
  if (alignmentCount > 0) {
    alignmentSum /= alignmentCount;
    deltaV += alignmentSum * rule3Scale;
  }
  deltaV += separationSum * rule2Scale;
  return deltaV;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
/**
 * Brute-force velocity update: read velocities from vel1, write clamped new
 * velocities into vel2. Writing into a separate buffer keeps every thread
 * reading a consistent snapshot of vel1 (ping-pong scheme).
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
  float speed = glm::length(newVel);
  if (speed > maxSpeed) {
    newVel = (newVel / speed) * maxSpeed;  // clamp magnitude to maxSpeed
  }
  vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
/**
 * Advance each boid's position by vel * dt, wrapping each component around
 * at +/- scene_scale so boids never leave the simulation cube.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  for (int axis = 0; axis < 3; ++axis) {
    if (p[axis] < -scene_scale) {
      p[axis] = scene_scale;
    } else if (p[axis] > scene_scale) {
      p[axis] = -scene_scale;
    }
  }
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D index (x fastest, then y, then z).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
/**
 * Label each boid with (a) its own array index, so that sorting by grid cell
 * still points back at the original pos/vel entries, and (b) the flattened
 * index of the grid cell containing it.
 */
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  indices[index] = index;
  // Cell coordinate: truncate the position expressed in cell widths.
  glm::ivec3 cell = (pos[index] - gridMin) * inverseCellWidth;
  gridIndices[index] = gridIndex3Dto1D(cell.x, cell.y, cell.z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fill an int buffer with `value`; used to reset cell start/end arrays to the
// -1 "empty cell" sentinel before each grid rebuild.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
/**
 * Identify, for every grid cell, the [start, end] index range it occupies in
 * the sorted particleGridIndices array. Cells containing no boids are left
 * untouched (their entries keep the -1 sentinel set by kernResetIntBuffer).
 *
 * BUGFIX: the previous version unconditionally read
 * particleGridIndices[index + 1], which is out of bounds for index == N - 1
 * (and for N == 1). Neighbors are now only read when they exist.
 */
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int cell = particleGridIndices[index];
  // A cell starts here if this is the first boid overall, or the previous
  // boid belongs to a different cell.
  if (index == 0 || particleGridIndices[index - 1] != cell) {
    gridCellStartIndices[cell] = index;
  }
  // A cell ends here if this is the last boid overall, or the next boid
  // belongs to a different cell.
  if (index == N - 1 || particleGridIndices[index + 1] != cell) {
    gridCellEndIndices[cell] = index;
  }
}
/**
 * Velocity update using the scattered uniform grid: only boids in the 27
 * cells surrounding a boid's own cell are examined; particleArrayIndices adds
 * one level of indirection into pos/vel1. Reads vel1, writes clamped result
 * to vel2 (ping-pong).
 *
 * BUGFIX: neighbor cell coordinates outside the grid were previously clamped
 * to the boundary, so boids in edge cells scanned the same cell several times
 * and double-counted their neighbors. Out-of-range cells are now skipped.
 * Also removed unused locals (boidCellIndex, neighbourhoodDistance).
 */
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Rule accumulators.
  glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);    // Rule 1: cohesion
  int centerInfluencerCount = 0;
  glm::vec3 seperation(0.0f, 0.0f, 0.0f);         // Rule 2: separation
  glm::vec3 perceivedVelocity(0.0f, 0.0f, 0.0f);  // Rule 3: alignment
  int velocityInfluencerCount = 0;
  // Identify the grid cell that this boid is in.
  glm::vec3 boidPos = pos[index];
  glm::ivec3 boidGridIndex = (boidPos - gridMin) * inverseCellWidth;
  // Scan the 3x3x3 block of cells around the boid. Since the cell width is
  // twice the widest rule radius, this block covers every possible neighbor.
  for (int i = -1; i < 2; ++i) {
    for (int j = -1; j < 2; ++j) {
      for (int k = -1; k < 2; ++k) {
        int x = boidGridIndex.x + i;
        int y = boidGridIndex.y + j;
        int z = boidGridIndex.z + k;
        // Skip cells outside the grid (clamping would revisit boundary cells
        // and double-count neighbors).
        if (x < 0 || x >= gridResolution ||
            y < 0 || y >= gridResolution ||
            z < 0 || z >= gridResolution) {
          continue;
        }
        int neighbourGridCellIndex = gridIndex3Dto1D(x, y, z, gridResolution);
        int start = gridCellStartIndices[neighbourGridCellIndex];
        if (start == -1) {
          continue;  // -1 sentinel: cell holds no boids
        }
        int end = gridCellEndIndices[neighbourGridCellIndex];
        for (int l = start; l <= end; ++l) {
          int boidIndex = particleArrayIndices[l];
          if (boidIndex == index) {
            continue;  // a boid never influences itself
          }
          float distance = glm::distance(boidPos, pos[boidIndex]);
          if (distance < rule1Distance) {  // Rule 1: cohesion
            perceivedCenter += pos[boidIndex];
            centerInfluencerCount++;
          }
          if (distance < rule2Distance) {  // Rule 2: separation
            seperation -= (pos[boidIndex] - boidPos);
          }
          if (distance < rule3Distance) {  // Rule 3: alignment
            perceivedVelocity += vel1[boidIndex];
            velocityInfluencerCount++;
          }
        }
      }
    }
  }
  glm::vec3 finalVelocity(0.0f, 0.0f, 0.0f);
  if (centerInfluencerCount > 0) {
    perceivedCenter /= centerInfluencerCount;
    finalVelocity += (perceivedCenter - pos[index]) * rule1Scale;
  }
  if (velocityInfluencerCount > 0) {
    perceivedVelocity /= velocityInfluencerCount;
    finalVelocity += perceivedVelocity * rule3Scale;
  }
  finalVelocity += seperation * rule2Scale;
  finalVelocity += vel1[index];
  // Clamp the speed before writing into the ping-pong output buffer.
  float boidSpeed = glm::length(finalVelocity);
  if (boidSpeed > maxSpeed) {
    finalVelocity = (finalVelocity / boidSpeed) * maxSpeed;
  }
  vel2[index] = finalVelocity;
}
/**
 * Reshuffle pos/vel into cell-coherent order: output slot `index` receives
 * the data of the boid listed at particleArrayIndices[index] (the grid-sorted
 * order produced by thrust::sort_by_key).
 */
__global__ void kernSwapPosVel(int N, int *particleArrayIndices, glm::vec3 *new_pos, glm::vec3 *new_vel, glm::vec3 *pos, glm::vec3 *vel) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index < N) {
    int src = particleArrayIndices[index];
    new_pos[index] = pos[src];
    new_vel[index] = vel[src];
  }
}
/**
 * Velocity update on cell-coherent data: like the scattered version but with
 * one less level of indirection — gridCellStartIndices/EndIndices refer
 * directly into pos and vel1, which have been reshuffled into grid order by
 * kernSwapPosVel. Reads vel1, writes clamped result to vel2.
 *
 * BUGFIX: neighbor cell coordinates outside the grid were previously clamped
 * to the boundary, so boids in edge cells scanned the same cell several times
 * and double-counted their neighbors. Out-of-range cells are now skipped.
 * Also removed unused locals (boidCellIndex, neighbourhoodDistance).
 */
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Rule accumulators.
  glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);    // Rule 1: cohesion
  int centerInfluencerCount = 0;
  glm::vec3 seperation(0.0f, 0.0f, 0.0f);         // Rule 2: separation
  glm::vec3 perceivedVelocity(0.0f, 0.0f, 0.0f);  // Rule 3: alignment
  int velocityInfluencerCount = 0;
  // Identify the grid cell that this boid is in.
  glm::vec3 boidPos = pos[index];
  glm::ivec3 boidGridIndex = (boidPos - gridMin) * inverseCellWidth;
  // Scan the 3x3x3 block of cells around the boid.
  for (int i = -1; i < 2; ++i) {
    for (int j = -1; j < 2; ++j) {
      for (int k = -1; k < 2; ++k) {
        int x = boidGridIndex.x + i;
        int y = boidGridIndex.y + j;
        int z = boidGridIndex.z + k;
        // Skip cells outside the grid (clamping would revisit boundary cells
        // and double-count neighbors).
        if (x < 0 || x >= gridResolution ||
            y < 0 || y >= gridResolution ||
            z < 0 || z >= gridResolution) {
          continue;
        }
        int neighbourGridCellIndex = gridIndex3Dto1D(x, y, z, gridResolution);
        int start = gridCellStartIndices[neighbourGridCellIndex];
        if (start == -1) {
          continue;  // -1 sentinel: cell holds no boids
        }
        int end = gridCellEndIndices[neighbourGridCellIndex];
        // Data is coherent, so the cell range indexes pos/vel1 directly.
        for (int boidIndex = start; boidIndex <= end; ++boidIndex) {
          if (boidIndex == index) {
            continue;  // a boid never influences itself
          }
          float distance = glm::distance(boidPos, pos[boidIndex]);
          if (distance < rule1Distance) {  // Rule 1: cohesion
            perceivedCenter += pos[boidIndex];
            centerInfluencerCount++;
          }
          if (distance < rule2Distance) {  // Rule 2: separation
            seperation -= (pos[boidIndex] - boidPos);
          }
          if (distance < rule3Distance) {  // Rule 3: alignment
            perceivedVelocity += vel1[boidIndex];
            velocityInfluencerCount++;
          }
        }
      }
    }
  }
  glm::vec3 finalVelocity(0.0f, 0.0f, 0.0f);
  if (centerInfluencerCount > 0) {
    perceivedCenter /= centerInfluencerCount;
    finalVelocity += (perceivedCenter - pos[index]) * rule1Scale;
  }
  if (velocityInfluencerCount > 0) {
    perceivedVelocity /= velocityInfluencerCount;
    finalVelocity += perceivedVelocity * rule3Scale;
  }
  finalVelocity += seperation * rule2Scale;
  finalVelocity += vel1[index];
  // Clamp the speed before writing into the ping-pong output buffer.
  float boidSpeed = glm::length(finalVelocity);
  if (boidSpeed > maxSpeed) {
    finalVelocity = (finalVelocity / boidSpeed) * maxSpeed;
  }
  vel2[index] = finalVelocity;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
/**
 * Step the entire N-body simulation by `dt` seconds using the brute-force
 * O(N^2) neighbor search.
 */
void Boids::stepSimulationNaive(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // New velocities go into dev_vel2 so every boid reads a consistent
  // snapshot of dev_vel1.
  hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong the velocity buffers.
  std::swap(dev_vel1, dev_vel2);
}
/**
 * Step the simulation using the scattered uniform-grid neighbor search.
 * FIX: added the missing error check after kernIdentifyCellStartEnd and fixed
 * the "indicesfailed" typo in an error message.
 */
void Boids::stepSimulationScatteredGrid(float dt) {
  // Kernel dimensions: one launch sized by grid cells, one by boids.
  dim3 fullBlocksPerGrid_GridCellDimension((gridCellCount + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGrid_BoidDimension((numObjects + blockSize - 1) / blockSize);
  // Pre-fill the start/end index arrays with -1 so empty cells can be detected.
  hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGrid_GridCellDimension), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
  hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGrid_GridCellDimension), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
  // Label each boid with its array index and its grid-cell index.
  hipLaunchKernelGGL(( kernComputeIndices) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust: sort boid indices by grid-cell index.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  checkCUDAErrorWithLine("thrust::sort_by_key failed!");
  // Find the start and end index of each cell's span in the sorted array.
  hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Velocity updates via grid-based neighbor search.
  hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
  // Update positions.
  hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong the velocity buffers.
  std::swap(dev_vel1, dev_vel2);
}
/**
 * Step the simulation using the cell-coherent uniform grid: after sorting,
 * the boid data itself is reshuffled into grid order so the neighbor search
 * reads pos/vel contiguously.
 * FIX: the error message after the coherent kernel wrongly said "Scattered";
 * added missing checks after kernIdentifyCellStartEnd and kernSwapPosVel;
 * fixed the "indicesfailed" typo.
 */
void Boids::stepSimulationCoherentGrid(float dt) {
  // Kernel dimensions: one launch sized by grid cells, one by boids.
  dim3 fullBlocksPerGrid_GridCellDimension((gridCellCount + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGrid_BoidDimension((numObjects + blockSize - 1) / blockSize);
  // Pre-fill the start/end index arrays with -1 so empty cells can be detected.
  hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGrid_GridCellDimension), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
  hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(fullBlocksPerGrid_GridCellDimension), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
  // Label each boid with its array index and its grid-cell index.
  hipLaunchKernelGGL(( kernComputeIndices) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize) , 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust: sort boid indices by grid-cell index.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  checkCUDAErrorWithLine("thrust::sort_by_key failed!");
  // Find the start and end index of each cell's span in the sorted array.
  hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize) , 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // BIG DIFFERENCE: reshuffle the particle data itself into grid order.
  hipLaunchKernelGGL(( kernSwapPosVel) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize) , 0, 0, numObjects, dev_particleArrayIndices, dev_new_pos, dev_new_vel1, dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernSwapPosVel failed!");
  // Velocity updates via coherent neighbor search (no extra indirection).
  hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize) , 0, 0, numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_new_pos, dev_new_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  // Update positions (on the reshuffled buffer).
  hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid_BoidDimension), dim3(blockSize) , 0, 0, numObjects, dt, dev_new_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong: vel2 holds the new velocities; the reshuffled positions become
  // the primary position buffer.
  std::swap(dev_vel1, dev_vel2);
  std::swap(dev_new_pos, dev_pos);
}
/**
 * Release every device buffer allocated in Boids::initSimulation.
 */
void Boids::endSimulation() {
  // Core boid state.
  hipFree(dev_pos);
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  // Uniform-grid search buffers.
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
  // Coherent-implementation reshuffle buffers.
  hipFree(dev_new_pos);
  hipFree(dev_new_vel1);
}
/**
 * Exercise thrust::sort_by_key on a small key/value example and print the
 * arrays before and after, to sanity-check the unstable sort used by the
 * grid-based implementations.
 */
void Boids::unitTest() {
  const int N = 10;
  int *dev_intKeys;
  int *dev_intValues;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  // Duplicate keys verify that the (unstable) sort groups equal keys.
  const int initialKeys[N] = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
  for (int i = 0; i < N; i++) {
    intKeys[i] = initialKeys[i];
    intValues[i] = i;
  }
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // Copy the test data to the GPU.
  hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap the raw device pointers for thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // Copy the sorted data back to the host.
  hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // Cleanup.
  delete[] intKeys;
  delete[] intValues;
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  checkCUDAErrorWithLine("hipFree failed!");
  return;
}
| 1b69089e6e59cf116a6621a9cd53044f725f1267.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
/**
 * Check the most recent CUDA error status; print a diagnostic and exit the
 * process on failure.
 * @param msg  context string printed alongside the error description
 * @param line source line number to report, or -1 to omit it
 */
void checkCUDAError(const char *msg, int line = -1) {
  cudaError_t status = cudaGetLastError();
  if (status == cudaSuccess) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_new_pos;
glm::vec3 *dev_new_vel1;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
/**
 * Integer mixing hash (Thomas Wang style), usable on host and device.
 * Used here to derive per-thread RNG seeds from an index.
 */
__host__ __device__ unsigned int hash(unsigned int a) {
  a += 0x7ed55d16 + (a << 12);
  a ^= 0xc761c23c ^ (a >> 19);
  a += 0x165667b1 + (a << 5);
  a = (a << 9) ^ (a + 0xd3a2646c);
  a += 0xfd7046c5 + (a << 3);
  a ^= 0xb55a4f09 ^ (a >> 16);
  return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
/**
 * Generate a pseudo-random vec3 with each component in [-1, 1], seeded from
 * the (time, index) pair so every boid gets a distinct deterministic value.
 *
 * FIX: the three distribution draws were previously passed directly as
 * constructor arguments; C++ argument evaluation order is unspecified, so the
 * x/y/z assignment was compiler-dependent. Naming the intermediates makes the
 * component order deterministic.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  float x = (float)unitDistrib(rng);
  float y = (float)unitDistrib(rng);
  float z = (float)unitDistrib(rng);
  return glm::vec3(x, y, z);
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
/**
 * CUDA kernel: fill `arr` with N random positions scaled by `scale`.
 * Launch with a 1D grid covering at least N threads.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 rand = generateRandomVec3(time, index);
  arr[index] = scale * rand;  // componentwise scale of the random unit cube
}
/**
* Initialize memory, update some globals
*/
/**
 * Allocates every device buffer used by the simulation, seeds the boid
 * positions, and derives the uniform-grid parameters from the rule distances.
 * Fixes vs. original: the dev_new_vel1 error message was a copy-paste of the
 * dev_new_pos one, and deprecated cudaThreadSynchronize() is replaced by
 * cudaDeviceSynchronize().
 */
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - This is basic CUDA memory management and error checking.
  // Don't forget to cudaFree in Boids::endSimulation.
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
  // LOOK-1.2 - This is a typical CUDA kernel invocation.
  kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params: cells are 2x the widest rule radius so a
  // 3x3x3 neighborhood of cells covers every possible neighbor.
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
  // Allocate dev_particleArrayIndices in device memory
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  // Allocate dev_particleGridIndices in device memory
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  // Allocate dev_gridCellStartIndices in device memory
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  // Allocate dev_gridCellEndIndices in device memory
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  // Allocate new position and velocity buffers in device memory for the
  // coherent implementation.
  cudaMalloc((void**)&dev_new_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_new_pos failed!");
  cudaMalloc((void**)&dev_new_vel1, N * sizeof(glm::vec3));
  // Fixed copy-paste bug: this message previously blamed dev_new_pos.
  checkCUDAErrorWithLine("cudaMalloc dev_new_vel1 failed!");
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent.
  cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
/**
 * Writes boid positions into the interleaved float4 VBO consumed by OpenGL,
 * rescaled into clip-friendly coordinates.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  float c_scale = -1.0f / s_scale;
  if (index >= N) {
    return;
  }
  float *out = vbo + 4 * index;
  out[0] = pos[index].x * c_scale;
  out[1] = pos[index].y * c_scale;
  out[2] = pos[index].z * c_scale;
  out[3] = 1.0f;
}
/**
 * Writes boid velocities into the float4 color VBO, offset by 0.3 per channel
 * so slow boids are still visible.
 */
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float *out = vbo + 4 * index;
  out[0] = vel[index].x + 0.3f;
  out[1] = vel[index].y + 0.3f;
  out[2] = vel[index].z + 0.3f;
  out[3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
 * Copies current positions and velocities into the mapped VBO pointers.
 * Fix: deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  // Block until both copies land before OpenGL reads the VBOs.
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
/**
 * LOOK-1.2 - Brute-force flocking helper: returns the velocity delta for boid
 * `iSelf` after scanning all `N` boids in `pos`/`vel`.
 * Rule 1 (cohesion): steer toward the perceived center of nearby boids.
 * Rule 2 (separation): steer away from boids that are too close.
 * Rule 3 (alignment): steer toward the average velocity of nearby boids.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  glm::vec3 centerOfMass(0.0f, 0.0f, 0.0f);   // rule 1 accumulator
  int numCohesionNeighbors = 0;
  glm::vec3 separationVec(0.0f, 0.0f, 0.0f);  // rule 2 accumulator
  glm::vec3 avgVelocity(0.0f, 0.0f, 0.0f);    // rule 3 accumulator
  int numAlignmentNeighbors = 0;
  for (int other = 0; other < N; ++other) {
    if (other == iSelf) {
      continue;
    }
    float dist = glm::distance(pos[other], pos[iSelf]);
    if (dist < rule1Distance) {  // cohesion
      centerOfMass += pos[other];
      numCohesionNeighbors++;
    }
    if (dist < rule2Distance) {  // separation
      separationVec -= (pos[other] - pos[iSelf]);
    }
    if (dist < rule3Distance) {  // alignment
      avgVelocity += vel[other];
      numAlignmentNeighbors++;
    }
  }
  glm::vec3 deltaVel(0.0f, 0.0f, 0.0f);
  if (numCohesionNeighbors > 0) {
    centerOfMass /= numCohesionNeighbors;
    deltaVel += (centerOfMass - pos[iSelf]) * rule1Scale;
  }
  if (numAlignmentNeighbors > 0) {
    avgVelocity /= numAlignmentNeighbors;
    deltaVel += avgVelocity * rule3Scale;
  }
  deltaVel += separationVec * rule2Scale;
  return deltaVel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
/**
 * TODO-1.2 basic flocking: compute each boid's new velocity against all N
 * boids, clamp its speed to maxSpeed, and store it in vel2.
 * vel2 (not vel1) is written so that every thread reads a consistent,
 * unmodified vel1 during this step; the buffers are ping-ponged afterwards.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
  float speed = glm::length(newVel);
  if (speed > maxSpeed) {
    newVel = (newVel / speed) * maxSpeed;
  }
  vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
/**
 * LOOK-1.2 Euler-integrates each boid's position by `dt`, then wraps each
 * axis so boids leaving one side of the scene cube re-enter on the other.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Wrap the boids around so we don't lose them.
  if (p.x < -scene_scale) { p.x = scene_scale; }
  if (p.y < -scene_scale) { p.y = scene_scale; }
  if (p.z < -scene_scale) { p.z = scene_scale; }
  if (p.x > scene_scale) { p.x = -scene_scale; }
  if (p.y > scene_scale) { p.y = -scene_scale; }
  if (p.z > scene_scale) { p.z = -scene_scale; }
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flattens a 3D cell coordinate to a 1D index, x-major (x varies fastest,
// then y, then z). Horner form of x + y*R + z*R*R.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
/**
 * TODO-2.1 - For each boid, record (a) its own array index in `indices`
 * (a parallel pointer array into pos/vel1/vel2) and (b) the flattened id of
 * the grid cell containing it in `gridIndices`. The two arrays are later
 * sorted together by cell id.
 */
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Identity mapping before the sort: slot i refers to boid i.
  indices[index] = index;
  // Quantize the position to a 3D cell coordinate, then flatten it.
  glm::ivec3 cell3D = (pos[index] - gridMin) * inverseCellWidth;
  gridIndices[index] = gridIndex3Dto1D(cell3D.x, cell3D.y, cell3D.z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// LOOK-2.1 Fills an int buffer with `value`; used to preset the cell
// start/end arrays to -1 so empty cells are recognizable.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
/**
 * TODO-2.1 - Identify the inclusive [start, end] index range of each grid
 * cell inside the sorted particleGridIndices array. A cell boundary is where
 * the sorted value changes; a parallel unrolling of "this index doesn't match
 * the one before it, must be a new cell!".
 *
 * Fix: the original read particleGridIndices[index + 1] before any bounds
 * check, an out-of-bounds read for the last thread (index == N - 1) and for
 * N == 1. Comparing against the PREVIOUS element is always in range.
 */
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int cell = particleGridIndices[index];
  if (index == 0) {
    // The first sorted element always opens its cell.
    gridCellStartIndices[cell] = 0;
  } else {
    int prevCell = particleGridIndices[index - 1];
    if (cell != prevCell) {
      // Run boundary: close the previous cell, open this one.
      gridCellEndIndices[prevCell] = index - 1;
      gridCellStartIndices[cell] = index;
    }
  }
  if (index == N - 1) {
    // The last sorted element always closes its cell.
    gridCellEndIndices[cell] = index;
  }
}
/**
 * TODO-2.1 - Update a boid's velocity using the uniform grid to limit the
 * neighbor search to the 3x3x3 block of cells around the boid. Boid data is
 * reached through one level of indirection: particleArrayIndices maps a
 * sorted rank back to the boid's slot in pos/vel1.
 *
 * Fixes vs. original:
 *  - out-of-range neighbor cells are skipped instead of clamped into range;
 *    clamping made a boid in an edge cell visit the same cell more than once
 *    and count its occupants multiple times;
 *  - removed unused locals (the boid's flattened cell index, and a
 *    "neighbourhood distance" computed by calling integer imax on floats).
 */
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Final velocity of the boid
  glm::vec3 finalVelocity(0.0f, 0.0f, 0.0f);
  // Rule 1: Cohesion
  glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
  int centerInfluencerCount = 0;
  // Rule 2: Separation
  glm::vec3 seperation(0.0f, 0.0f, 0.0f);
  // Rule 3: Alignment
  glm::vec3 perceivedVelocity(0.0f, 0.0f, 0.0f);
  int velocityInfluencerCount = 0;
  // Identify the grid cell that this boid is in.
  glm::vec3 boidPos = pos[index];
  glm::ivec3 boidGridIndex = (boidPos - gridMin) * inverseCellWidth;
  // Scan the 3x3x3 block of cells centered on the boid's own cell.
  for (int i = -1; i < 2; ++i) {
    for (int j = -1; j < 2; ++j) {
      for (int k = -1; k < 2; ++k) {
        int x = boidGridIndex.x + i;
        int y = boidGridIndex.y + j;
        int z = boidGridIndex.z + k;
        // Skip neighbor cells outside the grid (clamping would revisit the
        // same cell and double-count its boids).
        if (x < 0 || y < 0 || z < 0 ||
            x >= gridResolution || y >= gridResolution || z >= gridResolution) {
          continue;
        }
        int neighbourGridCellIndex = gridIndex3Dto1D(x, y, z, gridResolution);
        // -1 marks an empty cell (see the kernResetIntBuffer prefill).
        if (gridCellStartIndices[neighbourGridCellIndex] != -1) {
          // Loop through the boids in this grid cell and accumulate the
          // three rules for every boid inside its rule radius.
          for (int l = gridCellStartIndices[neighbourGridCellIndex]; l <= gridCellEndIndices[neighbourGridCellIndex]; ++l) {
            int boidIndex = particleArrayIndices[l];
            if (boidIndex != index) {
              float distance = glm::distance(boidPos, pos[boidIndex]);
              // Rule 1 cohesion
              if (distance < rule1Distance) {
                perceivedCenter += pos[boidIndex];
                centerInfluencerCount++;
              }
              // Rule 2 separation
              if (distance < rule2Distance) {
                seperation -= (pos[boidIndex] - boidPos);
              }
              // Rule 3 alignment
              if (distance < rule3Distance) {
                perceivedVelocity += vel1[boidIndex];
                velocityInfluencerCount++;
              }
            }
          }
        }
      }
    }
  }
  if (centerInfluencerCount > 0) {
    perceivedCenter /= centerInfluencerCount;
    finalVelocity += (perceivedCenter - pos[index]) * rule1Scale;
  }
  if (velocityInfluencerCount > 0) {
    perceivedVelocity /= velocityInfluencerCount;
    finalVelocity += perceivedVelocity * rule3Scale;
  }
  finalVelocity += seperation * rule2Scale;
  finalVelocity += vel1[index];
  // Clamp the speed before writing into the vel2 ping-pong buffer.
  float boidSpeed = glm::length(finalVelocity);
  if (boidSpeed > maxSpeed) {
    finalVelocity = (finalVelocity / boidSpeed) * maxSpeed;
  }
  vel2[index] = finalVelocity;
}
/**
 * Reshuffles pos/vel into cell-sorted order for the coherent grid: output
 * slot i receives the data of the boid whose sorted rank is i, i.e.
 * particleArrayIndices[i].
 */
__global__ void kernSwapPosVel(int N, int *particleArrayIndices, glm::vec3 *new_pos, glm::vec3 *new_vel, glm::vec3 *pos, glm::vec3 *vel) {
  int sortedRank = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (sortedRank >= N) {
    return;
  }
  int src = particleArrayIndices[sortedRank];
  new_pos[sortedRank] = pos[src];
  new_vel[sortedRank] = vel[src];
}
/**
 * TODO-2.3 - Coherent-grid velocity update. Same algorithm as the scattered
 * version, minus one level of indirection: gridCellStartIndices and
 * gridCellEndIndices refer DIRECTLY to pos and vel1, which have already been
 * reshuffled into cell-sorted order by kernSwapPosVel.
 *
 * Fixes vs. original (mirrors the scattered kernel):
 *  - out-of-range neighbor cells are skipped instead of clamped, so edge
 *    boids no longer visit and double-count the same cell;
 *  - removed the unused flattened-cell-index and neighbourhood-distance
 *    locals (the latter misused integer imax on floats).
 */
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Final velocity of the boid
  glm::vec3 finalVelocity(0.0f, 0.0f, 0.0f);
  // Rule 1: Cohesion
  glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
  int centerInfluencerCount = 0;
  // Rule 2: Separation
  glm::vec3 seperation(0.0f, 0.0f, 0.0f);
  // Rule 3: Alignment
  glm::vec3 perceivedVelocity(0.0f, 0.0f, 0.0f);
  int velocityInfluencerCount = 0;
  // Identify the grid cell that this boid is in.
  glm::vec3 boidPos = pos[index];
  glm::ivec3 boidGridIndex = (boidPos - gridMin) * inverseCellWidth;
  // Scan the 3x3x3 block of cells centered on the boid's own cell.
  for (int i = -1; i < 2; ++i) {
    for (int j = -1; j < 2; ++j) {
      for (int k = -1; k < 2; ++k) {
        int x = boidGridIndex.x + i;
        int y = boidGridIndex.y + j;
        int z = boidGridIndex.z + k;
        // Skip neighbor cells outside the grid (clamping would revisit the
        // same cell and double-count its boids).
        if (x < 0 || y < 0 || z < 0 ||
            x >= gridResolution || y >= gridResolution || z >= gridResolution) {
          continue;
        }
        int neighbourGridCellIndex = gridIndex3Dto1D(x, y, z, gridResolution);
        // -1 marks an empty cell (see the kernResetIntBuffer prefill).
        if (gridCellStartIndices[neighbourGridCellIndex] != -1) {
          // Data is cell-sorted, so the cell's boids are read contiguously.
          for (int boidIndex = gridCellStartIndices[neighbourGridCellIndex]; boidIndex <= gridCellEndIndices[neighbourGridCellIndex]; ++boidIndex) {
            if (boidIndex != index) {
              float distance = glm::distance(boidPos, pos[boidIndex]);
              // Rule 1 cohesion
              if (distance < rule1Distance) {
                perceivedCenter += pos[boidIndex];
                centerInfluencerCount++;
              }
              // Rule 2 separation
              if (distance < rule2Distance) {
                seperation -= (pos[boidIndex] - boidPos);
              }
              // Rule 3 alignment
              if (distance < rule3Distance) {
                perceivedVelocity += vel1[boidIndex];
                velocityInfluencerCount++;
              }
            }
          }
        }
      }
    }
  }
  if (centerInfluencerCount > 0) {
    perceivedCenter /= centerInfluencerCount;
    finalVelocity += (perceivedCenter - pos[index]) * rule1Scale;
  }
  if (velocityInfluencerCount > 0) {
    perceivedVelocity /= velocityInfluencerCount;
    finalVelocity += perceivedVelocity * rule3Scale;
  }
  finalVelocity += seperation * rule2Scale;
  finalVelocity += vel1[index];
  // Clamp the speed before writing into the vel2 ping-pong buffer.
  float boidSpeed = glm::length(finalVelocity);
  if (boidSpeed > maxSpeed) {
    finalVelocity = (finalVelocity / boidSpeed) * maxSpeed;
  }
  vel2[index] = finalVelocity;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
/**
 * Step the entire N-body simulation by `dt` seconds using the brute-force
 * O(N^2) neighbor search.
 */
void Boids::stepSimulationNaive(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Velocity pass reads vel1 and writes vel2 so reads stay consistent.
  kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong: the freshly written velocities become next step's input.
  std::swap(dev_vel1, dev_vel2);
}
/**
 * TODO-2.1 - One simulation step using the scattered uniform grid:
 * bin boids into cells, sort boid indices by cell with thrust, build per-cell
 * [start, end] ranges, then run the grid-limited neighbor search.
 * Fixes vs. original: added the missing error check after
 * kernIdentifyCellStartEnd and repaired the "end indicesfailed!" typo.
 */
void Boids::stepSimulationScatteredGrid(float dt) {
  // Setup the dimensions for the kernels.
  dim3 fullBlocksPerGrid_GridCellDimension((gridCellCount + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGrid_BoidDimension((numObjects + blockSize - 1) / blockSize);
  // Prefill the start and end index arrays with -1 so empty cells are
  // detectable by the search kernel.
  kernResetIntBuffer <<<fullBlocksPerGrid_GridCellDimension, blockSize>>> (gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
  kernResetIntBuffer <<<fullBlocksPerGrid_GridCellDimension, blockSize>>> (gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
  // Label each particle with its array index as well as its grid index.
  kernComputeIndices <<<fullBlocksPerGrid_BoidDimension, blockSize>>> (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust; sort boid indices by cell index.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  checkCUDAErrorWithLine("thrust::sort_by_key failed!");
  // Find the start and end indices of each cell's run in the sorted arrays.
  kernIdentifyCellStartEnd <<<fullBlocksPerGrid_BoidDimension, blockSize>>> (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Perform velocity updates using the grid-limited neighbor search.
  kernUpdateVelNeighborSearchScattered <<<fullBlocksPerGrid_BoidDimension, blockSize>>> (numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
  // Update positions.
  kernUpdatePos <<<fullBlocksPerGrid_BoidDimension, blockSize>>> (numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong the velocity buffers.
  std::swap(dev_vel1, dev_vel2);
}
/**
 * TODO-2.3 - Coherent-grid simulation step. Like the scattered version, but
 * after sorting we reshuffle pos/vel1 into cell-sorted scratch buffers
 * (dev_new_pos/dev_new_vel1) so the neighbor search reads memory coherently.
 * Ping-pong differs from before: vel1<->vel2 swap as usual, and the
 * reshuffled position buffer becomes the new dev_pos.
 * Fixes vs. original: error message after the coherent kernel wrongly named
 * the scattered kernel; added missing checks after kernIdentifyCellStartEnd
 * and kernSwapPosVel; repaired the "end indicesfailed!" typo.
 */
void Boids::stepSimulationCoherentGrid(float dt) {
  // Setup the dimensions for the kernels.
  dim3 fullBlocksPerGrid_GridCellDimension((gridCellCount + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGrid_BoidDimension((numObjects + blockSize - 1) / blockSize);
  // Prefill the start and end index arrays with -1 so empty cells are
  // detectable by the search kernel.
  kernResetIntBuffer <<<fullBlocksPerGrid_GridCellDimension, blockSize >>> (gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
  kernResetIntBuffer <<<fullBlocksPerGrid_GridCellDimension, blockSize >>> (gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
  // Label each particle with its array index as well as its grid index.
  kernComputeIndices <<<fullBlocksPerGrid_BoidDimension, blockSize >>> (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust; sort boid indices by cell index.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  checkCUDAErrorWithLine("thrust::sort_by_key failed!");
  // Find the start and end indices of each cell's run in the sorted arrays.
  kernIdentifyCellStartEnd <<<fullBlocksPerGrid_BoidDimension, blockSize >>> (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // BIG DIFFERENCE: reshuffle the particle data into cell-sorted order.
  kernSwapPosVel <<<fullBlocksPerGrid_BoidDimension, blockSize >>> (numObjects, dev_particleArrayIndices, dev_new_pos, dev_new_vel1, dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernSwapPosVel failed!");
  // Perform velocity updates using the coherent neighbor search.
  kernUpdateVelNeighborSearchCoherent <<<fullBlocksPerGrid_BoidDimension, blockSize >>> (numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_new_pos, dev_new_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  // Update positions (of the reshuffled buffer, which becomes dev_pos below).
  kernUpdatePos <<<fullBlocksPerGrid_BoidDimension, blockSize >>> (numObjects, dt, dev_new_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong buffers.
  std::swap(dev_vel1, dev_vel2);
  std::swap(dev_new_pos, dev_pos);
}
/**
 * Releases every device buffer allocated in Boids::initSimulation,
 * in the reverse order of allocation.
 */
void Boids::endSimulation() {
  cudaFree(dev_new_vel1);
  cudaFree(dev_new_pos);
  cudaFree(dev_gridCellEndIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_vel2);
  cudaFree(dev_vel1);
  cudaFree(dev_pos);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // Sanity check of thrust::sort_by_key on a small key/value pair set:
  // copies host data to the device, sorts values by key, copies back, and
  // prints before/after so the ordering can be inspected manually.
  // test unstable sort
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  // Keys deliberately contain duplicates (0 and 2) to exercise the
  // unstable-sort behavior; values record the original slot.
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  // (cudaMemcpy is blocking, so no explicit synchronize is needed here.)
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
  return;
}
|
49beb5b763230bcde2a9e405e1206986bebcab3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// C = alpha * op(A) * op(B) + beta * C for row-major A (M x K), B (K x N),
// C (M x N). hipBLAS/cuBLAS expect column-major storage, so the call computes
// op(B) * op(A) with the dimensions swapped, which writes row-major C
// directly without any explicit transpose.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// y = alpha * op(A) * x + beta * y for row-major A (M x N). The transpose
// flag is inverted (NoTrans -> OP_T) because row-major A is the column-major
// transpose from BLAS's point of view.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float* A, const float* x,
    const float beta, float* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const double alpha, const double* A, const double* x,
    const double beta, double* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Y = alpha * X + Y over N contiguous elements (BLAS axpy).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
    float* Y) {
  CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
    double* Y) {
  CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copies N bytes from X to Y; hipMemcpyDefault lets the runtime infer the
// transfer direction from the pointer types. No-op when X == Y.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X != Y) {
    CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));  // NOLINT(caffe/alt_fn)
  }
}
// X *= alpha, in place, over N contiguous elements (BLAS scal).
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Y = alpha * X + beta * Y, composed from scal (Y *= beta) then axpy.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
    const float beta, float* Y) {
  caffe_gpu_scal<float>(N, beta, Y);
  caffe_gpu_axpy<float>(N, alpha, X, Y);
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
    const double beta, double* Y) {
  caffe_gpu_scal<double>(N, beta, Y);
  caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// Writes the dot product of the n-element vectors x and y through `out`.
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
    float* out) {
  CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
    double * out) {
  CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// Writes the sum of absolute values of the n elements of x through `y`.
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
  CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
  CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// y = alpha * x: copy x into y, then scale y in place.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
    float* y) {
  CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Double-precision counterpart of the float specialization above.
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
    double* y) {
  CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Grid-stride fill kernel: y[i] = alpha for all i < n.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = alpha;
  }
}
// Fills the N-element device array Y with alpha. Uses the byte-wise
// hipMemset fast path when alpha == 0 (an all-zero byte pattern is the
// value 0 for the instantiated int/float/double types); otherwise launches
// the fill kernel.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
  if (alpha == 0) {
    CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
    return;
  }
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
// Explicit instantiations for the element types Caffe uses.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 49beb5b763230bcde2a9e405e1206986bebcab3b.cu | #include <cuda_runtime.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
596781197880d0009de24f9675d075623390682b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void histogram_kernel_optimized(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
// INSERT CODE HERE
extern __shared__ unsigned int bins_s[];
//Shared memory
int thid = threadIdx.x;
while ( thid < num_bins){
bins_s[thid] = 0u;
thid += blockDim.x;
}
__syncthreads();
//Histogram calculation
unsigned int element = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int accumulator = 0;
unsigned int prev_index = 0;
while(element < num_elements){
unsigned int curr_index = input[element];
if(curr_index != prev_index){
atomicAdd(&(bins_s[prev_index]), accumulator);
accumulator = 1;
prev_index = curr_index;
}
else{
accumulator++;
}
element += blockDim.x * gridDim.x;
}
if(accumulator > 0){
atomicAdd(&(bins_s[prev_index]), accumulator);
}
__syncthreads();
//Global memory
thid = threadIdx.x;
while(thid < num_bins){
atomicAdd(&(bins[thid]), bins_s[thid]);
thid += blockDim.x;
}
} | 596781197880d0009de24f9675d075623390682b.cu | #include "includes.h"
__global__ void histogram_kernel_optimized(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
// INSERT CODE HERE
extern __shared__ unsigned int bins_s[];
//Shared memory
int thid = threadIdx.x;
while ( thid < num_bins){
bins_s[thid] = 0u;
thid += blockDim.x;
}
__syncthreads();
//Histogram calculation
unsigned int element = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int accumulator = 0;
unsigned int prev_index = 0;
while(element < num_elements){
unsigned int curr_index = input[element];
if(curr_index != prev_index){
atomicAdd(&(bins_s[prev_index]), accumulator);
accumulator = 1;
prev_index = curr_index;
}
else{
accumulator++;
}
element += blockDim.x * gridDim.x;
}
if(accumulator > 0){
atomicAdd(&(bins_s[prev_index]), accumulator);
}
__syncthreads();
//Global memory
thid = threadIdx.x;
while(thid < num_bins){
atomicAdd(&(bins[thid]), bins_s[thid]);
thid += blockDim.x;
}
} |
b5fb1f4332707d1800729bf68cac758c2767362a.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
const char log_name[] = "log_kernel";
void log_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log_string = jiterator_stringify(
template <typename T> T log_kernel(T x) { return ::log(x); });
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "log_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::log(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log(a);
});
});
}
}
const char log10_name[] = "log10_kernel";
void log10_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log10_string = jiterator_stringify(
template <typename T> T log10_kernel(T x) { return std::log10(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log10_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log10_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log10_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log10(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log10(a);
});
});
}
}
void log1p_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log1p_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log1p(a);
});
});
}
const char log2_name[] = "log2_kernel";
void log2_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log2_string = jiterator_stringify(
template <typename T> T log2_kernel(T x) { return std::log2(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log2_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log2_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log2_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log2(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log2(a);
});
});
}
}
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
}} // namespace at::native
| b5fb1f4332707d1800729bf68cac758c2767362a.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
const char log_name[] = "log_kernel";
void log_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log_string = jiterator_stringify(
template <typename T> T log_kernel(T x) { return std::log(x); });
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "log_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::log(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log(a);
});
});
}
}
const char log10_name[] = "log10_kernel";
void log10_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log10_string = jiterator_stringify(
template <typename T> T log10_kernel(T x) { return std::log10(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log10_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log10_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log10_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log10(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log10_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log10(a);
});
});
}
}
void log1p_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log1p_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log1p(a);
});
});
}
const char log2_name[] = "log2_kernel";
void log2_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto log2_string = jiterator_stringify(
template <typename T> T log2_kernel(T x) { return std::log2(x); });
AT_DISPATCH_COMPLEX_TYPES(common_dtype, "log2_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/log2_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, log2_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::log2(a); });
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "log2_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::log2(a);
});
});
}
}
REGISTER_DISPATCH(log_stub, &log_kernel_cuda);
REGISTER_DISPATCH(log10_stub, &log10_kernel_cuda);
REGISTER_DISPATCH(log2_stub, &log2_kernel_cuda);
REGISTER_DISPATCH(log1p_stub, &log1p_kernel_cuda);
}} // namespace at::native
|
4e2953306cd31cd4430e2bbc120a85ed2a3485a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.cuh"
#include "pack.cuh"
#include "cuptens.h"
// void cupack(const float *a, uint32_t *b, int M, int N, int L)
// {
// ker_pack <<<CEIL(M*N*L, 32), 32>>> (a, b);
// }
void cupack(cuftens *src, cuptens *dst)
{
const int len = cuftens_len(src);
hipLaunchKernelGGL(( ker_pack) , dim3(CEIL(len, 32)), dim3(32), 0, 0,
src->data, (uint32_t *)dst->data);
}
| 4e2953306cd31cd4430e2bbc120a85ed2a3485a2.cu | #include "util.cuh"
#include "pack.cuh"
#include "cuptens.h"
// void cupack(const float *a, uint32_t *b, int M, int N, int L)
// {
// ker_pack <<<CEIL(M*N*L, 32), 32>>> (a, b);
// }
void cupack(cuftens *src, cuptens *dst)
{
const int len = cuftens_len(src);
ker_pack <<<CEIL(len, 32), 32>>>
(src->data, (uint32_t *)dst->data);
}
|
f3b2312ba6551e512244f5e0fd4cd9a57bb18ae3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexBinaryFlat.h>
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Timer.h>
#include <faiss/utils/random.h>
#include <gflags/gflags.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <map>
#include <memory>
#include <vector>
#include <hip/hip_runtime_api.h>
DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num, 128, "# of vecs");
DEFINE_int32(dim, 128, "# of dimensions");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int64(pinned_mem, 0, "pinned memory allocation to use");
DEFINE_bool(cpu, true, "run the CPU code for timing and comparison");
DEFINE_bool(use_unified_mem, false, "use Pascal unified memory for the index");
using namespace faiss::gpu;
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
hipProfilerStop();
auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
printf("using seed %ld\n", seed);
auto numQueries = FLAGS_num_queries;
auto index = std::unique_ptr<faiss::IndexBinaryFlat>(
new faiss::IndexBinaryFlat(FLAGS_dim));
HostTensor<unsigned char, 2, true> vecs({FLAGS_num, FLAGS_dim / 8});
faiss::byte_rand(vecs.data(), vecs.numElements(), seed);
index->add(FLAGS_num, vecs.data());
printf("Database: dim %d num vecs %d\n", FLAGS_dim, FLAGS_num);
printf("Hamming lookup: %d queries, total k %d\n", numQueries, FLAGS_k);
// Convert to GPU index
printf("Copying index to GPU...\n");
GpuIndexBinaryFlatConfig config;
config.memorySpace =
FLAGS_use_unified_mem ? MemorySpace::Unified : MemorySpace::Device;
faiss::gpu::StandardGpuResources res;
faiss::gpu::GpuIndexBinaryFlat gpuIndex(&res, index.get(), config);
printf("copy done\n");
// Build query vectors
HostTensor<unsigned char, 2, true> cpuQuery({numQueries, FLAGS_dim / 8});
faiss::byte_rand(cpuQuery.data(), cpuQuery.numElements(), seed);
// Time faiss CPU
HostTensor<int, 2, true> cpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::IndexBinary::idx_t, 2, true> cpuIndices(
{numQueries, FLAGS_k});
if (FLAGS_cpu) {
float cpuTime = 0.0f;
CpuTimer timer;
index->search(
numQueries,
cpuQuery.data(),
FLAGS_k,
cpuDistances.data(),
cpuIndices.data());
cpuTime = timer.elapsedMilliseconds();
printf("CPU time %.3f ms\n", cpuTime);
}
HostTensor<int, 2, true> gpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});
CUDA_VERIFY(hipProfilerStart());
faiss::gpu::synchronizeAllDevices();
float gpuTime = 0.0f;
// Time GPU
{
CpuTimer timer;
gpuIndex.search(
cpuQuery.getSize(0),
cpuQuery.data(),
FLAGS_k,
gpuDistances.data(),
gpuIndices.data());
// There is a device -> host copy above, so no need to time
// additional synchronization with the GPU
gpuTime = timer.elapsedMilliseconds();
}
CUDA_VERIFY(hipProfilerStop());
printf("GPU time %.3f ms\n", gpuTime);
CUDA_VERIFY(hipDeviceSynchronize());
return 0;
}
| f3b2312ba6551e512244f5e0fd4cd9a57bb18ae3.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexBinaryFlat.h>
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Timer.h>
#include <faiss/utils/random.h>
#include <gflags/gflags.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <map>
#include <memory>
#include <vector>
#include <cuda_profiler_api.h>
DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num, 128, "# of vecs");
DEFINE_int32(dim, 128, "# of dimensions");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int64(pinned_mem, 0, "pinned memory allocation to use");
DEFINE_bool(cpu, true, "run the CPU code for timing and comparison");
DEFINE_bool(use_unified_mem, false, "use Pascal unified memory for the index");
using namespace faiss::gpu;
int main(int argc, char** argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
cudaProfilerStop();
auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
printf("using seed %ld\n", seed);
auto numQueries = FLAGS_num_queries;
auto index = std::unique_ptr<faiss::IndexBinaryFlat>(
new faiss::IndexBinaryFlat(FLAGS_dim));
HostTensor<unsigned char, 2, true> vecs({FLAGS_num, FLAGS_dim / 8});
faiss::byte_rand(vecs.data(), vecs.numElements(), seed);
index->add(FLAGS_num, vecs.data());
printf("Database: dim %d num vecs %d\n", FLAGS_dim, FLAGS_num);
printf("Hamming lookup: %d queries, total k %d\n", numQueries, FLAGS_k);
// Convert to GPU index
printf("Copying index to GPU...\n");
GpuIndexBinaryFlatConfig config;
config.memorySpace =
FLAGS_use_unified_mem ? MemorySpace::Unified : MemorySpace::Device;
faiss::gpu::StandardGpuResources res;
faiss::gpu::GpuIndexBinaryFlat gpuIndex(&res, index.get(), config);
printf("copy done\n");
// Build query vectors
HostTensor<unsigned char, 2, true> cpuQuery({numQueries, FLAGS_dim / 8});
faiss::byte_rand(cpuQuery.data(), cpuQuery.numElements(), seed);
// Time faiss CPU
HostTensor<int, 2, true> cpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::IndexBinary::idx_t, 2, true> cpuIndices(
{numQueries, FLAGS_k});
if (FLAGS_cpu) {
float cpuTime = 0.0f;
CpuTimer timer;
index->search(
numQueries,
cpuQuery.data(),
FLAGS_k,
cpuDistances.data(),
cpuIndices.data());
cpuTime = timer.elapsedMilliseconds();
printf("CPU time %.3f ms\n", cpuTime);
}
HostTensor<int, 2, true> gpuDistances({numQueries, FLAGS_k});
HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});
CUDA_VERIFY(cudaProfilerStart());
faiss::gpu::synchronizeAllDevices();
float gpuTime = 0.0f;
// Time GPU
{
CpuTimer timer;
gpuIndex.search(
cpuQuery.getSize(0),
cpuQuery.data(),
FLAGS_k,
gpuDistances.data(),
gpuIndices.data());
// There is a device -> host copy above, so no need to time
// additional synchronization with the GPU
gpuTime = timer.elapsedMilliseconds();
}
CUDA_VERIFY(cudaProfilerStop());
printf("GPU time %.3f ms\n", gpuTime);
CUDA_VERIFY(cudaDeviceSynchronize());
return 0;
}
|
8df36de29fdd22b0169841bf43b169ab35d8a99f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hipcub/hipcub.hpp>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_quantization_impl.cuh"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <class T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void DequantizeLinearKernel(const int32_t* quantize, const T* bias, T* output, T scale, int bias_len, CUDA_LONG N) {
CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output[id] = (static_cast<T>(quantize[id]) * scale) + bias[id % bias_len];
id += NumThreadsPerBlock;
}
}
}
template <class T>
Status CudaDequantizeWithBias(const int32_t* quantize, const T* bias, T* output, T scale, int m, int n) {
int blocksPerGrid = static_cast<int>(CeilDiv(m * n, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(m * n);
hipLaunchKernelGGL(( DequantizeLinearKernel<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
quantize,
bias,
output,
scale,
n,
N);
return Status::OK();
}
template Status CudaDequantizeWithBias<float>(const int32_t* quantize, const float* bias, float* output, float scale, int m, int n);
template Status CudaDequantizeWithBias<half>(const int32_t* quantize, const half* bias, half* output, half scale, int m, int n);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 8df36de29fdd22b0169841bf43b169ab35d8a99f.cu | // Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_quantization_impl.cuh"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <class T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void DequantizeLinearKernel(const int32_t* quantize, const T* bias, T* output, T scale, int bias_len, CUDA_LONG N) {
CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output[id] = (static_cast<T>(quantize[id]) * scale) + bias[id % bias_len];
id += NumThreadsPerBlock;
}
}
}
template <class T>
Status CudaDequantizeWithBias(const int32_t* quantize, const T* bias, T* output, T scale, int m, int n) {
int blocksPerGrid = static_cast<int>(CeilDiv(m * n, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(m * n);
DequantizeLinearKernel<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
quantize,
bias,
output,
scale,
n,
N);
return Status::OK();
}
template Status CudaDequantizeWithBias<float>(const int32_t* quantize, const float* bias, float* output, float scale, int m, int n);
template Status CudaDequantizeWithBias<half>(const int32_t* quantize, const half* bias, half* output, half scale, int m, int n);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
56d045c859e39a8957c05fdf4b8f879b5708d06b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/// ================================================================
///
/// Disclaimer: IMPORTANT: This software was developed at theNT
/// National Institute of Standards and Technology by employees of the
/// Federal Government in the course of their official duties.
/// Pursuant to title 17 Section 105 of the United States Code this
/// software is not subject to copyright protection and is in the
/// public domain. This is an experimental system. NIST assumes no
/// responsibility whatsoever for its use by other parties, and makes
/// no guarantees, expressed or implied, about its quality,
/// reliability, or any other characteristic. We would appreciate
/// acknowledgement if the software is used. This software can be
/// redistributed and/or modified freely provided that any derivative
/// works bear some notice that they are derived from it, and any
/// modified versions bear some notice that they have been modified.
///
/// ================================================================
// ================================================================
//
// Author: Timothy Blattner
// Date: Wed Nov 30 12:36:40 2011 EScufftDoubleComplex
//
// Functions that execute on the graphics card for doing
// Vector computation.
//
// ================================================================
#define THREADS_PER_BLOCK 256
#define MIN_DISTANCE 1.0
// ================================================================
__device__ bool checkDistance(volatile int *maxesRow, volatile int *maxesCol, int nMax, int curIdx, int width)
{
int row = curIdx / width;
int col = curIdx % width;
int j;
//double dist;
for (j = 0; j < nMax; j++)
{
if (maxesRow[j] == row && maxesCol[j] == col)
return false;
// dist = distance(maxesRow[j], row, maxesCol[j], col);
// if (dist < MIN_DISTANCE)
// return false;
}
return true;
}
__device__ bool checkDistance(int *maxesRow, int *maxesCol, int nMax, int curIdx, int width)
{
int row = curIdx / width;
int col = curIdx % width;
int j;
//double dist;
for (j = 0; j < nMax; j++)
{
if (maxesRow[j] == row && maxesCol[j] == col)
return false;
//dist = distance(maxesRow[j], row, maxesCol[j], col);
//if (dist < MIN_DISTANCE)
// return false;
}
return true;
}
__global__ void reduce_max_filter_final(double *g_idata, double *g_odata, int * max_idx, unsigned int n, unsigned int width, int blockSize, int *maxes, int nMax)
{
__shared__ int smaxesRow[10];
__shared__ int smaxesCol[10];
__shared__ int smaxesVal[10];
__shared__ double sdata[THREADS_PER_BLOCK];
__shared__ int idxData[THREADS_PER_BLOCK];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
if (tid < nMax)
{
smaxesVal[tid] = maxes[tid];
smaxesRow[tid] = smaxesVal[tid] / width;
smaxesCol[tid] = smaxesVal[tid] % width;
}
__syncthreads();
double myMax = 0.0;
int myMaxIndex;
while (i < n)
{
if (myMax < g_idata[i])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, max_idx[i], width))
{
myMax = g_idata[i];
myMaxIndex = max_idx[i];
}
}
if (i+blockSize < n)
{
if (myMax < g_idata[i+blockSize])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax,
max_idx[i+blockSize],
width))
{
myMax = g_idata[i+blockSize];
myMaxIndex = max_idx[i+blockSize];
}
}
}
i += gridSize;
}
sdata[tid] = myMax;
idxData[tid] = myMaxIndex;
__syncthreads();
if (blockSize >= 512)
{
if (tid < 256)
{
if (myMax < sdata[tid + 256])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, idxData[tid+256],
width))
{
sdata[tid] = myMax = sdata[tid+256];
idxData[tid] = idxData[tid+256];
}
}
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
{
if (myMax < sdata[tid + 128])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, idxData[tid+128],
width))
{
sdata[tid] = myMax = sdata[tid+128];
idxData[tid] = idxData[tid+128];
}
}
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
{
if(myMax < sdata[tid + 64])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, idxData[tid+64],
width))
{
sdata[tid] = myMax = sdata[tid+64];
idxData[tid] = idxData[tid+64];
}
}
}
__syncthreads();
}
volatile double *vdata = sdata;
volatile int *vidxData = idxData;
volatile int *vsmaxesRow = smaxesRow;
volatile int *vsmaxesCol = smaxesCol;
if (tid < 32)
{
if (blockSize >= 64)
if (myMax < vdata[tid + 32])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+32],
width))
{
vdata[tid] = myMax = vdata[tid+32];
vidxData[tid] = vidxData[tid+32];
}
}
if (blockSize >= 32)
if (myMax < vdata[tid + 16])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+16],
width))
{
vdata[tid] = myMax = vdata[tid+16];
vidxData[tid] = vidxData[tid+16];
}
}
if (blockSize >= 16)
if (myMax < vdata[tid + 8])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+8],
width))
{
vdata[tid] = myMax = vdata[tid+8];
vidxData[tid] = vidxData[tid+8];
}
}
if (blockSize >= 8)
if (myMax < vdata[tid + 4])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+4],
width))
{
vdata[tid] = myMax = vdata[tid+4];
vidxData[tid] = vidxData[tid+4];
}
}
if (blockSize >= 4)
if (myMax < vdata[tid+2])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+2],
width))
{
vdata[tid] = myMax = vdata[tid+2];
vidxData[tid] = vidxData[tid+2];
}
}
if (blockSize >= 2)
if (myMax < vdata[tid + 1])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+1],
width))
{
vdata[tid] = myMax = vdata[tid+1];
vidxData[tid] = vidxData[tid+1];
}
}
__syncthreads();
}
if (tid == 0)
{
g_odata[blockIdx.x] = sdata[0];
max_idx[blockIdx.x] = idxData[0];
if (gridDim.x == 1)
maxes[nMax] = idxData[0];
}
} | 56d045c859e39a8957c05fdf4b8f879b5708d06b.cu | #include "includes.h"
/// ================================================================
///
/// Disclaimer: IMPORTANT: This software was developed at theNT
/// National Institute of Standards and Technology by employees of the
/// Federal Government in the course of their official duties.
/// Pursuant to title 17 Section 105 of the United States Code this
/// software is not subject to copyright protection and is in the
/// public domain. This is an experimental system. NIST assumes no
/// responsibility whatsoever for its use by other parties, and makes
/// no guarantees, expressed or implied, about its quality,
/// reliability, or any other characteristic. We would appreciate
/// acknowledgement if the software is used. This software can be
/// redistributed and/or modified freely provided that any derivative
/// works bear some notice that they are derived from it, and any
/// modified versions bear some notice that they have been modified.
///
/// ================================================================
// ================================================================
//
// Author: Timothy Blattner
// Date: Wed Nov 30 12:36:40 2011 EScufftDoubleComplex
//
// Functions that execute on the graphics card for doing
// Vector computation.
//
// ================================================================
#define THREADS_PER_BLOCK 256
#define MIN_DISTANCE 1.0
// ================================================================
__device__ bool checkDistance(volatile int *maxesRow, volatile int *maxesCol, int nMax, int curIdx, int width)
{
int row = curIdx / width;
int col = curIdx % width;
int j;
//double dist;
for (j = 0; j < nMax; j++)
{
if (maxesRow[j] == row && maxesCol[j] == col)
return false;
// dist = distance(maxesRow[j], row, maxesCol[j], col);
// if (dist < MIN_DISTANCE)
// return false;
}
return true;
}
__device__ bool checkDistance(int *maxesRow, int *maxesCol, int nMax, int curIdx, int width)
{
int row = curIdx / width;
int col = curIdx % width;
int j;
//double dist;
for (j = 0; j < nMax; j++)
{
if (maxesRow[j] == row && maxesCol[j] == col)
return false;
//dist = distance(maxesRow[j], row, maxesCol[j], col);
//if (dist < MIN_DISTANCE)
// return false;
}
return true;
}
__global__ void reduce_max_filter_final(double *g_idata, double *g_odata, int * max_idx, unsigned int n, unsigned int width, int blockSize, int *maxes, int nMax)
{
__shared__ int smaxesRow[10];
__shared__ int smaxesCol[10];
__shared__ int smaxesVal[10];
__shared__ double sdata[THREADS_PER_BLOCK];
__shared__ int idxData[THREADS_PER_BLOCK];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
if (tid < nMax)
{
smaxesVal[tid] = maxes[tid];
smaxesRow[tid] = smaxesVal[tid] / width;
smaxesCol[tid] = smaxesVal[tid] % width;
}
__syncthreads();
double myMax = 0.0;
int myMaxIndex;
while (i < n)
{
if (myMax < g_idata[i])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, max_idx[i], width))
{
myMax = g_idata[i];
myMaxIndex = max_idx[i];
}
}
if (i+blockSize < n)
{
if (myMax < g_idata[i+blockSize])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax,
max_idx[i+blockSize],
width))
{
myMax = g_idata[i+blockSize];
myMaxIndex = max_idx[i+blockSize];
}
}
}
i += gridSize;
}
sdata[tid] = myMax;
idxData[tid] = myMaxIndex;
__syncthreads();
if (blockSize >= 512)
{
if (tid < 256)
{
if (myMax < sdata[tid + 256])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, idxData[tid+256],
width))
{
sdata[tid] = myMax = sdata[tid+256];
idxData[tid] = idxData[tid+256];
}
}
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
{
if (myMax < sdata[tid + 128])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, idxData[tid+128],
width))
{
sdata[tid] = myMax = sdata[tid+128];
idxData[tid] = idxData[tid+128];
}
}
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
{
if(myMax < sdata[tid + 64])
{
if (checkDistance(smaxesRow, smaxesCol,
nMax, idxData[tid+64],
width))
{
sdata[tid] = myMax = sdata[tid+64];
idxData[tid] = idxData[tid+64];
}
}
}
__syncthreads();
}
volatile double *vdata = sdata;
volatile int *vidxData = idxData;
volatile int *vsmaxesRow = smaxesRow;
volatile int *vsmaxesCol = smaxesCol;
if (tid < 32)
{
if (blockSize >= 64)
if (myMax < vdata[tid + 32])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+32],
width))
{
vdata[tid] = myMax = vdata[tid+32];
vidxData[tid] = vidxData[tid+32];
}
}
if (blockSize >= 32)
if (myMax < vdata[tid + 16])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+16],
width))
{
vdata[tid] = myMax = vdata[tid+16];
vidxData[tid] = vidxData[tid+16];
}
}
if (blockSize >= 16)
if (myMax < vdata[tid + 8])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+8],
width))
{
vdata[tid] = myMax = vdata[tid+8];
vidxData[tid] = vidxData[tid+8];
}
}
if (blockSize >= 8)
if (myMax < vdata[tid + 4])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+4],
width))
{
vdata[tid] = myMax = vdata[tid+4];
vidxData[tid] = vidxData[tid+4];
}
}
if (blockSize >= 4)
if (myMax < vdata[tid+2])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+2],
width))
{
vdata[tid] = myMax = vdata[tid+2];
vidxData[tid] = vidxData[tid+2];
}
}
if (blockSize >= 2)
if (myMax < vdata[tid + 1])
{
if (checkDistance(vsmaxesRow, vsmaxesCol,
nMax, vidxData[tid+1],
width))
{
vdata[tid] = myMax = vdata[tid+1];
vidxData[tid] = vidxData[tid+1];
}
}
__syncthreads();
}
if (tid == 0)
{
g_odata[blockIdx.x] = sdata[0];
max_idx[blockIdx.x] = idxData[0];
if (gridDim.x == 1)
maxes[nMax] = idxData[0];
}
} |
f48ebb2c5d602af6e6a28af2eea0877620aa316c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/TextureTools.h"
#include <stdio.h>
namespace hpmc
{
namespace detail
{
/*! \file IntegratorHPMCMonoGPU.cu
\brief Definition of CUDA kernels and drivers for IntegratorHPMCMono
*/
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void gpu_hpmc_excell_kernel(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji)
{
// compute the output cell
unsigned int my_cell = 0;
my_cell = blockDim.x * blockIdx.x + threadIdx.x;
if (my_cell >= ci.getNumElements())
return;
unsigned int my_cell_size = 0;
// loop over neighboring cells and build up the expanded cell list
for (unsigned int offset = 0; offset < cadji.getW(); offset++)
{
unsigned int neigh_cell = d_cell_adj[cadji(offset, my_cell)];
unsigned int neigh_cell_size = d_cell_size[neigh_cell];
for (unsigned int k = 0; k < neigh_cell_size; k++)
{
// read in the index of the new particle to add to our cell
unsigned int new_idx = tex1Dfetch(cell_idx_tex, cli(k, neigh_cell));
d_excell_idx[excli(my_cell_size, my_cell)] = new_idx;
my_cell_size++;
}
}
// write out the final size
d_excell_size[my_cell] = my_cell_size;
}
//! Kernel driver for gpu_hpmc_excell_kernel()
hipError_t gpu_hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D& excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const unsigned int block_size)
{
assert(d_excell_idx);
assert(d_excell_size);
assert(d_cell_idx);
assert(d_cell_size);
assert(d_cell_adj);
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, gpu_hpmc_excell_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
dim3 threads(min(block_size, (unsigned int)max_block_size), 1, 1);
dim3 grid(ci.getNumElements() / block_size + 1, 1, 1);
// bind the textures
cell_idx_tex.normalized = false;
cell_idx_tex.filterMode = hipFilterModePoint;
hipError_t error = hipBindTexture(0, cell_idx_tex, d_cell_idx, sizeof(unsigned int)*cli.getNumElements());
if (error != hipSuccess)
return error;
hipLaunchKernelGGL(( gpu_hpmc_excell_kernel), dim3(grid), dim3(threads), 0, 0, d_excell_idx,
d_excell_size,
excli,
d_cell_idx,
d_cell_size,
d_cell_adj,
ci,
cli,
cadji);
return hipSuccess;
}
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void gpu_hpmc_shift_kernel(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim box,
const Scalar3 shift)
{
// identify the active cell that this thread handles
unsigned int my_pidx = blockIdx.x * blockDim.x + threadIdx.x;
// this thread is inactive if it indexes past the end of the particle list
if (my_pidx >= N)
return;
// pull in the current position
Scalar4 postype = d_postype[my_pidx];
// shift the position
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
pos += shift;
// wrap the particle back into the box
int3 image = d_image[my_pidx];
box.wrap(pos, image);
// write out the new position and orientation
d_postype[my_pidx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[my_pidx] = image;
}
//! Kernel driver for gpu_hpmc_shift_kernel()
hipError_t gpu_hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim& box,
const Scalar3 shift,
const unsigned int block_size)
{
assert(d_postype);
assert(d_image);
// setup the grid to run the kernel
dim3 threads_shift(block_size, 1, 1);
dim3 grid_shift(N / block_size + 1, 1, 1);
hipLaunchKernelGGL(( gpu_hpmc_shift_kernel), dim3(grid_shift), dim3(threads_shift), 0, 0, d_postype,
d_image,
N,
box,
shift);
// after this kernel we return control of cuda managed memory to the host
hipDeviceSynchronize();
return hipSuccess;
}
}; // end namespace detail
} // end namespace hpmc
| f48ebb2c5d602af6e6a28af2eea0877620aa316c.cu | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/TextureTools.h"
#include <stdio.h>
namespace hpmc
{
namespace detail
{
/*! \file IntegratorHPMCMonoGPU.cu
\brief Definition of CUDA kernels and drivers for IntegratorHPMCMono
*/
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void gpu_hpmc_excell_kernel(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji)
{
// compute the output cell
unsigned int my_cell = 0;
my_cell = blockDim.x * blockIdx.x + threadIdx.x;
if (my_cell >= ci.getNumElements())
return;
unsigned int my_cell_size = 0;
// loop over neighboring cells and build up the expanded cell list
for (unsigned int offset = 0; offset < cadji.getW(); offset++)
{
unsigned int neigh_cell = d_cell_adj[cadji(offset, my_cell)];
unsigned int neigh_cell_size = d_cell_size[neigh_cell];
for (unsigned int k = 0; k < neigh_cell_size; k++)
{
// read in the index of the new particle to add to our cell
unsigned int new_idx = tex1Dfetch(cell_idx_tex, cli(k, neigh_cell));
d_excell_idx[excli(my_cell_size, my_cell)] = new_idx;
my_cell_size++;
}
}
// write out the final size
d_excell_size[my_cell] = my_cell_size;
}
//! Kernel driver for gpu_hpmc_excell_kernel()
cudaError_t gpu_hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D& excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const unsigned int block_size)
{
assert(d_excell_idx);
assert(d_excell_size);
assert(d_cell_idx);
assert(d_cell_size);
assert(d_cell_adj);
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, gpu_hpmc_excell_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
dim3 threads(min(block_size, (unsigned int)max_block_size), 1, 1);
dim3 grid(ci.getNumElements() / block_size + 1, 1, 1);
// bind the textures
cell_idx_tex.normalized = false;
cell_idx_tex.filterMode = cudaFilterModePoint;
cudaError_t error = cudaBindTexture(0, cell_idx_tex, d_cell_idx, sizeof(unsigned int)*cli.getNumElements());
if (error != cudaSuccess)
return error;
gpu_hpmc_excell_kernel<<<grid, threads>>>(d_excell_idx,
d_excell_size,
excli,
d_cell_idx,
d_cell_size,
d_cell_adj,
ci,
cli,
cadji);
return cudaSuccess;
}
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void gpu_hpmc_shift_kernel(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim box,
const Scalar3 shift)
{
// identify the active cell that this thread handles
unsigned int my_pidx = blockIdx.x * blockDim.x + threadIdx.x;
// this thread is inactive if it indexes past the end of the particle list
if (my_pidx >= N)
return;
// pull in the current position
Scalar4 postype = d_postype[my_pidx];
// shift the position
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
pos += shift;
// wrap the particle back into the box
int3 image = d_image[my_pidx];
box.wrap(pos, image);
// write out the new position and orientation
d_postype[my_pidx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[my_pidx] = image;
}
//! Kernel driver for gpu_hpmc_shift_kernel()
cudaError_t gpu_hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim& box,
const Scalar3 shift,
const unsigned int block_size)
{
assert(d_postype);
assert(d_image);
// setup the grid to run the kernel
dim3 threads_shift(block_size, 1, 1);
dim3 grid_shift(N / block_size + 1, 1, 1);
gpu_hpmc_shift_kernel<<<grid_shift, threads_shift>>>(d_postype,
d_image,
N,
box,
shift);
// after this kernel we return control of cuda managed memory to the host
cudaDeviceSynchronize();
return cudaSuccess;
}
}; // end namespace detail
} // end namespace hpmc
|
d4f2872d8cb53302dda6fe3815c377b7c1189ac4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
void reference (
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float* dX1, float* dX2) {
for (int i = 0; i < N; i++) {
float dist = -Y[i] * (X1[i] - X2[i]) + margin;
if (dist < 0.f) {
dX1[i] = dX2[i] = 0.f;
} else {
dX1[i] = -Y[i] * dOutput[i];
dX2[i] = Y[i] * dOutput[i];
}
}
}
__global__
void MRCGradient (
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float*__restrict__ dX1, float*__restrict__ dX2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
float dist = -Y[i] * (X1[i] - X2[i]) + margin;
if (dist < 0.f) {
dX1[i] = dX2[i] = 0.f;
} else {
dX1[i] = -Y[i] * dOutput[i];
dX2[i] = Y[i] * dOutput[i];
}
}
}
__global__
void MRCGradient2(
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float*__restrict__ dX1, float*__restrict__ dX2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
float y = Y[i];
float o = dOutput[i];
float dist = -y * (X1[i] - X2[i]) + margin;
dX1[i] = dist < 0.f ? 0.f : -y * o;
dX2[i] = dist < 0.f ? 0.f : y * o;
}
}
// Benchmark driver: fills random inputs, times both kernel variants over
// <repeat> launches, then verifies the last device result against the CPU
// reference within an absolute tolerance of 1e-3.
int main(int argc, char* argv[])
{
  if (argc != 3) {
    printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
    return 1;
  }
  const int length = atoi(argv[1]);
  const int repeat = atoi(argv[2]);

  // Fix: keep separate byte counts for float and int buffers.  The original
  // reused the float byte count for h_Y/d_Y, which is only correct when
  // sizeof(int) == sizeof(float).
  const size_t size_bytes  = length * sizeof(float);
  const size_t isize_bytes = length * sizeof(int);

  float *h_X1  = (float*) malloc (size_bytes);
  float *h_X2  = (float*) malloc (size_bytes);
  float *h_O   = (float*) malloc (size_bytes);
  int   *h_Y   = (int*)   malloc (isize_bytes);
  float *h_dX1 = (float*) malloc (size_bytes);
  float *h_dX2 = (float*) malloc (size_bytes);
  float *r_dX1 = (float*) malloc (size_bytes);
  float *r_dX2 = (float*) malloc (size_bytes);

  const float m = 0.01f; // margin (float literal avoids double promotion)

  // Deterministic inputs: fixed seed, values in [-2, 2), labels in {-1, +1}.
  std::default_random_engine g (123);
  std::uniform_real_distribution<float> distr (-2.f, 2.f);
  for (int i = 0; i < length; i++) {
    h_X1[i] = distr(g);
    h_X2[i] = distr(g);
    h_O[i]  = distr(g);
    h_Y[i]  = (distr(g) < 0) ? -1 : 1 ;
  }

  float *d_X1, *d_X2, *d_O, *d_dX1, *d_dX2;
  int *d_Y;
  hipMalloc((void**)&d_X1, size_bytes);
  hipMemcpy(d_X1, h_X1, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_X2, size_bytes);
  hipMemcpy(d_X2, h_X2, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_O, size_bytes);
  hipMemcpy(d_O, h_O, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_Y, isize_bytes);
  hipMemcpy(d_Y, h_Y, isize_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_dX1, size_bytes);
  hipMalloc((void**)&d_dX2, size_bytes);

  // One thread per element, 256 threads per block, ceil-div covers the tail.
  dim3 grid ((length + 255) / 256);
  dim3 block (256);

  // warmup: populate caches / finish lazy initialization before timing
  for (int i = 0; i < repeat; i++) {
    hipLaunchKernelGGL(( MRCGradient) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
    hipLaunchKernelGGL(( MRCGradient2) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  }
  hipDeviceSynchronize();

  // Time the branching variant.
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    hipLaunchKernelGGL(( MRCGradient) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC kernel: %f (us)\n", (time * 1e-3f) / repeat);

  // Time the select-based variant.
  start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    hipLaunchKernelGGL(( MRCGradient2) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  hipDeviceSynchronize();
  end = std::chrono::steady_clock::now();
  time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC2 kernel: %f (us)\n", (time * 1e-3f) / repeat);

  // verify the device result against the CPU reference
  hipMemcpy(h_dX1, d_dX1, size_bytes, hipMemcpyDeviceToHost);
  hipMemcpy(h_dX2, d_dX2, size_bytes, hipMemcpyDeviceToHost);
  reference (length, h_Y, h_X1, h_X2, h_O, m, r_dX1, r_dX2);
  bool ok = true;
  for (int i = 0; i < length; i++) {
    if (fabs(h_dX1[i] - r_dX1[i]) > 1e-3 || fabs(h_dX2[i] - r_dX2[i]) > 1e-3) {
      ok = false;
      break;
    }
  }
  printf("%s\n", ok ? "PASS" : "FAIL");

  hipFree(d_X1);
  hipFree(d_X2);
  hipFree(d_O);
  hipFree(d_Y);
  hipFree(d_dX1);
  hipFree(d_dX2);
  free(h_X1);
  free(h_X2);
  free(h_O);
  free(h_Y);
  free(h_dX1);
  free(h_dX2);
  free(r_dX1);  // fix: r_dX1/r_dX2 were leaked in the original
  free(r_dX2);
  return 0;
}
| d4f2872d8cb53302dda6fe3815c377b7c1189ac4.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
// Host-side golden model for the margin-ranking-criterion gradient.
// hinge = margin - y*(x1 - x2): a negative hinge means the pair already
// satisfies the margin, so both gradients vanish; otherwise the gradients
// are -y*dOut (w.r.t. x1) and +y*dOut (w.r.t. x2).
void reference (
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float* dX1, float* dX2) {
  for (int k = 0; k < N; ++k) {
    const float hinge = margin - Y[k] * (X1[k] - X2[k]);
    if (hinge < 0.f) {
      dX1[k] = 0.f;
      dX2[k] = 0.f;
    } else {
      const float grad = Y[k] * dOutput[k];
      dX1[k] = -grad;
      dX2[k] = grad;
    }
  }
}
// Margin-ranking-criterion gradient, one thread per element.
// Grid must cover N; the early-return guard handles the ragged tail.
__global__
void MRCGradient (
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float*__restrict__ dX1, float*__restrict__ dX2) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;  // tail guard
  const float hinge = margin - Y[tid] * (X1[tid] - X2[tid]);
  if (hinge < 0.f) {
    // Margin satisfied: no gradient flows to either input.
    dX1[tid] = 0.f;
    dX2[tid] = 0.f;
  } else {
    const float grad = Y[tid] * dOutput[tid];
    dX1[tid] = -grad;
    dX2[tid] = grad;
  }
}
// Variant of MRCGradient: loads y and dOutput once per thread and writes
// both outputs unconditionally via selects instead of branching.
__global__
void MRCGradient2(
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float*__restrict__ dX1, float*__restrict__ dX2) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;  // tail guard
  const float y = Y[tid];
  const float o = dOutput[tid];
  const float hinge = margin - y * (X1[tid] - X2[tid]);
  const bool inside = !(hinge < 0.f);
  const float grad = y * o;
  dX1[tid] = inside ? -grad : 0.f;
  dX2[tid] = inside ? grad : 0.f;
}
// Benchmark driver: fills random inputs, times both kernel variants over
// <repeat> launches, then verifies the last device result against the CPU
// reference within an absolute tolerance of 1e-3.
int main(int argc, char* argv[])
{
  if (argc != 3) {
    printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
    return 1;
  }
  const int length = atoi(argv[1]);
  const int repeat = atoi(argv[2]);

  // Fix: keep separate byte counts for float and int buffers.  The original
  // reused the float byte count for h_Y/d_Y, which is only correct when
  // sizeof(int) == sizeof(float).
  const size_t size_bytes  = length * sizeof(float);
  const size_t isize_bytes = length * sizeof(int);

  float *h_X1  = (float*) malloc (size_bytes);
  float *h_X2  = (float*) malloc (size_bytes);
  float *h_O   = (float*) malloc (size_bytes);
  int   *h_Y   = (int*)   malloc (isize_bytes);
  float *h_dX1 = (float*) malloc (size_bytes);
  float *h_dX2 = (float*) malloc (size_bytes);
  float *r_dX1 = (float*) malloc (size_bytes);
  float *r_dX2 = (float*) malloc (size_bytes);

  const float m = 0.01f; // margin (float literal avoids double promotion)

  // Deterministic inputs: fixed seed, values in [-2, 2), labels in {-1, +1}.
  std::default_random_engine g (123);
  std::uniform_real_distribution<float> distr (-2.f, 2.f);
  for (int i = 0; i < length; i++) {
    h_X1[i] = distr(g);
    h_X2[i] = distr(g);
    h_O[i]  = distr(g);
    h_Y[i]  = (distr(g) < 0) ? -1 : 1 ;
  }

  float *d_X1, *d_X2, *d_O, *d_dX1, *d_dX2;
  int *d_Y;
  hipMalloc((void**)&d_X1, size_bytes);
  hipMemcpy(d_X1, h_X1, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_X2, size_bytes);
  hipMemcpy(d_X2, h_X2, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_O, size_bytes);
  hipMemcpy(d_O, h_O, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_Y, isize_bytes);
  hipMemcpy(d_Y, h_Y, isize_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_dX1, size_bytes);
  hipMalloc((void**)&d_dX2, size_bytes);

  // One thread per element, 256 threads per block, ceil-div covers the tail.
  dim3 grid ((length + 255) / 256);
  dim3 block (256);

  // warmup: populate caches / finish lazy initialization before timing
  for (int i = 0; i < repeat; i++) {
    MRCGradient <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
    MRCGradient2 <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  }
  hipDeviceSynchronize();

  // Time the branching variant.
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    MRCGradient <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC kernel: %f (us)\n", (time * 1e-3f) / repeat);

  // Time the select-based variant.
  start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    MRCGradient2 <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  hipDeviceSynchronize();
  end = std::chrono::steady_clock::now();
  time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC2 kernel: %f (us)\n", (time * 1e-3f) / repeat);

  // verify the device result against the CPU reference
  hipMemcpy(h_dX1, d_dX1, size_bytes, hipMemcpyDeviceToHost);
  hipMemcpy(h_dX2, d_dX2, size_bytes, hipMemcpyDeviceToHost);
  reference (length, h_Y, h_X1, h_X2, h_O, m, r_dX1, r_dX2);
  bool ok = true;
  for (int i = 0; i < length; i++) {
    if (fabs(h_dX1[i] - r_dX1[i]) > 1e-3 || fabs(h_dX2[i] - r_dX2[i]) > 1e-3) {
      ok = false;
      break;
    }
  }
  printf("%s\n", ok ? "PASS" : "FAIL");

  hipFree(d_X1);
  hipFree(d_X2);
  hipFree(d_O);
  hipFree(d_Y);
  hipFree(d_dX1);
  hipFree(d_dX2);
  free(h_X1);
  free(h_X2);
  free(h_O);
  free(h_Y);
  free(h_dX1);
  free(h_dX2);
  free(r_dX1);  // fix: r_dX1/r_dX2 were leaked in the original
  free(r_dX2);
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.