hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
e28b6dda35fe3f2f3c29645c0aecbaeb27048d64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <hip/hip_fp16.h>
#include <assert.h>
#include "fp16_conversion.h" // host function for half conversion
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
__global__
void myTest(float *x, half *y)
{
//float v=2.51f;
//float v=2.5f;
//float v=-2.5f;
//float v=-2.51f;
//float v = 27.999878f; // 27 , 28
//float v = 27.999278f; // 27f, 28h
//float v = 27.998278f; // 27, 28
//float v = 27.995278f; // 27, 28
//float v = 27.994278f; // 27, 28
//float v = 27.994f; // 27, 28
//float v = 27.991f; // 27, 27
//float v = 27.992f; // 27, 27
//float v = 27.993f; // 27, 28
//float v = 27.9921f; // 27,
float v = 27.9922f; // 27,
//float v = 27.99f; // 27, 27
x[0] = floorf(v);
half vHalf = __float2half(v);
float vHalf_fp32 = __half2float(vHalf);
printf("fp32: %f, fp16 %f, diff %f\n", v, vHalf_fp32, vHalf_fp32 - v);
y[0] = hfloor(__float2half(v));
}
int main(int argc, char** argv) {
int devid =0;
if (argc != 2) {
fprintf(stderr, "Specify device to use only. (./program devid)\n");
exit(1);
}else {
devid = atoi(argv[1]);
}
hipSetDevice(devid);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, devid);
printf("device %d : %s\n", devid, prop.name);
float *x;
checkCuda(hipMallocManaged(&x, sizeof(float)));
half *y;
checkCuda(hipMallocManaged(&y, sizeof(half)));
hipLaunchKernelGGL(( myTest), dim3(1), dim3(1), 0, 0, x, y);
checkCuda(hipDeviceSynchronize());
// check results
printf("%f\n", x[0]);
printf("%f\n", half_to_float(y[0]));
return 0;
}
| e28b6dda35fe3f2f3c29645c0aecbaeb27048d64.cu | #include <cstdio>
#include <cuda_fp16.h>
#include <assert.h>
#include "fp16_conversion.h" // host function for half conversion
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
__global__
void myTest(float *x, half *y)
{
//float v=2.51f;
//float v=2.5f;
//float v=-2.5f;
//float v=-2.51f;
//float v = 27.999878f; // 27 , 28
//float v = 27.999278f; // 27f, 28h
//float v = 27.998278f; // 27, 28
//float v = 27.995278f; // 27, 28
//float v = 27.994278f; // 27, 28
//float v = 27.994f; // 27, 28
//float v = 27.991f; // 27, 27
//float v = 27.992f; // 27, 27
//float v = 27.993f; // 27, 28
//float v = 27.9921f; // 27,
float v = 27.9922f; // 27,
//float v = 27.99f; // 27, 27
x[0] = floorf(v);
half vHalf = __float2half(v);
float vHalf_fp32 = __half2float(vHalf);
printf("fp32: %f, fp16 %f, diff %f\n", v, vHalf_fp32, vHalf_fp32 - v);
y[0] = hfloor(__float2half(v));
}
int main(int argc, char** argv) {
int devid =0;
if (argc != 2) {
fprintf(stderr, "Specify device to use only. (./program devid)\n");
exit(1);
}else {
devid = atoi(argv[1]);
}
cudaSetDevice(devid);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, devid);
printf("device %d : %s\n", devid, prop.name);
float *x;
checkCuda(cudaMallocManaged(&x, sizeof(float)));
half *y;
checkCuda(cudaMallocManaged(&y, sizeof(half)));
myTest<<<1, 1>>>(x, y);
checkCuda(cudaDeviceSynchronize());
// check results
printf("%f\n", x[0]);
printf("%f\n", half_to_float(y[0]));
return 0;
}
|
49f180ef78dce40ef006e5136039c6e0becc645a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA device code for wf.
*
*/
#include <cuda/hip/hip_runtime_api.h>
#include <cuda/cuda.h>
#include <cuda/device_launch_parameters.h>
#include <hipfft.h>
#include "hip/hip_runtime.h"
#include "cuda-math.h"
#include "defs.h"
#include "grid_wf-cuda.h"
extern void *grid_gpu_mem_addr;
extern "C" void cuda_error_check();
/********************************************************************************************************************/
/*
* Potential energy propagation in real space (possibly with absorbing boundaries).
*
*/
/* regular */
__global__ void grid_cuda_wf_propagate_potential_gpu1(CUCOMPLEX *b, CUCOMPLEX *pot, CUCOMPLEX c, CUREAL cons, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
b[idx] = b[idx] * CUCEXP(c * (cons + pot[idx]));
}
/* abs using imag time */
__global__ void grid_cuda_wf_propagate_potential_gpu2(CUCOMPLEX *b, CUCOMPLEX *pot, CUCOMPLEX c, CUREAL cons, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
CUREAL tmp;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
tmp = grid_cuda_wf_absorb(i, j, k, lx, hx, ly, hy, lz, hz);
c.x = c.y * tmp;
c.y *= 1.0 - tmp;
b[idx] = b[idx] * CUCEXP(c * (cons + pot[idx]));
}
/* abs using complex potential */
__global__ void grid_cuda_wf_propagate_potential_gpu3(CUCOMPLEX *b, CUCOMPLEX *pot, CUCOMPLEX c, CUCOMPLEX amp, CUREAL rho0, CUREAL cons, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
b[idx] = b[idx] * CUCEXP(c * (cons + pot[idx] - CUMAKE(0.0, 1.0) * amp * grid_cuda_wf_absorb(i, j, k, lx, hx, ly, hy, lz, hz)
* (CUCSQNORM(b[idx]) - rho0)));
}
/*
* Propagate potential energy in real space with absorbing boundaries.
*
* wf = Source/destination grid for operation (REAL complex *; input/output).
* pot = Potential grid (CUCOMPLEX *; input).
* time_step= Time step length (CUCOMPLEX; input).
* add_abs = Add complex abs potential? (char; input).
* amp = Amplitude for complex boundary (CUCOMPLEX; input).
* rho0 = Target value for |psi|^2 (REAL; input).
* cons = Constant to add to potential (REAL; input).
* lx = Lower bound for absorbing bc (INT; input).
* hx = Upper bound for absorbing bc (INT; input).
* ly = Lower bound for absorbing bc (INT; input).
* hy = Upper bound for absorbing bc (INT; input).
* lz = Lower bound for absorbing bc (INT; input).
* hz = Upper bound for absorbing bc (INT; input).
* nx = # of points along x (INT).
* ny = # of points along y (INT).
* nz = # of points along z (INT).
*
* Only periodic boundaries!
*
*/
extern "C" void grid_cuda_wf_propagate_potentialW(CUCOMPLEX *grid, CUCOMPLEX *pot, CUCOMPLEX time_step, char add_abs, CUCOMPLEX amp, CUREAL rho0, CUREAL cons, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) {
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 blocks((nz + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(ny + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(nx + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK);
CUCOMPLEX c;
c.x = time_step.y / HBAR;
c.y = -time_step.x / HBAR;
if(lz) {
if(add_abs)
hipLaunchKernelGGL(( grid_cuda_wf_propagate_potential_gpu3), dim3(blocks),dim3(threads), 0, 0, grid, pot, c, amp, rho0, cons, lx, hx, ly, hy, lz, hz, nx, ny, nz);
else
hipLaunchKernelGGL(( grid_cuda_wf_propagate_potential_gpu2), dim3(blocks),dim3(threads), 0, 0, grid, pot, c, cons, lx, hx, ly, hy, lz, hz, nx, ny, nz);
} else
hipLaunchKernelGGL(( grid_cuda_wf_propagate_potential_gpu1), dim3(blocks),dim3(threads), 0, 0, grid, pot, c, cons, nx, ny, nz);
cuda_error_check();
}
/********************************************************************************************************************/
/*
* Density
*
*/
__global__ void grid_cuda_wf_density_gpu(CUCOMPLEX *b, CUREAL *dens, INT nx, INT ny, INT nz, INT nz2) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx, idx2;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
idx2 = (i * ny + j) * nz2 + k;
dens[idx2] = CUCREAL(b[idx]) * CUCREAL(b[idx]) + CUCIMAG(b[idx]) * CUCIMAG(b[idx]);
}
/*
* Density
*
* wf = Source/destination grid for operation (CUCOMPLEX *; input).
* dens = Density grid (CUREAL *; output).
* nx = # of points along x (INT).
* ny = # of points along y (INT).
* nz = # of points along z (INT).
*
*/
extern "C" void grid_cuda_wf_densityW(CUCOMPLEX *grid, CUREAL *dens, INT nx, INT ny, INT nz) {
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 blocks((nz + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(ny + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(nx + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( grid_cuda_wf_density_gpu), dim3(blocks),dim3(threads), 0, 0, grid, dens, nx, ny, nz, 2 * (nz / 2 + 1));
cuda_error_check();
}
/********************************************************************************************************************/
/*
* Add complex absorbing potential.
*
*/
__global__ void grid_cuda_wf_absorb_potential_gpu(CUCOMPLEX *gwf, CUCOMPLEX *pot, REAL amp, REAL rho0, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
REAL g, sq;
if(i >= nx || j >= ny || k >= nz) return;
if((g = grid_cuda_wf_absorb(i, j, k, lx, hx, ly, hy, lz, hz)) == 0.0) return;
idx = (i * ny + j) * nz + k;
sq = gwf[idx].x * gwf[idx].x + gwf[idx].y * gwf[idx].y - rho0;
pot[idx].y -= g * amp * sq;
}
/*
* Complex absorbing potential.
*
* gwf = wavefunction grid (CUCOMPLEX *; input).
* pot = potential (CUCOMPLEX *; output).
* amp = amplitude of the potential (CUREAL; input).
* rho0 = rho0 background (CUREAL; input).
* lx = lower index for abs boundary (INT; input).
* hx = upper index for abs boundary (INT; input).
* ly = lower index for abs boundary (INT; input).
* hy = upper index for abs boundary (INT; input).
* lz = lower index for abs boundary (INT; input).
* hz = upper index for abs boundary (INT; input).
* nx = # of points along x (INT).
* ny = # of points along y (INT).
* nz = # of points along z (INT).
*
*/
extern "C" void grid_cuda_wf_absorb_potentialW(CUCOMPLEX *gwf, CUCOMPLEX *pot, REAL amp, REAL rho0, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) {
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 blocks((nz + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(ny + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(nx + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( grid_cuda_wf_absorb_potential_gpu), dim3(blocks),dim3(threads), 0, 0, gwf, pot, amp, rho0, lx, hx, ly, hy, lz, hz, nx, ny, nz);
cuda_error_check();
}
| 49f180ef78dce40ef006e5136039c6e0becc645a.cu | /*
* CUDA device code for wf.
*
*/
#include <cuda/cuda_runtime_api.h>
#include <cuda/cuda.h>
#include <cuda/device_launch_parameters.h>
#include <cufft.h>
#include "cuda.h"
#include "cuda-math.h"
#include "defs.h"
#include "grid_wf-cuda.h"
extern void *grid_gpu_mem_addr;
extern "C" void cuda_error_check();
/********************************************************************************************************************/
/*
* Potential energy propagation in real space (possibly with absorbing boundaries).
*
*/
/* regular */
__global__ void grid_cuda_wf_propagate_potential_gpu1(CUCOMPLEX *b, CUCOMPLEX *pot, CUCOMPLEX c, CUREAL cons, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
b[idx] = b[idx] * CUCEXP(c * (cons + pot[idx]));
}
/* abs using imag time */
__global__ void grid_cuda_wf_propagate_potential_gpu2(CUCOMPLEX *b, CUCOMPLEX *pot, CUCOMPLEX c, CUREAL cons, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
CUREAL tmp;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
tmp = grid_cuda_wf_absorb(i, j, k, lx, hx, ly, hy, lz, hz);
c.x = c.y * tmp;
c.y *= 1.0 - tmp;
b[idx] = b[idx] * CUCEXP(c * (cons + pot[idx]));
}
/* abs using complex potential */
__global__ void grid_cuda_wf_propagate_potential_gpu3(CUCOMPLEX *b, CUCOMPLEX *pot, CUCOMPLEX c, CUCOMPLEX amp, CUREAL rho0, CUREAL cons, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
b[idx] = b[idx] * CUCEXP(c * (cons + pot[idx] - CUMAKE(0.0, 1.0) * amp * grid_cuda_wf_absorb(i, j, k, lx, hx, ly, hy, lz, hz)
* (CUCSQNORM(b[idx]) - rho0)));
}
/*
* Propagate potential energy in real space with absorbing boundaries.
*
* wf = Source/destination grid for operation (REAL complex *; input/output).
* pot = Potential grid (CUCOMPLEX *; input).
* time_step= Time step length (CUCOMPLEX; input).
* add_abs = Add complex abs potential? (char; input).
* amp = Amplitude for complex boundary (CUCOMPLEX; input).
* rho0 = Target value for |psi|^2 (REAL; input).
* cons = Constant to add to potential (REAL; input).
* lx = Lower bound for absorbing bc (INT; input).
* hx = Upper bound for absorbing bc (INT; input).
* ly = Lower bound for absorbing bc (INT; input).
* hy = Upper bound for absorbing bc (INT; input).
* lz = Lower bound for absorbing bc (INT; input).
* hz = Upper bound for absorbing bc (INT; input).
* nx = # of points along x (INT).
* ny = # of points along y (INT).
* nz = # of points along z (INT).
*
* Only periodic boundaries!
*
*/
extern "C" void grid_cuda_wf_propagate_potentialW(CUCOMPLEX *grid, CUCOMPLEX *pot, CUCOMPLEX time_step, char add_abs, CUCOMPLEX amp, CUREAL rho0, CUREAL cons, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) {
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 blocks((nz + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(ny + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(nx + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK);
CUCOMPLEX c;
c.x = time_step.y / HBAR;
c.y = -time_step.x / HBAR;
if(lz) {
if(add_abs)
grid_cuda_wf_propagate_potential_gpu3<<<blocks,threads>>>(grid, pot, c, amp, rho0, cons, lx, hx, ly, hy, lz, hz, nx, ny, nz);
else
grid_cuda_wf_propagate_potential_gpu2<<<blocks,threads>>>(grid, pot, c, cons, lx, hx, ly, hy, lz, hz, nx, ny, nz);
} else
grid_cuda_wf_propagate_potential_gpu1<<<blocks,threads>>>(grid, pot, c, cons, nx, ny, nz);
cuda_error_check();
}
/********************************************************************************************************************/
/*
* Density
*
*/
__global__ void grid_cuda_wf_density_gpu(CUCOMPLEX *b, CUREAL *dens, INT nx, INT ny, INT nz, INT nz2) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx, idx2;
if(i >= nx || j >= ny || k >= nz) return;
idx = (i * ny + j) * nz + k;
idx2 = (i * ny + j) * nz2 + k;
dens[idx2] = CUCREAL(b[idx]) * CUCREAL(b[idx]) + CUCIMAG(b[idx]) * CUCIMAG(b[idx]);
}
/*
* Density
*
* wf = Source/destination grid for operation (CUCOMPLEX *; input).
* dens = Density grid (CUREAL *; output).
* nx = # of points along x (INT).
* ny = # of points along y (INT).
* nz = # of points along z (INT).
*
*/
extern "C" void grid_cuda_wf_densityW(CUCOMPLEX *grid, CUREAL *dens, INT nx, INT ny, INT nz) {
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 blocks((nz + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(ny + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(nx + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK);
grid_cuda_wf_density_gpu<<<blocks,threads>>>(grid, dens, nx, ny, nz, 2 * (nz / 2 + 1));
cuda_error_check();
}
/********************************************************************************************************************/
/*
* Add complex absorbing potential.
*
*/
__global__ void grid_cuda_wf_absorb_potential_gpu(CUCOMPLEX *gwf, CUCOMPLEX *pot, REAL amp, REAL rho0, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) { /* Exectutes at GPU */
INT k = blockIdx.x * blockDim.x + threadIdx.x, j = blockIdx.y * blockDim.y + threadIdx.y, i = blockIdx.z * blockDim.z + threadIdx.z, idx;
REAL g, sq;
if(i >= nx || j >= ny || k >= nz) return;
if((g = grid_cuda_wf_absorb(i, j, k, lx, hx, ly, hy, lz, hz)) == 0.0) return;
idx = (i * ny + j) * nz + k;
sq = gwf[idx].x * gwf[idx].x + gwf[idx].y * gwf[idx].y - rho0;
pot[idx].y -= g * amp * sq;
}
/*
* Complex absorbing potential.
*
* gwf = wavefunction grid (CUCOMPLEX *; input).
* pot = potential (CUCOMPLEX *; output).
* amp = amplitude of the potential (CUREAL; input).
* rho0 = rho0 background (CUREAL; input).
* lx = lower index for abs boundary (INT; input).
* hx = upper index for abs boundary (INT; input).
* ly = lower index for abs boundary (INT; input).
* hy = upper index for abs boundary (INT; input).
* lz = lower index for abs boundary (INT; input).
* hz = upper index for abs boundary (INT; input).
* nx = # of points along x (INT).
* ny = # of points along y (INT).
* nz = # of points along z (INT).
*
*/
extern "C" void grid_cuda_wf_absorb_potentialW(CUCOMPLEX *gwf, CUCOMPLEX *pot, REAL amp, REAL rho0, INT lx, INT hx, INT ly, INT hy, INT lz, INT hz, INT nx, INT ny, INT nz) {
dim3 threads(CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK, CUDA_THREADS_PER_BLOCK);
dim3 blocks((nz + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(ny + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK,
(nx + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK);
grid_cuda_wf_absorb_potential_gpu<<<blocks,threads>>>(gwf, pot, amp, rho0, lx, hx, ly, hy, lz, hz, nx, ny, nz);
cuda_error_check();
}
|
4bd05dd85253676cd42422366d3e3a4f7e36eef1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <UnitTest++.h>
#include <iostream>
#include <iomanip>
#include "MonteRayDefinitions.hh"
#include "GPUUtilityFunctions.hh"
#include "Tally.hh"
#ifdef __HIPCC__
CUDA_CALLABLE_KERNEL kernelScore(MonteRay::Tally* const pTally, MonteRay::Tally::TallyFloat value) {
if (threadIdx.x < pTally->size()){
for (int i = 0; i < pTally->nTimeBins(); i++){
for (int j = 0; j < pTally->nEnergyBins(); j++){
pTally->score(value, threadIdx.x, j, i);
}
}
}
}
#endif
SUITE( Tally_ptester ) {
struct TallyFixture {
using DataFloat = MonteRay::Tally::DataFloat;
MonteRay::Tally::Builder tallyBuilder;
TallyFixture() {
int nSpatialBins = 10;
MonteRay::Vector<DataFloat> energyBins = {2.5e-5, 1.0, 10.0};
MonteRay::Vector<DataFloat> timeBins = {1.5, 10.0};
bool useStats = true;
tallyBuilder.timeBinEdges(timeBins);
tallyBuilder.energyBinEdges(energyBins);
tallyBuilder.spatialBins(nSpatialBins);
tallyBuilder.useStats(useStats);
}
};
TEST_FIXTURE(TallyFixture, score_and_gather ) {
auto pTally = std::make_unique<MonteRay::Tally>(tallyBuilder.build());
pTally->score(1.0, 0);
pTally->score(1.0, 9);
pTally->gatherWorkGroup();
pTally->gather();
const auto& PA = MonteRay::MonteRayParallelAssistant::getInstance();
std::cout << " world Size " << PA.getWorldSize() << " world rank " << PA.getWorldRank() << std::endl;
if( PA.getWorldRank() == 0 ) {
CHECK_CLOSE( 1.0*PA.getWorldSize(), pTally->contribution(0), 1e-6);
CHECK_CLOSE( 1.0*PA.getWorldSize(), pTally->contribution(9), 1e-6);
}
pTally->score(1.0, 0);
pTally->gatherWorkGroup();
pTally->gather();
if( PA.getWorldRank() == 0 ) {
CHECK_CLOSE( 2.0*PA.getWorldSize(), pTally->contribution(0), 1e-6);
CHECK_CLOSE( 1.0*PA.getWorldSize(), pTally->contribution(9), 1e-6);
} else {
CHECK_CLOSE( 0.0, pTally->contribution(0,0), 1e-6);
}
}
#ifdef __HIPCC__
TEST_FIXTURE(TallyFixture, scoreOnGPUThenGather) {
constexpr int nBlocks = 100;
constexpr int nThreadsPerBlock = 32;
MonteRay::Vector<DataFloat> binEdges = {0.5, 1.5};
tallyBuilder.energyBinEdges( binEdges );
tallyBuilder.timeBinEdges( binEdges );
tallyBuilder.spatialBins(nThreadsPerBlock);
auto pTally = std::make_unique<MonteRay::Tally>(tallyBuilder.build());
CHECK_EQUAL(9*nThreadsPerBlock, pTally->size());
hipLaunchKernelGGL(( kernelScore), dim3(nBlocks), dim3(nThreadsPerBlock), 0, 0, pTally.get(), 1.0);
hipDeviceSynchronize();
pTally->gatherWorkGroup(); // first gather all work-group ranks
pTally->gather(); // now gather between masters of work groups
const auto& PA = MonteRay::MonteRayParallelAssistant::getInstance();
if( PA.getWorldRank() == 0 ) {
for (size_t i = 0; i < pTally->size(); i++){
if (static_cast<double>(nBlocks) - pTally->contribution(i) > 1.0e-6) std::cout << i << " test \n";
CHECK_CLOSE(PA.getWorldSize()*static_cast<MonteRay::Tally::TallyFloat>(nBlocks), pTally->contribution(i), 1e-6);
}
}
}
#endif
} // end namespace
| 4bd05dd85253676cd42422366d3e3a4f7e36eef1.cu | #include <UnitTest++.h>
#include <iostream>
#include <iomanip>
#include "MonteRayDefinitions.hh"
#include "GPUUtilityFunctions.hh"
#include "Tally.hh"
#ifdef __CUDACC__
CUDA_CALLABLE_KERNEL kernelScore(MonteRay::Tally* const pTally, MonteRay::Tally::TallyFloat value) {
if (threadIdx.x < pTally->size()){
for (int i = 0; i < pTally->nTimeBins(); i++){
for (int j = 0; j < pTally->nEnergyBins(); j++){
pTally->score(value, threadIdx.x, j, i);
}
}
}
}
#endif
SUITE( Tally_ptester ) {
struct TallyFixture {
using DataFloat = MonteRay::Tally::DataFloat;
MonteRay::Tally::Builder tallyBuilder;
TallyFixture() {
int nSpatialBins = 10;
MonteRay::Vector<DataFloat> energyBins = {2.5e-5, 1.0, 10.0};
MonteRay::Vector<DataFloat> timeBins = {1.5, 10.0};
bool useStats = true;
tallyBuilder.timeBinEdges(timeBins);
tallyBuilder.energyBinEdges(energyBins);
tallyBuilder.spatialBins(nSpatialBins);
tallyBuilder.useStats(useStats);
}
};
TEST_FIXTURE(TallyFixture, score_and_gather ) {
auto pTally = std::make_unique<MonteRay::Tally>(tallyBuilder.build());
pTally->score(1.0, 0);
pTally->score(1.0, 9);
pTally->gatherWorkGroup();
pTally->gather();
const auto& PA = MonteRay::MonteRayParallelAssistant::getInstance();
std::cout << " world Size " << PA.getWorldSize() << " world rank " << PA.getWorldRank() << std::endl;
if( PA.getWorldRank() == 0 ) {
CHECK_CLOSE( 1.0*PA.getWorldSize(), pTally->contribution(0), 1e-6);
CHECK_CLOSE( 1.0*PA.getWorldSize(), pTally->contribution(9), 1e-6);
}
pTally->score(1.0, 0);
pTally->gatherWorkGroup();
pTally->gather();
if( PA.getWorldRank() == 0 ) {
CHECK_CLOSE( 2.0*PA.getWorldSize(), pTally->contribution(0), 1e-6);
CHECK_CLOSE( 1.0*PA.getWorldSize(), pTally->contribution(9), 1e-6);
} else {
CHECK_CLOSE( 0.0, pTally->contribution(0,0), 1e-6);
}
}
#ifdef __CUDACC__
TEST_FIXTURE(TallyFixture, scoreOnGPUThenGather) {
constexpr int nBlocks = 100;
constexpr int nThreadsPerBlock = 32;
MonteRay::Vector<DataFloat> binEdges = {0.5, 1.5};
tallyBuilder.energyBinEdges( binEdges );
tallyBuilder.timeBinEdges( binEdges );
tallyBuilder.spatialBins(nThreadsPerBlock);
auto pTally = std::make_unique<MonteRay::Tally>(tallyBuilder.build());
CHECK_EQUAL(9*nThreadsPerBlock, pTally->size());
kernelScore<<<nBlocks, nThreadsPerBlock>>>(pTally.get(), 1.0);
cudaDeviceSynchronize();
pTally->gatherWorkGroup(); // first gather all work-group ranks
pTally->gather(); // now gather between masters of work groups
const auto& PA = MonteRay::MonteRayParallelAssistant::getInstance();
if( PA.getWorldRank() == 0 ) {
for (size_t i = 0; i < pTally->size(); i++){
if (static_cast<double>(nBlocks) - pTally->contribution(i) > 1.0e-6) std::cout << i << " test \n";
CHECK_CLOSE(PA.getWorldSize()*static_cast<MonteRay::Tally::TallyFloat>(nBlocks), pTally->contribution(i), 1e-6);
}
}
}
#endif
} // end namespace
|
double_disk_center.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/model/double_disk_center.cu
/// @brief The implementation of the generator for double disk shape particles with a large particle at each center
///
/// @author Mu Yang <emfomy@gmail.com>
///
#include <nbfmm/model.hpp>
#include <cmath>
#include <nbfmm/core/kernel_function.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_model
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Generate double disk shape particles with a large particle at each center
///
/// @param[in] num_particle the number of particles.
/// @param[in] offset the offset of previous particle positions.
/// @param[out] position_previous the previous particle positions.
///
__global__ void generateDoubleDiskCenterDevice(
const int num_particle,
float2 offset,
float2* position_previous
) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= num_particle ) {
return;
}
position_previous[idx] += offset;
}
/// @}
// Generate double disk shape particles with a large particle at each center
void nbfmm::model::generateDoubleDiskCenter(
const int num_particle1,
const int num_particle2,
const float2 center_position1,
const float2 center_position2,
const float radius1,
const float radius2,
const float weight,
const float center_weight1,
const float center_weight2,
const float eccentricity,
const float tick,
float2* gpuptr_position_current,
float2* gpuptr_position_previous,
float* gpuptr_weight_current
) {
generateDiskCenter(num_particle1, center_position1, radius1, weight, center_weight1, tick,
gpuptr_position_current, gpuptr_position_previous, gpuptr_weight_current);
generateDiskCenter(num_particle2, center_position2, radius2, weight, center_weight2, tick,
gpuptr_position_current+num_particle1, gpuptr_position_previous+num_particle1,
gpuptr_weight_current+num_particle1);
const float weight1 = weight * (num_particle1-1) + center_weight1;
const float weight2 = weight * (num_particle2-1) + center_weight2;
const float2 effect1 = kernelFunction(center_position1, center_position2, weight2);
const float2 effect2 = kernelFunction(center_position2, center_position1, weight1);
float2 distance = center_position1 - center_position2;
float r = sqrt(distance.x * distance.x + distance.y * distance.y);
float a1 = sqrt(effect1.x * effect1.x + effect1.y * effect1.y);
float a2 = sqrt(effect2.x * effect2.x + effect2.y * effect2.y);
float r1 = r * weight2 / (weight1 + weight2);
float r2 = r * weight1 / (weight1 + weight2);
float2 offset1;
offset1.x = -effect1.y; offset1.y = effect1.x;
offset1 *= sqrt(r1/a1) * tick / exp2(eccentricity);
offset1 -= effect1 * tick * tick * eccentricity;
float2 offset2;
offset2.x = -effect2.y; offset2.y = effect2.x;
offset2 *= sqrt(r2/a2) * tick / exp2(eccentricity);
offset2 -= effect2 * tick * tick * eccentricity;
hipLaunchKernelGGL(( generateDoubleDiskCenterDevice), dim3(kMaxBlockDim), dim3(((num_particle1-1)/kMaxBlockDim)+1), 0, 0,
num_particle1, offset1, gpuptr_position_previous
);
hipLaunchKernelGGL(( generateDoubleDiskCenterDevice), dim3(kMaxBlockDim), dim3(((num_particle2-1)/kMaxBlockDim)+1), 0, 0,
num_particle2, offset2, gpuptr_position_previous+num_particle1
);
}
| double_disk_center.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/model/double_disk_center.cu
/// @brief The implementation of the generator for double disk shape particles with a large particle at each center
///
/// @author Mu Yang <emfomy@gmail.com>
///
#include <nbfmm/model.hpp>
#include <cmath>
#include <nbfmm/core/kernel_function.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_model
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Generate double disk shape particles with a large particle at each center
///
/// @param[in] num_particle the number of particles.
/// @param[in] offset the offset of previous particle positions.
/// @param[out] position_previous the previous particle positions.
///
__global__ void generateDoubleDiskCenterDevice(
const int num_particle,
float2 offset,
float2* position_previous
) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= num_particle ) {
return;
}
position_previous[idx] += offset;
}
/// @}
// Generate double disk shape particles with a large particle at each center
void nbfmm::model::generateDoubleDiskCenter(
const int num_particle1,
const int num_particle2,
const float2 center_position1,
const float2 center_position2,
const float radius1,
const float radius2,
const float weight,
const float center_weight1,
const float center_weight2,
const float eccentricity,
const float tick,
float2* gpuptr_position_current,
float2* gpuptr_position_previous,
float* gpuptr_weight_current
) {
generateDiskCenter(num_particle1, center_position1, radius1, weight, center_weight1, tick,
gpuptr_position_current, gpuptr_position_previous, gpuptr_weight_current);
generateDiskCenter(num_particle2, center_position2, radius2, weight, center_weight2, tick,
gpuptr_position_current+num_particle1, gpuptr_position_previous+num_particle1,
gpuptr_weight_current+num_particle1);
const float weight1 = weight * (num_particle1-1) + center_weight1;
const float weight2 = weight * (num_particle2-1) + center_weight2;
const float2 effect1 = kernelFunction(center_position1, center_position2, weight2);
const float2 effect2 = kernelFunction(center_position2, center_position1, weight1);
float2 distance = center_position1 - center_position2;
float r = sqrt(distance.x * distance.x + distance.y * distance.y);
float a1 = sqrt(effect1.x * effect1.x + effect1.y * effect1.y);
float a2 = sqrt(effect2.x * effect2.x + effect2.y * effect2.y);
float r1 = r * weight2 / (weight1 + weight2);
float r2 = r * weight1 / (weight1 + weight2);
float2 offset1;
offset1.x = -effect1.y; offset1.y = effect1.x;
offset1 *= sqrt(r1/a1) * tick / exp2(eccentricity);
offset1 -= effect1 * tick * tick * eccentricity;
float2 offset2;
offset2.x = -effect2.y; offset2.y = effect2.x;
offset2 *= sqrt(r2/a2) * tick / exp2(eccentricity);
offset2 -= effect2 * tick * tick * eccentricity;
generateDoubleDiskCenterDevice<<<kMaxBlockDim, ((num_particle1-1)/kMaxBlockDim)+1>>>(
num_particle1, offset1, gpuptr_position_previous
);
generateDoubleDiskCenterDevice<<<kMaxBlockDim, ((num_particle2-1)/kMaxBlockDim)+1>>>(
num_particle2, offset2, gpuptr_position_previous+num_particle1
);
}
|
77339402a117b5a1027afbd72d21dfcff087faf0.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <torch/torch.h>
/* Includes, cuda */
#include <rocblas.h>
#include <hip/hip_runtime.h>
// includes cublaslt
#include <cublasLt.h>
// constants for fused bias+relu kernel
#define BIAS_RELU_FW_NTHREADS 128 // forward number of thread per block
#define BIAS_RELU_BW_NTHREADS_X 32 // backward number of thread in feature dim
#define BIAS_RELU_BW_NTHREADS_Y 16 // backward number of thread in batch dim
#define BIAS_RELU_RED_PER_THREAD 16 // backward minimal reduction length per thread
// move to a header later on
#define ILP 4
// True when pointer p is aligned to an ILP-wide vector of T, i.e. safe for
// the vectorized load_store() path below.
template<typename T>
__host__ __device__ __forceinline__ bool is_aligned(T* p){
  const uint64_t addr = (uint64_t)p;
  const uint64_t vec_bytes = ILP * sizeof(T);
  return addr % vec_bytes == 0;
}
// Copies ILP consecutive elements of T as one aligned vector transaction.
// Offsets are in units of ILP-wide vectors, not elements. Both pointers must
// satisfy is_aligned() — the aligned_storage type forces a single wide
// load/store instead of ILP scalar ones.
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
  ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
// Overload for a volatile source (e.g. the inter-CTA `intermediate` buffer):
// same vectorized copy, but the cast drops volatility for the wide access.
// Offsets are in ILP-wide vector units.
template<typename T>
__device__ __forceinline__ void load_store(T* dst, volatile T* src, int dst_offset, int src_offset){
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
  ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
// Overload for a volatile destination (publishing partial sums for other
// CTAs to read): same vectorized copy, offsets in ILP-wide vector units.
template<typename T>
__device__ __forceinline__ void load_store(volatile T* dst, T* src, int dst_offset, int src_offset){
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
  ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
// ReLU kept in FP32 only; half inputs are cast to float by the caller.
__device__ __inline__ float relu(float a) {
  return max(a, 0.f);
}
// Logistic sigmoid kept in FP32 only; half inputs are cast to float by the caller.
__device__ __inline__ float sigmoid(float a) {
  const float e = expf(-a);
  return 1.f / (1.f + e);
}
// FP64 wrapper around hipblasGemmEx.
// With computeType HIP_R_64F the scaling factors alpha/beta must themselves
// be double — their type follows computeType, not the C type of the matrix
// data. The original code forwarded the float pointers directly, which would
// make the library read 4 float bytes as half of a double; widen locally
// instead. Interface (float* scalars) is kept for the callers.
hipblasStatus_t mlp_gemm(
    hipblasHandle_t handle,
    hipblasOperation_t transa,
    hipblasOperation_t transb,
    int m,
    int n,
    int k,
    float* alpha,
    const double* A,
    int lda,
    const double* B,
    int ldb,
    const float* beta,
    double* C,
    int ldc) {
  // Widen host-side scalars to match the FP64 compute type.
  double alpha64 = static_cast<double>(*alpha);
  double beta64 = static_cast<double>(*beta);
  return hipblasGemmEx(
      handle,
      transa,
      transb,
      m,
      n,
      k,
      &alpha64,
      A,
      HIP_R_64F,
      lda,
      B,
      HIP_R_64F,
      ldb,
      &beta64,
      C,
      HIP_R_64F,
      ldc,
      HIP_R_64F,
      HIPBLAS_GEMM_DEFAULT);
}
// FP32 wrapper around hipblasGemmEx: data, scalars and compute type are all
// HIP_R_32F, so alpha/beta pass straight through.
hipblasStatus_t mlp_gemm(
    hipblasHandle_t handle,
    hipblasOperation_t transa,
    hipblasOperation_t transb,
    int m,
    int n,
    int k,
    float* alpha,
    const float* A,
    int lda,
    const float* B,
    int ldb,
    const float* beta,
    float* C,
    int ldc) {
  return hipblasGemmEx(
      handle, transa, transb, m, n, k,
      alpha, A, HIP_R_32F, lda,
      B, HIP_R_32F, ldb,
      beta, C, HIP_R_32F, ldc,
      HIP_R_32F, HIPBLAS_GEMM_DEFAULT);
}
// FP16 wrapper around hipblasGemmEx: half data with FP32 accumulation and
// the tensor-op algorithm selector. Note beta is non-const here to match
// the existing overload set.
hipblasStatus_t mlp_gemm(
    hipblasHandle_t handle,
    hipblasOperation_t transa,
    hipblasOperation_t transb,
    int m,
    int n,
    int k,
    float* alpha,
    const at::Half* A,
    int lda,
    const at::Half* B,
    int ldb,
    float* beta,
    at::Half* C,
    int ldc) {
  return hipblasGemmEx(
      handle, transa, transb, m, n, k,
      alpha, A, HIP_R_16F, lda,
      B, HIP_R_16F, ldb,
      beta, C, HIP_R_16F, ldc,
      HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
// cublasLt FP16 GEMM (FP32 accumulate) with an optional fused epilogue:
// bias add and/or ReLU are applied by cublasLt itself when requested.
// Returns 0 on success and 1 on any failure, so the caller can fall back
// to the plain hipblasGemmEx path.
int mlp_gemm_lt(
    cublasLtHandle_t ltHandle,
    hipblasOperation_t transa,
    hipblasOperation_t transb,
    int m,
    int n,
    int k,
    float *alpha, /* host pointer */
    const at::Half* A,
    int lda,
    const at::Half* B,
    int ldb,
    float *beta, /* host pointer */
    at::Half* C,
    int ldc,
    void *workspace,
    size_t workspaceSize,
    hipStream_t stream,
    bool use_bias,
    bool use_relu,
    const void* bias) {
  hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
  cublasLtMatmulDescOpaque_t operationDesc = {};
  cublasLtMatrixLayoutOpaque_t Adesc = {}, Bdesc = {}, Cdesc = {};
  cublasLtMatmulPreferenceOpaque_t preference = {};
  int returnedResults = 0;
  cublasLtMatmulHeuristicResult_t heuristicResult = {};
  cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT;
  // Create operation descriptor; see cublasLtMatmulDescAttributes_t
  // for details about defaults; here we just set the transforms for
  // A and B. Compute/scale type is FP32.
  status = cublasLtMatmulDescInit(&operationDesc, CUBLAS_COMPUTE_32F, HIP_R_32F);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa));
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // Fixed: size argument previously used sizeof(transa); the enums share a
  // size so it was harmless, but use the matching operand.
  status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transb));
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // Select the fused epilogue: bias and/or ReLU applied inside the matmul.
  if (use_bias) {
    status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias));
    if (status != HIPBLAS_STATUS_SUCCESS) {
      goto CLEANUP;
    }
    if (use_relu) {
      epilogue = CUBLASLT_EPILOGUE_RELU_BIAS;
    } else {
      epilogue = CUBLASLT_EPILOGUE_BIAS;
    }
  } else {
    if (use_relu) {
      epilogue = CUBLASLT_EPILOGUE_RELU;
    }
  }
  status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue));
  if (status != HIPBLAS_STATUS_SUCCESS) {
    goto CLEANUP;
  }
  // Create matrix descriptors. Not setting any extra attributes.
  status = cublasLtMatrixLayoutInit(
    &Adesc, HIP_R_16F, transa == HIPBLAS_OP_N ? m : k, transa == HIPBLAS_OP_N ? k : m, lda);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatrixLayoutInit(
    &Bdesc, HIP_R_16F, transb == HIPBLAS_OP_N ? k : n, transb == HIPBLAS_OP_N ? n : k, ldb);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatrixLayoutInit(&Cdesc, HIP_R_16F, m, n, ldc);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // Create preference handle; In general, extra attributes can be
  // used here to disable tensor ops or to make sure algo selected
  // will work with badly aligned A, B, C. However, for simplicity
  // here we assume A,B,C are always well aligned (e.g., directly
  // come from hipMalloc)
  status = cublasLtMatmulPreferenceInit(&preference);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatmulPreferenceSetAttribute(
    &preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize));
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // We just need the best available heuristic to try and run matmul.
  // There is no guarantee that this will work. For example, if A is
  // badly aligned, you can request more (e.g. 32) algos and try to
  // run them one by one until something works.
  status = cublasLtMatmulAlgoGetHeuristic(
    ltHandle, &operationDesc, &Adesc, &Bdesc, &Cdesc, &Cdesc, &preference, 1, &heuristicResult, &returnedResults);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  if (returnedResults == 0) {
    status = HIPBLAS_STATUS_NOT_SUPPORTED;
    goto CLEANUP;
  }
  status = cublasLtMatmul(ltHandle,
                          &operationDesc,
                          alpha,
                          A,
                          &Adesc,
                          B,
                          &Bdesc,
                          beta,
                          C,
                          &Cdesc,
                          C,
                          &Cdesc,
                          &heuristicResult.algo,
                          workspace,
                          workspaceSize,
                          stream);
CLEANUP:
  // Descriptors are no longer needed as all GPU work was already
  // enqueued.
  return status == HIPBLAS_STATUS_SUCCESS ? 0 : 1;
}
// FP64 overload of the cublasLt path: not implemented. Returning nonzero
// makes the caller fall back to the plain hipblasGemmEx route.
int mlp_gemm_lt(
    cublasLtHandle_t ltHandle,
    hipblasOperation_t transa,
    hipblasOperation_t transb,
    int m,
    int n,
    int k,
    float *alpha, /* host pointer */
    const double* A,
    int lda,
    const double* B,
    int ldb,
    float *beta, /* host pointer */
    double* C,
    int ldc,
    void *workspace,
    size_t workspaceSize,
    hipStream_t stream,
    bool use_bias,
    bool use_relu,
    const void* bias) {
  return 1;  // unsupported for FP64 -> caller uses mlp_gemm instead
}
// cublasLt FP32 GEMM with an optional fused epilogue (bias add and/or ReLU
// applied by cublasLt). Returns 0 on success and 1 on any failure, so the
// caller can fall back to the plain hipblasGemmEx path.
int mlp_gemm_lt(
    cublasLtHandle_t ltHandle,
    hipblasOperation_t transa,
    hipblasOperation_t transb,
    int m,
    int n,
    int k,
    float *alpha, /* host pointer */
    const float *A,
    int lda,
    const float *B,
    int ldb,
    float *beta, /* host pointer */
    float *C,
    int ldc,
    void *workspace,
    size_t workspaceSize,
    hipStream_t stream,
    bool use_bias,
    bool use_relu,
    const void* bias) {
  hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
  cublasLtMatmulDescOpaque_t operationDesc = {};
  cublasLtMatrixLayoutOpaque_t Adesc = {}, Bdesc = {}, Cdesc = {};
  cublasLtMatmulPreferenceOpaque_t preference = {};
  int returnedResults = 0;
  cublasLtMatmulHeuristicResult_t heuristicResult = {};
  cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT;
  // Create operation descriptor; see cublasLtMatmulDescAttributes_t
  // for details about defaults; here we just set the transforms for
  // A and B. Compute/scale type is FP32.
  status = cublasLtMatmulDescInit(&operationDesc, CUBLAS_COMPUTE_32F, HIP_R_32F);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa));
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // Fixed: size argument previously used sizeof(transa); the enums share a
  // size so it was harmless, but use the matching operand.
  status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transb));
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // Select the fused epilogue: bias and/or ReLU applied inside the matmul.
  if (use_bias) {
    status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias));
    if (status != HIPBLAS_STATUS_SUCCESS) {
      goto CLEANUP;
    }
    if (use_relu) {
      epilogue = CUBLASLT_EPILOGUE_RELU_BIAS;
    } else {
      epilogue = CUBLASLT_EPILOGUE_BIAS;
    }
  } else {
    if (use_relu) {
      epilogue = CUBLASLT_EPILOGUE_RELU;
    }
  }
  status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue));
  if (status != HIPBLAS_STATUS_SUCCESS) {
    goto CLEANUP;
  }
  // Create matrix descriptors. Not setting any extra attributes.
  status = cublasLtMatrixLayoutInit(
    &Adesc, HIP_R_32F, transa == HIPBLAS_OP_N ? m : k, transa == HIPBLAS_OP_N ? k : m, lda);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatrixLayoutInit(
    &Bdesc, HIP_R_32F, transb == HIPBLAS_OP_N ? k : n, transb == HIPBLAS_OP_N ? n : k, ldb);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatrixLayoutInit(&Cdesc, HIP_R_32F, m, n, ldc);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // Create preference handle; In general, extra attributes can be
  // used here to disable tensor ops or to make sure algo selected
  // will work with badly aligned A, B, C. However, for simplicity
  // here we assume A,B,C are always well aligned (e.g., directly
  // come from hipMalloc)
  status = cublasLtMatmulPreferenceInit(&preference);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  status = cublasLtMatmulPreferenceSetAttribute(
    &preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize));
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  // We just need the best available heuristic to try and run matmul.
  // There is no guarantee that this will work. For example, if A is
  // badly aligned, you can request more (e.g. 32) algos and try to
  // run them one by one until something works.
  status = cublasLtMatmulAlgoGetHeuristic(
    ltHandle, &operationDesc, &Adesc, &Bdesc, &Cdesc, &Cdesc, &preference, 1, &heuristicResult, &returnedResults);
  if (status != HIPBLAS_STATUS_SUCCESS) goto CLEANUP;
  if (returnedResults == 0) {
    status = HIPBLAS_STATUS_NOT_SUPPORTED;
    goto CLEANUP;
  }
  status = cublasLtMatmul(ltHandle,
                          &operationDesc,
                          alpha,
                          A,
                          &Adesc,
                          B,
                          &Bdesc,
                          beta,
                          C,
                          &Cdesc,
                          C,
                          &Cdesc,
                          &heuristicResult.algo,
                          workspace,
                          workspaceSize,
                          stream);
CLEANUP:
  // Descriptors are no longer needed as all GPU work was already
  // enqueued.
  return status == HIPBLAS_STATUS_SUCCESS ? 0 : 1;
}
// Bias ADD. Assume input X is [features x batch size], column major.
// Bias is one 'features' long vector, with implicit broadcast; updates X
// in place. The fast path uses ILP-wide vector load/stores when X and b are
// aligned and features is a multiple of ILP; otherwise a bounds-checked
// scalar path runs.
template <typename T>
__global__ void biasAdd_fprop(T *X, T *b, uint batch_size, uint features) {
  T r_x[ILP];
  T r_b[ILP];
  if(is_aligned(X) && is_aligned(b) && features % ILP ==0) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
      // tid indexes ILP-wide vectors; row is the matching vector index in b.
      int row = tid % (features / ILP);
      load_store(r_x, X, 0 , tid);
      load_store(r_b, b, 0 , row);
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        // Accumulate in FP32 regardless of T.
        float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
        r_x[ii] = bias_sum;
      }
      load_store(X, r_x, tid , 0);
    }
  } else {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          // Fixed: bias index must follow the element actually loaded (idx),
          // not tid — the old `tid % features` picked the wrong bias entry
          // for the unrolled iterations ii > 0.
          int row = idx % features;
          r_x[ii] = X[idx];
          r_b[ii] = b[row];
        }
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
        r_x[ii] = bias_sum;
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          X[idx] = r_x[ii];
        }
      }
    }
  }
}
// Bias ADD + ReLU. Assume input X is [features x batch size], column major.
// Bias is one 'features' long vector, broadcast across the batch; ReLU is
// fused after the add. Safe to call in-place. Fast path uses ILP-wide
// vector load/stores when X and b are aligned and features % ILP == 0.
template <typename T>
__global__ void biasAddRelu_fprop(T *X, T *b, uint batch_size, uint features) {
  T r_x[ILP];
  T r_b[ILP];
  if(is_aligned(X) && is_aligned(b) && features % ILP ==0) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
      // tid indexes ILP-wide vectors; row is the matching vector index in b.
      int row = tid % (features / ILP);
      load_store(r_x, X, 0 , tid);
      load_store(r_b, b, 0 , row);
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        // Accumulate in FP32, then apply ReLU before writing back.
        float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
        r_x[ii] = relu(bias_sum);
      }
      load_store(X, r_x, tid , 0);
    }
  } else {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          // Fixed: bias index must follow the element actually loaded (idx),
          // not tid — the old `tid % features` picked the wrong bias entry
          // for the unrolled iterations ii > 0.
          int row = idx % features;
          r_x[ii] = X[idx];
          r_b[ii] = b[row];
        }
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
        r_x[ii] = relu(bias_sum);
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          X[idx] = r_x[ii];
        }
      }
    }
  }
}
// In-place ReLU over X, a [features x batch_size] column-major buffer.
// Vectorized path: each thread handles one ILP-wide vector per step when X
// is vector-aligned and features divides evenly by ILP. Fallback: a
// bounds-checked, ILP-unrolled scalar grid-stride loop.
template <typename T>
__global__ void Relu_fprop(T *X, uint batch_size, uint features) {
  T buf[ILP];
  const uint total = features * batch_size;
  if(is_aligned(X) && features % ILP ==0) {
    for (int vec = blockIdx.x * blockDim.x + threadIdx.x;
         vec * ILP < total;
         vec += blockDim.x * gridDim.x) {
      load_store(buf, X, 0, vec);
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        buf[j] = relu(static_cast<float>(buf[j]));
      }
      load_store(X, buf, vec, 0);
    }
  } else {
    for (int base = blockIdx.x * blockDim.x + threadIdx.x;
         base < total;
         base += ILP * blockDim.x * gridDim.x) {
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        int pos = base + j * blockDim.x * gridDim.x;
        if (pos < total) {
          buf[j] = X[pos];
        }
      }
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        buf[j] = relu(static_cast<float>(buf[j]));
      }
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        int pos = base + j * blockDim.x * gridDim.x;
        if (pos < total) {
          X[pos] = buf[j];
        }
      }
    }
  }
}
// In-place sigmoid over X, a [features x batch_size] column-major buffer.
// Vectorized path: each thread handles one ILP-wide vector per step when X
// is vector-aligned and features divides evenly by ILP. Fallback: a
// bounds-checked, ILP-unrolled scalar grid-stride loop.
template <typename T>
__global__ void Sigmoid_fprop(T *X, uint batch_size, uint features) {
  T buf[ILP];
  const uint total = features * batch_size;
  if(is_aligned(X) && features % ILP ==0) {
    for (int vec = blockIdx.x * blockDim.x + threadIdx.x;
         vec * ILP < total;
         vec += blockDim.x * gridDim.x) {
      load_store(buf, X, 0, vec);
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        buf[j] = sigmoid(static_cast<float>(buf[j]));
      }
      load_store(X, buf, vec, 0);
    }
  } else {
    for (int base = blockIdx.x * blockDim.x + threadIdx.x;
         base < total;
         base += ILP * blockDim.x * gridDim.x) {
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        int pos = base + j * blockDim.x * gridDim.x;
        if (pos < total) {
          buf[j] = X[pos];
        }
      }
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        buf[j] = sigmoid(static_cast<float>(buf[j]));
      }
#pragma unroll
      for (int j = 0; j < ILP; j++) {
        int pos = base + j * blockDim.x * gridDim.x;
        if (pos < total) {
          X[pos] = buf[j];
        }
      }
    }
  }
}
// ReLU backward: dX = dY where Y > 0, else 0.
// Y is the saved fprop output, [features x batch_size] column major.
// Safe to call in-place (dX may alias dY). Vectorized path requires all
// three buffers ILP-aligned and features % ILP == 0.
template <typename T>
__global__ void Relu_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) {
  T r_dy[ILP];
  T r_y[ILP];
  if(is_aligned(dY) &&
     is_aligned(Y) &&
     is_aligned(dX) &&
     features % ILP ==0) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
      load_store(r_dy, dY, 0 , tid);
      load_store(r_y, Y, 0 , tid);
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        // Gradient is zeroed wherever the forward activation was clipped.
        if ((float)r_y[ii] <= 0.f)
          r_dy[ii] = 0;
      }
      load_store(dX, r_dy, tid, 0);
    }
  } else {
    // Scalar fallback: ILP-unrolled grid-stride loop with bounds checks.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          r_dy[ii] = dY[idx];
          r_y[ii] = Y[idx];
        }
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        if ((float)r_y[ii] <= 0.f)
          r_dy[ii] = 0;
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          dX[idx] = r_dy[ii];
        }
      }
    }
  }
}
// Sigmoid backward: dX = Y * (1 - Y) * dY, using the saved fprop output Y
// ([features x batch_size], column major). Computation done in FP32.
// Safe to call in-place (dX may alias dY). Vectorized path requires all
// three buffers ILP-aligned and features % ILP == 0.
template <typename T>
__global__ void Sigmoid_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) {
  T r_dy[ILP];
  T r_y[ILP];
  if(is_aligned(dY) &&
     is_aligned(Y) &&
     is_aligned(dX) &&
     features % ILP ==0) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
      load_store(r_dy, dY, 0 , tid);
      load_store(r_y, Y, 0 , tid);
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        float grad_out = r_dy[ii];
        float out = r_y[ii];
        // Sigmoid derivative expressed via the saved output: s' = s*(1-s).
        float grad_i = out * ( 1.f - out) * grad_out;
        r_dy[ii] = grad_i;
      }
      load_store(dX, r_dy, tid, 0);
    }
  } else {
    // Scalar fallback: ILP-unrolled grid-stride loop with bounds checks.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          r_dy[ii] = dY[idx];
          r_y[ii] = Y[idx];
        }
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        float grad_out = r_dy[ii];
        float out = r_y[ii];
        float grad_i = out * ( 1.f - out) * grad_out;
        r_dy[ii] = grad_i;
      }
#pragma unroll
      for(int ii = 0; ii < ILP; ii++) {
        int idx = tid + ii * blockDim.x * gridDim.x;
        if(idx < features * batch_size) {
          dX[idx] = r_dy[ii];
        }
      }
    }
  }
}
// Choose a 2D grid for the pointwise backward kernels.
// block_x / block_y are the number of ELEMENTS each block covers, not the
// thread counts. grid_x tiles the feature dimension; grid_y splits the
// batch-dim reduction but is clamped so total CTAs stay near 4 per SM.
void get_biasAddRelu_bprop_grid_size(
    int yfeat,
    int batch_size,
    int block_x,
    int block_y,
    int* grid_x,
    int* grid_y) {
  // Ceil-div over the feature dimension.
  *grid_x = (yfeat + block_x - 1) / block_x;
  // Cap grid_y relative to SM count; 4 blocks/SM is a heuristic for sm_70
  // (an occupancy calculation could replace it).
  int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  int max_blocks_y = num_SMs * 4 / (*grid_x);
  // Number of reduction splits implied by the requested per-block span;
  // the kernel itself adapts to whatever grid_y we hand it.
  int nRedSplits = (batch_size + block_y - 1) / block_y;
  *grid_y = ::min(nRedSplits, max_blocks_y);
}
// Bias gradient: db[f] = sum over the batch of dY[f, :], computed
// deterministically via a 2-pass approach. Each CTA writes out a partial
// sum to `intermediate`, and the last CTA in the grid-Y dimension (detected
// with `semaphores`) accumulates the partials serially and writes `db`.
// NOTE(review): assumes `semaphores` is zero-initialized before launch and
// is left nonzero afterwards — confirm the caller resets it between calls.
template <typename T, int UNROLL_FACTOR>
__global__ void biasAdd_bprop(
    T* dY,
    int features,
    int batch_size,
    volatile float* intermediate,
    int* semaphores,
    T* db) {
  // The feature that this thread is responsible for
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  // Compute the span this thread is responsible for
  // For this block
  int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y;
  int b_nStart = blockIdx.y * b_chunkSize;
  int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart;
  // For this thread
  int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y;
  int nStart = threadIdx.y * chunkSize + b_nStart;
  int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart;
  // Per-CTA row of the partial-sum scratch buffer.
  volatile float* out = intermediate + blockIdx.y * features;
  // Flag to trigger last reduction.
  __shared__ bool isLastBlock;
  // we know block size for now
  __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y];
  // Accumulate db in FP32 always
  float db_local = 0;
  if (f < features) {
    int nidx = 0;
    // Handle non-multiple of UNROLL_FACTOR residue
    for (; nidx < nSpan % UNROLL_FACTOR; nidx++) {
      // 64-bit flat index: col * features + row can exceed 2^31.
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
      db_local += (float)dY[flat_idx];
    }
    // Handle meat of work
    for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) {
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
#pragma unroll 4
      for (int u = 0; u < UNROLL_FACTOR; u++) {
        db_local += (float)dY[flat_idx];
        flat_idx += features;
      }
    }
    // naive block reduction on y-dim
    int linear_idx = threadIdx.y * blockDim.x + threadIdx.x;
    smem[linear_idx] = db_local;
  }
  __syncthreads();
  if (f < features) {
    if(threadIdx.y == 0) {
      // threadIdx.y == 0 serially folds in the other y-rows' partials.
      for(int yidx = 1; yidx < blockDim.y; yidx++){
        db_local += smem[yidx * blockDim.x + threadIdx.x];
      }
      // block result is in db_local now for all threadIdx.y == 0
      // Write out partial result
      out[f] = db_local;
    }
  }
  // Make the partial visible to other CTAs before bumping the semaphore.
  __threadfence();
  __syncthreads();
  // Increment semaphore and check if this is the last CTA in the grid_y dimension.
  // Only thread (0,0) calls this
  if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) {
    unsigned int sum_idx;
    sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1);
    isLastBlock = (sum_idx == (gridDim.y - 1));
  }
  __syncthreads();
  db_local = 0;
  // No block reduction for now, only thread (*,0) do grid reduction
  if (isLastBlock && f < features) {
    if(threadIdx.y == 0) {
      // Serial (hence deterministic) accumulation of all CTA partials.
      for (int n = 0; n < gridDim.y; n++) {
        int row, col;
        row = f;
        col = n;
        db_local += (float)(intermediate[col * features + row]);
      }
      db[f] = (T)db_local;
    }
  }
}
// Fused ReLU backward + bias gradient: dX = dY masked by (Y > 0), and
// db[f] = sum over the batch of dX[f, :]. The sum is done deterministically
// via a 2-pass approach: each CTA writes a partial sum to `intermediate`,
// and the last CTA in the grid-Y dimension (detected with `semaphores`)
// accumulates the partials serially and writes `db`.
// Fixed: flat indices widened from int to int64_t, matching biasAdd_bprop —
// col * features + row overflows 32 bits for large feature*batch products.
// NOTE(review): assumes `semaphores` is zero-initialized before launch —
// confirm the caller resets it between calls.
template <typename T, int UNROLL_FACTOR>
__global__ void biasAddRelu_bprop(
    T* Y,
    T* dY,
    int features,
    int batch_size,
    T* dX,
    volatile float* intermediate,
    int* semaphores,
    T* db) {
  // The feature that this thread is responsible for
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  // Compute the span this thread is responsible for
  // For this block
  int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y;
  int b_nStart = blockIdx.y * b_chunkSize;
  int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart;
  // For this thread
  int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y;
  int nStart = threadIdx.y * chunkSize + b_nStart;
  int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart;
  // Per-CTA row of the partial-sum scratch buffer.
  volatile float* out = intermediate + blockIdx.y * features;
  // Flag to trigger last reduction.
  __shared__ bool isLastBlock;
  // we know block size for now
  __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y];
  // Accumulate db in FP32 always
  float db_local = 0;
  if (f < features) {
    int nidx = 0;
    // Handle non-multiple of UNROLL_FACTOR residue
    for (; nidx < nSpan % UNROLL_FACTOR; nidx++) {
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
      T y_val = Y[flat_idx];
      T dy_val = dY[flat_idx];
      T dx_val;
      if ((float)y_val > 0.f)
        dx_val = dy_val;
      else
        dx_val = 0;
      dX[flat_idx] = dx_val;
      db_local += (float)dx_val;
    }
    // Handle meat of work
    for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) {
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
#pragma unroll 4
      for (int u = 0; u < UNROLL_FACTOR; u++) {
        T y_val = Y[flat_idx];
        T dy_val = dY[flat_idx];
        T dx_val;
        if ((float)y_val > 0.f)
          dx_val = dy_val;
        else
          dx_val = 0;
        dX[flat_idx] = dx_val;
        db_local += (float)dx_val;
        flat_idx += features;
      }
    }
    // naive block reduction on y-dim
    int linear_idx = threadIdx.y * blockDim.x + threadIdx.x;
    smem[linear_idx] = db_local;
  }
  __syncthreads();
  if (f < features) {
    if(threadIdx.y == 0) {
      // threadIdx.y == 0 serially folds in the other y-rows' partials.
      for(int yidx = 1; yidx < blockDim.y; yidx++){
        db_local += smem[yidx * blockDim.x + threadIdx.x];
      }
      // block result is in db_local now for all threadIdx.y == 0
      // Write out partial result
      out[f] = db_local;
    }
  }
  // Make the partial visible to other CTAs before bumping the semaphore.
  __threadfence();
  __syncthreads();
  // Increment semaphore and check if this is the last CTA in the grid_y dimension.
  // Only thread (0,0) calls this
  if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) {
    unsigned int sum_idx;
    sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1);
    isLastBlock = (sum_idx == (gridDim.y - 1));
  }
  __syncthreads();
  db_local = 0;
  // No block reduction for now, only thread (*,0) do grid reduction
  if (isLastBlock && f < features) {
    if(threadIdx.y == 0) {
      // Serial (hence deterministic) accumulation of all CTA partials.
      for (int n = 0; n < gridDim.y; n++) {
        int row, col;
        row = f;
        col = n;
        db_local += (float)(intermediate[col * features + row]);
      }
      db[f] = (T)db_local;
    }
  }
}
// Vectorized variant of biasAddRelu_bprop for the case where Y/dY/dX are
// ILP-aligned and features % ILP == 0: each thread owns ILP consecutive
// features (f indexes ILP-wide vectors, so total x-threads == features/ILP).
// Addition done deterministically via a 2-pass approach. Each CTA writes out
// a partial sum, and the last CTA in grid Y dimension accumulates partials
// serially and writes to result.
// NOTE(review): assumes `semaphores` is zero-initialized before launch —
// confirm the caller resets it between calls.
template <typename T, int UNROLL_FACTOR>
__global__ void biasAddRelu_bprop_aligned(
    T* Y,
    T* dY,
    int features,
    int batch_size,
    T* dX,
    volatile float* intermediate,
    int* semaphores,
    T* db) {
  // The feature that this thread is responsible for
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  // Compute the span this thread is responsible for
  // For this block
  int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y;
  int b_nStart = blockIdx.y * b_chunkSize;
  int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart;
  // For this thread
  int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y;
  int nStart = threadIdx.y * chunkSize + b_nStart;
  int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart;
  volatile float* out = intermediate + blockIdx.y * features;
  // Flag to trigger last reduction.
  __shared__ bool isLastBlock;
  // Accumulate db in FP32 always
  float db_local[ILP];
  T r_y[ILP];
  T r_dy[ILP];
#pragma unroll
  for(int ii=0;ii<ILP;ii++){
    db_local[ii] = 0.f;
  }
  // f always <= features in this case
  //if (f < features) {
  int nidx = 0;
  // Handle non-multiple of UNROLL_FACTOR residue
  for (; nidx < nSpan % UNROLL_FACTOR; nidx++) {
    int row, col, flat_idx;
    row = f;
    col = nStart + nidx;
    // Offsets are in ILP-wide vector units for load_store.
    flat_idx = col * features / ILP + row;
    load_store(r_y, Y, 0, flat_idx);
    load_store(r_dy, dY, 0, flat_idx);
#pragma unroll
    for(int ii=0;ii<ILP;ii++){
      // Mask the gradient where the forward ReLU clipped, and accumulate db.
      if ((float)r_y[ii] <= 0.f)
        r_dy[ii] = 0;
      db_local[ii] += (float)r_dy[ii];
    }
    load_store(dX, r_dy, flat_idx, 0);
  }
  // Handle meat of work
  for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) {
    int row, col, flat_idx;
    row = f;
    col = nStart + nidx;
    flat_idx = col * features / ILP + row; // total threads in x == features/ILP
#pragma unroll
    for (int u = 0; u < UNROLL_FACTOR; u++) {
      load_store(r_y, Y, 0, flat_idx);
      load_store(r_dy, dY, 0, flat_idx);
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        if ((float)r_y[ii] <= 0.f)
          r_dy[ii] = 0;
        db_local[ii] += (float)r_dy[ii];
      }
      load_store(dX, r_dy, flat_idx, 0);
      flat_idx += features/ILP;
    }
  }
  // we know block size for now
  __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y*ILP];
  // naive block reduction on y-dim
  int linear_idx = threadIdx.y * blockDim.x + threadIdx.x;
  float* smem_out = smem + ILP * linear_idx;
#pragma unroll
  for(int ii=0;ii<ILP;ii++){
    smem_out[ii] = db_local[ii]; // reuse local dy buffer
  }
  __syncthreads();
  if(threadIdx.y == 0) {
    for(int yidx = 1; yidx < blockDim.y; yidx++){
      float* smem_in = smem + ILP * (yidx * blockDim.x + threadIdx.x);
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        db_local[ii] += smem_in[ii]; // reuse local dy buffer
      }
    }
    // block result is in db_local now for all threadIdx.y == 0
    if(gridDim.y == 1) {
      // Single-CTA-column case: write db directly and skip the 2-pass dance.
      // NOTE(review): threads with threadIdx.y == 0 exit here while the rest
      // of the block still reaches the __syncthreads() below — this relies on
      // barriers not waiting for exited threads; confirm on target hardware.
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        r_dy[ii] = db_local[ii]; // reuse local dy buffer
      }
      load_store(db, r_dy, f, 0);
      return;
    }
    // Write out partial result
    load_store(out, db_local, f, 0);
  }
  // Make the partial visible to other CTAs before bumping the semaphore.
  __threadfence();
  __syncthreads();
  // Increment semaphore and check if this is the last CTA in the grid_y dimension.
  // Only thread (0,0) calls this
  if (threadIdx.x == 0 && threadIdx.y == 0) {
    unsigned int sum_idx;
    sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1);
    isLastBlock = (sum_idx == (gridDim.y - 1));
  }
  __syncthreads();
#pragma unroll
  for(int ii=0;ii<ILP;ii++){
    db_local[ii] = 0.f;
  }
  float r_db[ILP];
  // No block reduction for now, only thread (*,0) do grid reduction
  if (isLastBlock) {
    if(threadIdx.y == 0){
      // Serial (hence deterministic) accumulation of all CTA partials.
      for (int n = 0; n < gridDim.y; n++) {
        int row, col;
        row = f;
        col = n;
        load_store(r_db, intermediate, 0, col * features / ILP + row);
#pragma unroll
        for(int ii=0;ii<ILP;ii++){
          db_local[ii] += r_db[ii];
        }
      }
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        r_dy[ii] = db_local[ii]; // reuse local dy buffer
      }
      load_store(db, r_dy, f, 0);
    }
  }
}
// Prefix sum of per-layer activation sizes: y_start_offsets[i] is where
// layer i's output begins inside the reserved fprop buffer (in elements,
// starting at 0). The final layer's output lives in the user-provided
// output buffer, but its offset is still produced for uniform indexing.
void get_y_offsets(
    int batch_size,
    int num_layers,
    const int* output_features,
    int* y_start_offsets) {
  int running = 0;
  for (int i = 0; i < num_layers; i++) {
    y_start_offsets[i] = running;
    running += batch_size * output_features[i];
  }
}
// Returns the reserved space (in elements) needed for the MLP fprop:
// the sum over every layer l of output_features[l] * batch_size.
size_t get_mlp_reserved_space(int64_t batch_size, int num_layers, const int* output_features) {
  size_t total = 0;
  for (int l = 0; l < num_layers; l++) {
    total += output_features[l] * batch_size;
  }
  return total;
}
// Total number of elements across all fprop activations:
// sum over layers of output_features[l] * batch_size.
size_t get_all_activations_size(int64_t batch_size, int num_layers, const int* output_features) {
  size_t total = 0;
  for (int l = 0; l < num_layers; l++) {
    total += output_features[l] * batch_size;
  }
  return total;
}
#if 0
// DISABLED: superseded by get_mlp_bp_workspace_in_bytes below, which also
// accounts for the reduction scratch and semaphore segments.
// Returns the work space (in elements) needed for the MLP bprop.
size_t get_mlp_bp_workspace (int batch_size, int num_layers, const int* output_features) {
  /*
     Workspace is partitioned as
     DY_GEMMs : DX_GEMMs
  */
  size_t work_space = 0;
  // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p
  // of biasReLU_bp and one for o/p of dgrad GEMM).
  work_space += 2*get_all_activations_size(batch_size, num_layers, output_features);
  return work_space;
}
#endif
// Scratch elements (floats) needed for the deterministic 2-pass bias
// reductions: the largest features * grid_y product over all layers,
// considering both the unaligned and the ILP-wide (aligned) launch shapes.
size_t get_reduction_scratch_space(int batch_size, int num_layers, const int* output_features) {
  size_t scratch = 0;
  const int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
  for (int l = 0; l < num_layers; l++) {
    int grid_x, grid_y_plain, grid_y_vec;
    // Unaligned kernel shape.
    get_biasAddRelu_bprop_grid_size(
        output_features[l], batch_size, BIAS_RELU_BW_NTHREADS_X, block_y, &grid_x, &grid_y_plain);
    // Aligned (vectorized) kernel shape covers ILP features per x-thread.
    get_biasAddRelu_bprop_grid_size(
        output_features[l], batch_size, ILP * BIAS_RELU_BW_NTHREADS_X, block_y, &grid_x, &grid_y_vec);
    scratch = ::max(scratch, (size_t)(output_features[l] * grid_y_plain));
    scratch = ::max(scratch, (size_t)(output_features[l] * grid_y_vec));
  }
  return scratch;
}
// Number of semaphore slots to allocate: one per feature of the widest
// layer bounds the number of grid-x CTA columns that ever need a counter.
size_t get_semaphores_size(int num_layers, const int* output_features) {
  int widest = 0;
  for (int l = 0; l < num_layers; l++) {
    if (output_features[l] > widest) {
      widest = output_features[l];
    }
  }
  return (size_t)widest;
}
// Total workspace bytes for bprop: two full activation sets (one for the
// output of biasReLU_bp, one for the dgrad GEMM output), plus the float
// reduction scratch, plus the int semaphore array.
template <typename T>
size_t get_mlp_bp_workspace_in_bytes(int batch_size, int num_layers, const int* output_features) {
  const size_t act_elems = get_all_activations_size(batch_size, num_layers, output_features);
  size_t bytes = 2 * act_elems * sizeof(T);
  bytes += get_reduction_scratch_space(batch_size, num_layers, output_features) * sizeof(float);
  bytes += get_semaphores_size(num_layers, output_features) * sizeof(int);
  return bytes;
}
// Carve the bprop workspace into its four consecutive segments, in order:
//   DY_GEMMs : DX_GEMMs : DB_SCRATCH : SEMAPHORES
// Pure pointer arithmetic over an already-allocated buffer; sizes must
// match get_mlp_bp_workspace_in_bytes for the same arguments.
template <typename T>
void partition_mlp_bp_workspace(
    int batch_size,
    int num_layers,
    const int* output_features,
    void* work_space,
    T** dy_gemms,
    T** dx_gemms,
    float** db_scratch,
    int** semaphores) {
  const size_t act_elems = get_all_activations_size(batch_size, num_layers, output_features);
  // dY GEMM buffers start at the base of the workspace.
  *dy_gemms = reinterpret_cast<T*>(work_space);
  // dX GEMM buffers follow one full activation set later.
  *dx_gemms = *dy_gemms + act_elems;
  // FP32 scratch for the deterministic bias reductions.
  *db_scratch = reinterpret_cast<float*>(*dx_gemms + act_elems);
  // Inter-CTA semaphore counters come last.
  *semaphores = reinterpret_cast<int*>(
      *db_scratch + get_reduction_scratch_space(batch_size, num_layers, output_features));
}
// Does a simple MLP fprop (GEMM+bias+ReLU).
// Can handle num_layers number of layers, each with its own shape. Output of layer i is assumed
// to be input of layer i+1. output_features, WPtr and BPtr are arrays of length num_layers, and
// must be in the same order i.e. WPtr[i] and BPtr[i] are respectively the weight and bias of layer
// 'i'.
// activation: 0 = none, 1 = relu, 2 = sigmoid. use_bias: 1 to add BPtr[i] per layer.
// Returns 0 on success, 1 if a cublas GEMM fails.
template <typename T>
int mlp_fp(
    T* X,
    int input_features,
    int batch_size,
    T** WPtr,
    int num_layers,
    int* output_features,
    T** BPtr,
    T* Y,
    T* reserved_space,
    int use_bias,
    int activation,
    void* lt_workspace) {
  T *weight, *input, *output, *bias;
  T *reserved_space_x, *reserved_space_y;
  // BUGFIX: initialize bias; it is passed to mlp_gemm_lt unconditionally, so
  // with use_bias == 0 an indeterminate pointer value was read (UB), even
  // though mlp_gemm_lt ignores it in that case.
  bias = NULL;
  reserved_space_x = NULL;
  reserved_space_y = reserved_space;
  // Get cublas handle from Pytorch
  hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  cublasLtHandle_t ltHandle;
  hipblasStatus_t lthandle_status;
  // BUGFIX: was 'cublasLtCreate(<Handle);' -- a mangled '&ltHandle' (the
  // '&lt' HTML entity decoded to '<'), which does not compile. The handle's
  // address must be passed so cublasLt can initialize it.
  lthandle_status = cublasLtCreate(&ltHandle);
  // Get the stream from cublas handle to reuse for biasReLU kernel.
  hipStream_t stream;
  hipblasGetStream(handle, &stream);
  for (int layer = 0; layer < num_layers; layer++) {
    weight = WPtr[layer];
    // Layer 0 reads the user input; later layers read the previous layer's
    // activations out of reserved_space.
    input = (layer == 0) ? X : reserved_space_x;
    // The last layer writes directly to the user output Y.
    output = (layer == num_layers - 1) ? Y : reserved_space_y;
    if (use_bias) {
      bias = BPtr[layer];
    }
    int ifeat = (layer == 0) ? input_features : output_features[layer - 1];
    int ofeat = output_features[layer];
    float one = 1.f;
    float zero = 0.f;
    // try with cublaslt first for supported case with valid handle
    // (cublasLt fuses bias and ReLU epilogues; sigmoid is not supported there)
    int cublaslt_status = 1;
    if(lthandle_status == HIPBLAS_STATUS_SUCCESS && activation < 2){
      cublaslt_status = mlp_gemm_lt(
          ltHandle,
          HIPBLAS_OP_T,
          HIPBLAS_OP_N,
          ofeat,
          batch_size,
          ifeat,
          &one,
          weight,
          ifeat,
          input,
          ifeat,
          &zero,
          output,
          ofeat,
          lt_workspace,
          1 << 22,
          stream,
          use_bias == 1,
          activation == 1,
          bias);
    }
    // if cublaslt failed or not executed, fallback to cublas
    if (cublaslt_status != 0) {
      hipblasStatus_t cublas_status;
      // Call GEMM: fprop is Y = W'X
      cublas_status = mlp_gemm(
          handle,
          HIPBLAS_OP_T,
          HIPBLAS_OP_N,
          ofeat,
          batch_size,
          ifeat,
          &one,
          weight,
          ifeat,
          input,
          ifeat,
          &zero,
          output,
          ofeat);
      if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
        printf("GEMM fprop failed with %d\n", cublas_status);
        return 1;
      }
      const uint &input_size = ofeat;
      int num_blocks = 0;
      int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
      // Call biasReLU: launch configuration sized via occupancy so the whole
      // device is covered with a grid-stride-style kernel.
      if(use_bias == 1) {
        if (activation == 0) { // no activation
          hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          hipLaunchKernelGGL(( biasAdd_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, bias, batch_size, input_size);
        } else if (activation == 1) { // relu
          hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAddRelu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          hipLaunchKernelGGL(( biasAddRelu_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, bias, batch_size, input_size);
        } else if (activation == 2) { // sigmoid: no fused kernel, so bias-add then sigmoid
          hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          hipLaunchKernelGGL(( biasAdd_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, bias, batch_size, input_size);
          hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          hipLaunchKernelGGL(( Sigmoid_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, batch_size, input_size);
        }
      } else {
        // don't need to do anything in case of no activation and no bias
        if (activation == 1) { // relu
          hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          hipLaunchKernelGGL(( Relu_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, batch_size, input_size);
        } else if (activation == 2) { // sigmoid
          hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          hipLaunchKernelGGL(( Sigmoid_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, batch_size, input_size);
        }
      }
    }
    // Set current output as next layer input
    reserved_space_x = reserved_space_y;
    // Set next layer output
    reserved_space_y += ofeat * batch_size;
  }
  if(lthandle_status == HIPBLAS_STATUS_SUCCESS) cublasLtDestroy(ltHandle);
  return 0;
}
// Does a simple MLP bprop (GEMM+bias+ReLU).
// Needs reserved space to come back exactly as it was populated in fprop.
// Does dgrad and wgrad sequentially.
// activation: 0 = none, 1 = relu, 2 = sigmoid. use_bias: 1 computes dbPtr[i].
// Returns 0 on success, 1 if a cublas GEMM fails.
template <typename T>
int mlp_bp(
    T* X,
    T* Y,
    int input_features,
    int batch_size,
    T** WPtr,
    int num_layers,
    int* output_features,
    T* dY,
    T* reserved_space,
    T* work_space,
    T* dX,
    T** dwPtr,
    T** dbPtr,
    bool requires_grad,
    int use_bias,
    int activation) {
  T* weight;
  T *dweight, *dx, *dy, *dbias;
  T *x, *y;
  // Where the dx of the biasReLU (== dy of gemm) is stored. Can be thrown away
  // after bp call.
  T* dy_gemm_base;
  // Where the dx after GEMM is stored.
  T* dx_gemm_base;
  // Where partial reduction results are stored.
  float* db_scratch;
  // Semaphores for reduction.
  int* semaphores;
  partition_mlp_bp_workspace<T>(
      batch_size,
      num_layers,
      output_features,
      work_space,
      &dy_gemm_base,
      &dx_gemm_base,
      &db_scratch,
      &semaphores);
  size_t semaphore_size = get_semaphores_size(num_layers, output_features) * sizeof(int);
  // Get cublas handle from Pytorch
  hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  // Get the stream from cublas handle to reuse for biasReLU kernel.
  hipStream_t stream;
  hipblasGetStream(handle, &stream);
  int* y_offsets = (int*)malloc(num_layers * sizeof(int));
  get_y_offsets(batch_size, num_layers, output_features, y_offsets);
  // Walk layers back-to-front: activation/bias bprop, then dgrad, then wgrad.
  for (int layer = num_layers - 1; layer >= 0; layer--) {
    weight = WPtr[layer];
    dweight = dwPtr[layer];
    // x is read from reserved space
    x = (layer == 0) ? X : reserved_space + y_offsets[layer - 1];
    // dx is written in workspace for all but layer==0
    dx = (layer == 0) ? dX : dx_gemm_base + y_offsets[layer - 1];
    // y is read from reserved space
    y = (layer == num_layers - 1) ? Y : reserved_space + y_offsets[layer];
    // dx from layer+1
    dy = (layer == num_layers - 1) ? dY : dx_gemm_base + y_offsets[layer];
    // dy_gemm is written to and read immediately
    T* dy_gemm = dy_gemm_base + y_offsets[layer];
    dbias = dbPtr[layer];
    int xfeat = (layer == 0) ? input_features : output_features[layer - 1];
    int yfeat = output_features[layer];
    float one = 1.f;
    float zero = 0.f;
    if (use_bias == 1) {
      if (activation == 0) { // no activation
        // bgrad
        dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y);
        int grid_x, grid_y;
        hipMemsetAsync(semaphores, 0, semaphore_size, stream);
        int block_x = BIAS_RELU_BW_NTHREADS_X;
        int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
        get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
        dim3 grid(grid_x, grid_y);
        hipLaunchKernelGGL(( biasAdd_bprop<T, 4>), dim3(grid), dim3(block), 0, stream,
          dy, yfeat, batch_size, db_scratch, semaphores, dbias);
        // bypass dgrad through reset pointer
        dy_gemm = dy;
      } else if (activation == 1) { // relu
        dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y);
        int grid_x, grid_y;
        hipMemsetAsync(semaphores, 0, semaphore_size, stream);
        // Use the vectorized (ILP) kernel only when the feature dim and all
        // pointers satisfy its alignment preconditions.
        if(yfeat % (ILP * BIAS_RELU_BW_NTHREADS_X) == 0 &&
           is_aligned(y) &&
           is_aligned(dy) &&
           is_aligned(dy_gemm) &&
           is_aligned(dbias)){
          int block_x = ILP * BIAS_RELU_BW_NTHREADS_X;
          int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
          get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
          dim3 grid(grid_x, grid_y);
          hipLaunchKernelGGL(( biasAddRelu_bprop_aligned<T, 4>), dim3(grid), dim3(block), 0, stream,
            y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias);
        } else {
          int block_x = BIAS_RELU_BW_NTHREADS_X;
          int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
          get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
          dim3 grid(grid_x, grid_y);
          hipLaunchKernelGGL(( biasAddRelu_bprop<T, 4>), dim3(grid), dim3(block), 0, stream,
            y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias);
        }
      } else if (activation == 2) { // sigmoid
        // activation backward
        int num_blocks = 0;
        int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
        hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0);
        hipLaunchKernelGGL(( Sigmoid_bprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, dy, y, batch_size, yfeat, dy_gemm);
        // bgrad, from dy_gemm
        dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y);
        int grid_x, grid_y;
        hipMemsetAsync(semaphores, 0, semaphore_size, stream);
        int block_x = BIAS_RELU_BW_NTHREADS_X;
        int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
        get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
        dim3 grid(grid_x, grid_y);
        hipLaunchKernelGGL(( biasAdd_bprop<T, 4>), dim3(grid), dim3(block), 0, stream,
          dy_gemm, yfeat, batch_size, db_scratch, semaphores, dbias);
      }
    } else { // no bias below
      if (activation == 0) {
        // bypass dgrad through reset pointer
        dy_gemm = dy;
      } else if (activation == 1) { // relu
        int num_blocks = 0;
        int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
        hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_bprop<T>, BIAS_RELU_FW_NTHREADS, 0);
        hipLaunchKernelGGL(( Relu_bprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, dy, y, batch_size, yfeat, dy_gemm);
      } else if (activation == 2) { // sigmoid
        int num_blocks = 0;
        int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
        hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0);
        hipLaunchKernelGGL(( Sigmoid_bprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, dy, y, batch_size, yfeat, dy_gemm);
      }
    }
    hipblasStatus_t cublas_status;
    // Call GEMM dgrad (skipped for layer 0 when the input grad is not needed)
    if (layer > 0 || requires_grad == 1) {
      cublas_status = mlp_gemm(
          handle,
          HIPBLAS_OP_N,
          HIPBLAS_OP_N,
          xfeat,
          batch_size,
          yfeat,
          &one,
          weight,
          xfeat,
          dy_gemm,
          yfeat,
          &zero,
          dx,
          xfeat);
      if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
        printf("GEMM dgrad failed with %d\n", cublas_status);
        // BUGFIX: release y_offsets on the error path (was leaked).
        free(y_offsets);
        return 1;
      }
    }
    // Call GEMM wgrad
    cublas_status = mlp_gemm(
        handle,
        HIPBLAS_OP_N,
        HIPBLAS_OP_T,
        xfeat,
        yfeat,
        batch_size,
        &one,
        x,
        xfeat,
        dy_gemm,
        yfeat,
        &zero,
        dweight,
        xfeat);
    if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
      printf("GEMM wgrad failed with %d\n", cublas_status);
      // BUGFIX: release y_offsets on the error path (was leaked).
      free(y_offsets);
      return 1;
    }
  }
  // BUGFIX: y_offsets was malloc'd above and never freed, leaking
  // num_layers * sizeof(int) bytes per backward pass.
  free(y_offsets);
  return 0;
}
// Instantiate for floating point types
// Explicit instantiations keep the template definitions private to this
// translation unit while still letting other TUs link against fp32/fp16/fp64
// variants of mlp_fp / mlp_bp and the workspace-size query.
// --- float (FP32) ---
template int mlp_fp<float>(
    float* X,
    int input_features,
    int batch_size,
    float** WPtr,
    int num_layers,
    int* output_features,
    float** BPtr,
    float* Y,
    float* reserved_space,
    int use_bias,
    int activation,
    void* lt_workspace);
template int mlp_bp<float>(
    float* X,
    float* Y,
    int input_features,
    int batch_size,
    float** WPtr,
    int num_layers,
    int* output_features,
    float* dY,
    float* reserved_space,
    float* work_space,
    float* dX,
    float** dwPtr,
    float** dbPtr,
    bool requires_grad,
    int use_bias,
    int activation);
// --- at::Half (FP16) ---
template int mlp_fp<at::Half>(
    at::Half* X,
    int input_features,
    int batch_size,
    at::Half** WPtr,
    int num_layers,
    int* output_features,
    at::Half** BPtr,
    at::Half* Y,
    at::Half* reserved_space,
    int use_bias,
    int activation,
    void* lt_workspace);
template int mlp_bp<at::Half>(
    at::Half* X,
    at::Half* Y,
    int input_features,
    int batch_size,
    at::Half** WPtr,
    int num_layers,
    int* output_features,
    at::Half* dY,
    at::Half* reserved_space,
    at::Half* work_space,
    at::Half* dX,
    at::Half** dwPtr,
    at::Half** dbPtr,
    bool requires_grad,
    int use_bias,
    int activation);
// --- double (FP64) ---
template int mlp_fp<double>(
    double* X,
    int input_features,
    int batch_size,
    double** WPtr,
    int num_layers,
    int* output_features,
    double** BPtr,
    double* Y,
    double* reserved_space,
    int use_bias,
    int activation,
    void* lt_workspace);
template int mlp_bp<double>(
    double* X,
    double* Y,
    int input_features,
    int batch_size,
    double** WPtr,
    int num_layers,
    int* output_features,
    double* dY,
    double* reserved_space,
    double* work_space,
    double* dX,
    double** dwPtr,
    double** dbPtr,
    bool requires_grad,
    int use_bias,
    int activation);
// Workspace-size query, one instantiation per supported element type.
template size_t get_mlp_bp_workspace_in_bytes<float>(
    int batch_size,
    int num_layers,
    const int* output_features);
template size_t get_mlp_bp_workspace_in_bytes<at::Half>(
    int batch_size,
    int num_layers,
    const int* output_features);
template size_t get_mlp_bp_workspace_in_bytes<double>(
    int batch_size,
    int num_layers,
    const int* output_features);
| 77339402a117b5a1027afbd72d21dfcff087faf0.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <torch/torch.h>
/* Includes, cuda */
#include <cublas_v2.h>
#include <cuda_runtime.h>
// includes cublaslt
#include <cublasLt.h>
// constants for fused bias+relu kernel
#define BIAS_RELU_FW_NTHREADS 128 // forward number of thread per block
#define BIAS_RELU_BW_NTHREADS_X 32 // backward number of thread in feature dim
#define BIAS_RELU_BW_NTHREADS_Y 16 // backward number of thread in batch dim
#define BIAS_RELU_RED_PER_THREAD 16 // backward minimal reduction length per thread
// move to a header later on
#define ILP 4
template<typename T>
__host__ __device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, volatile T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template<typename T>
__device__ __forceinline__ void load_store(volatile T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
// Keep ReLU in float only. When using half, cast to float before calling.
__device__ __inline__ float relu(float a) {
float retf = max(a, 0.f);
return (retf);
}
// Keep Sigmoid in float only. When using half, cast to float before calling.
__device__ __inline__ float sigmoid(float a) {
float retf = 1.f / (1.f + expf(-a));
return (retf);
}
// FP64 Wrapper around cublas GEMMEx
cublasStatus_t mlp_gemm(
cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float* alpha,
const double* A,
int lda,
const double* B,
int ldb,
const float* beta,
double* C,
int ldc) {
return cublasGemmEx(
handle,
transa,
transb,
m,
n,
k,
alpha,
A,
CUDA_R_64F,
lda,
B,
CUDA_R_64F,
ldb,
beta,
C,
CUDA_R_64F,
ldc,
CUDA_R_64F,
CUBLAS_GEMM_DEFAULT);
}
// FP32 Wrapper around cublas GEMMEx
cublasStatus_t mlp_gemm(
cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float* alpha,
const float* A,
int lda,
const float* B,
int ldb,
const float* beta,
float* C,
int ldc) {
return cublasGemmEx(
handle,
transa,
transb,
m,
n,
k,
alpha,
A,
CUDA_R_32F,
lda,
B,
CUDA_R_32F,
ldb,
beta,
C,
CUDA_R_32F,
ldc,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT);
}
// FP16 Tensor core wrapper around cublas GEMMEx
cublasStatus_t mlp_gemm(
cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float* alpha,
const at::Half* A,
int lda,
const at::Half* B,
int ldb,
float* beta,
at::Half* C,
int ldc) {
return cublasGemmEx(
handle,
transa,
transb,
m,
n,
k,
alpha,
A,
CUDA_R_16F,
lda,
B,
CUDA_R_16F,
ldb,
beta,
C,
CUDA_R_16F,
ldc,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
int mlp_gemm_lt(
cublasLtHandle_t ltHandle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float *alpha, /* host pointer */
const at::Half* A,
int lda,
const at::Half* B,
int ldb,
float *beta, /* host pointer */
at::Half* C,
int ldc,
void *workspace,
size_t workspaceSize,
cudaStream_t stream,
bool use_bias,
bool use_relu,
const void* bias) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
cublasLtMatmulDescOpaque_t operationDesc = {};
cublasLtMatrixLayoutOpaque_t Adesc = {}, Bdesc = {}, Cdesc = {};
cublasLtMatmulPreferenceOpaque_t preference = {};
int returnedResults = 0;
cublasLtMatmulHeuristicResult_t heuristicResult = {};
cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT;
// Create operation descriptor; see cublasLtMatmulDescAttributes_t
// for details about defaults; here we just set the transforms for
// A and B.
status = cublasLtMatmulDescInit(&operationDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa));
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transa));
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
if (use_bias) {
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias));
if (status != CUBLAS_STATUS_SUCCESS) {
goto CLEANUP;
}
if (use_relu) {
epilogue = CUBLASLT_EPILOGUE_RELU_BIAS;
} else {
epilogue = CUBLASLT_EPILOGUE_BIAS;
}
} else {
if (use_relu) {
epilogue = CUBLASLT_EPILOGUE_RELU;
}
}
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue));
if (status != CUBLAS_STATUS_SUCCESS) {
goto CLEANUP;
}
// Create matrix descriptors. Not setting any extra attributes.
status = cublasLtMatrixLayoutInit(
&Adesc, CUDA_R_16F, transa == CUBLAS_OP_N ? m : k, transa == CUBLAS_OP_N ? k : m, lda);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatrixLayoutInit(
&Bdesc, CUDA_R_16F, transb == CUBLAS_OP_N ? k : n, transb == CUBLAS_OP_N ? n : k, ldb);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatrixLayoutInit(&Cdesc, CUDA_R_16F, m, n, ldc);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
// Create preference handle; In general, extra attributes can be
// used here to disable tensor ops or to make sure algo selected
// will work with badly aligned A, B, C. However, for simplicity
// here we assume A,B,C are always well aligned (e.g., directly
// come from cudaMalloc)
status = cublasLtMatmulPreferenceInit(&preference);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatmulPreferenceSetAttribute(
&preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize));
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
// We just need the best available heuristic to try and run matmul.
// There is no guarantee that this will work. For example, if A is
// badly aligned, you can request more (e.g. 32) algos and try to
// run them one by one until something works.
status = cublasLtMatmulAlgoGetHeuristic(
ltHandle, &operationDesc, &Adesc, &Bdesc, &Cdesc, &Cdesc, &preference, 1, &heuristicResult, &returnedResults);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
if (returnedResults == 0) {
status = CUBLAS_STATUS_NOT_SUPPORTED;
goto CLEANUP;
}
status = cublasLtMatmul(ltHandle,
&operationDesc,
alpha,
A,
&Adesc,
B,
&Bdesc,
beta,
C,
&Cdesc,
C,
&Cdesc,
&heuristicResult.algo,
workspace,
workspaceSize,
stream);
CLEANUP:
// Descriptors are no longer needed as all GPU work was already
// enqueued.
return status == CUBLAS_STATUS_SUCCESS ? 0 : 1;
}
int mlp_gemm_lt(
cublasLtHandle_t ltHandle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float *alpha, /* host pointer */
const double* A,
int lda,
const double* B,
int ldb,
float *beta, /* host pointer */
double* C,
int ldc,
void *workspace,
size_t workspaceSize,
cudaStream_t stream,
bool use_bias,
bool use_relu,
const void* bias) {
return 1;
}
int mlp_gemm_lt(
cublasLtHandle_t ltHandle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float *alpha, /* host pointer */
const float *A,
int lda,
const float *B,
int ldb,
float *beta, /* host pointer */
float *C,
int ldc,
void *workspace,
size_t workspaceSize,
cudaStream_t stream,
bool use_bias,
bool use_relu,
const void* bias) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
cublasLtMatmulDescOpaque_t operationDesc = {};
cublasLtMatrixLayoutOpaque_t Adesc = {}, Bdesc = {}, Cdesc = {};
cublasLtMatmulPreferenceOpaque_t preference = {};
int returnedResults = 0;
cublasLtMatmulHeuristicResult_t heuristicResult = {};
cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT;
// Create operation descriptor; see cublasLtMatmulDescAttributes_t
// for details about defaults; here we just set the transforms for
// A and B.
status = cublasLtMatmulDescInit(&operationDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa));
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transa));
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
if (use_bias) {
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias));
if (status != CUBLAS_STATUS_SUCCESS) {
goto CLEANUP;
}
if (use_relu) {
epilogue = CUBLASLT_EPILOGUE_RELU_BIAS;
} else {
epilogue = CUBLASLT_EPILOGUE_BIAS;
}
} else {
if (use_relu) {
epilogue = CUBLASLT_EPILOGUE_RELU;
}
}
status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue));
if (status != CUBLAS_STATUS_SUCCESS) {
goto CLEANUP;
}
// Create matrix descriptors. Not setting any extra attributes.
status = cublasLtMatrixLayoutInit(
&Adesc, CUDA_R_32F, transa == CUBLAS_OP_N ? m : k, transa == CUBLAS_OP_N ? k : m, lda);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatrixLayoutInit(
&Bdesc, CUDA_R_32F, transb == CUBLAS_OP_N ? k : n, transb == CUBLAS_OP_N ? n : k, ldb);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatrixLayoutInit(&Cdesc, CUDA_R_32F, m, n, ldc);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
// Create preference handle; In general, extra attributes can be
// used here to disable tensor ops or to make sure algo selected
// will work with badly aligned A, B, C. However, for simplicity
// here we assume A,B,C are always well aligned (e.g., directly
// come from cudaMalloc)
status = cublasLtMatmulPreferenceInit(&preference);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
status = cublasLtMatmulPreferenceSetAttribute(
&preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize));
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
// We just need the best available heuristic to try and run matmul.
// There is no guarantee that this will work. For example, if A is
// badly aligned, you can request more (e.g. 32) algos and try to
// run them one by one until something works.
status = cublasLtMatmulAlgoGetHeuristic(
ltHandle, &operationDesc, &Adesc, &Bdesc, &Cdesc, &Cdesc, &preference, 1, &heuristicResult, &returnedResults);
if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
if (returnedResults == 0) {
status = CUBLAS_STATUS_NOT_SUPPORTED;
goto CLEANUP;
}
status = cublasLtMatmul(ltHandle,
&operationDesc,
alpha,
A,
&Adesc,
B,
&Bdesc,
beta,
C,
&Cdesc,
C,
&Cdesc,
&heuristicResult.algo,
workspace,
workspaceSize,
stream);
CLEANUP:
// Descriptors are no longer needed as all GPU work was already
// enqueued.
return status == CUBLAS_STATUS_SUCCESS ? 0 : 1;
}
// Bias ADD. Assume input X is [features x batch size], column major.
// Bias is one 'features' long vector, with implicit broadcast.
template <typename T>
__global__ void biasAdd_fprop(T *X, T *b, uint batch_size, uint features) {
T r_x[ILP];
T r_b[ILP];
if(is_aligned(X) && is_aligned(b) && features % ILP ==0) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
int row = tid % (features / ILP);
load_store(r_x, X, 0 , tid);
load_store(r_b, b, 0 , row);
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
r_x[ii] = bias_sum;
}
load_store(X, r_x, tid , 0);
}
} else {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
int row = tid % features;
r_x[ii] = X[idx];
r_b[ii] = b[row];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
r_x[ii] = bias_sum;
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
X[idx] = r_x[ii];
}
}
}
}
}
// Bias ADD + ReLU. Assume input X is [features x batch size], column major.
// Activation support fuesed ReLU. Safe to call in-place.
template <typename T>
__global__ void biasAddRelu_fprop(T *X, T *b, uint batch_size, uint features) {
T r_x[ILP];
T r_b[ILP];
if(is_aligned(X) && is_aligned(b) && features % ILP ==0) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
int row = tid % (features / ILP);
load_store(r_x, X, 0 , tid);
load_store(r_b, b, 0 , row);
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
r_x[ii] = relu(bias_sum);
}
load_store(X, r_x, tid , 0);
}
} else {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
int row = tid % features;
r_x[ii] = X[idx];
r_b[ii] = b[row];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]);
r_x[ii] = relu(bias_sum);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
X[idx] = r_x[ii];
}
}
}
}
}
// ReLU. Assume input X is [features x batch size], column major.
// Safe to call in-place.
template <typename T>
__global__ void Relu_fprop(T *X, uint batch_size, uint features) {
T r_x[ILP];
if(is_aligned(X) && features % ILP ==0) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
load_store(r_x, X, 0 , tid);
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
r_x[ii] = relu(static_cast<float>(r_x[ii]));
}
load_store(X, r_x, tid , 0);
}
} else {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
r_x[ii] = X[idx];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
r_x[ii] = relu(static_cast<float>(r_x[ii]));
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
X[idx] = r_x[ii];
}
}
}
}
}
// Sigmoid. Assume input X is [features x batch size], column major.
// Safe to call in-place.
template <typename T>
__global__ void Sigmoid_fprop(T *X, uint batch_size, uint features) {
T r_x[ILP];
if(is_aligned(X) && features % ILP ==0) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
load_store(r_x, X, 0 , tid);
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
r_x[ii] = sigmoid(static_cast<float>(r_x[ii]));
}
load_store(X, r_x, tid , 0);
}
} else {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
r_x[ii] = X[idx];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
r_x[ii] = sigmoid(static_cast<float>(r_x[ii]));
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
X[idx] = r_x[ii];
}
}
}
}
}
// ReLU. Assume input X is [features x batch size], column major.
// Safe to call in-place.
template <typename T>
__global__ void Relu_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) {
T r_dy[ILP];
T r_y[ILP];
if(is_aligned(dY) &&
is_aligned(Y) &&
is_aligned(dX) &&
features % ILP ==0) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
load_store(r_dy, dY, 0 , tid);
load_store(r_y, Y, 0 , tid);
#pragma unroll
for(int ii=0;ii<ILP;ii++){
if ((float)r_y[ii] <= 0.f)
r_dy[ii] = 0;
}
load_store(dX, r_dy, tid, 0);
}
} else {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
r_dy[ii] = dY[idx];
r_y[ii] = Y[idx];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
if ((float)r_y[ii] <= 0.f)
r_dy[ii] = 0;
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
dX[idx] = r_dy[ii];
}
}
}
}
}
// Sigmoid. Assume input X is [features x batch size], column major.
// Safe to call in-place.
template <typename T>
__global__ void Sigmoid_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) {
T r_dy[ILP];
T r_y[ILP];
if(is_aligned(dY) &&
is_aligned(Y) &&
is_aligned(dX) &&
features % ILP ==0) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) {
load_store(r_dy, dY, 0 , tid);
load_store(r_y, Y, 0 , tid);
#pragma unroll
for(int ii=0;ii<ILP;ii++){
float grad_out = r_dy[ii];
float out = r_y[ii];
float grad_i = out * ( 1.f - out) * grad_out;
r_dy[ii] = grad_i;
}
load_store(dX, r_dy, tid, 0);
}
} else {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
r_dy[ii] = dY[idx];
r_y[ii] = Y[idx];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
float grad_out = r_dy[ii];
float out = r_y[ii];
float grad_i = out * ( 1.f - out) * grad_out;
r_dy[ii] = grad_i;
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++) {
int idx = tid + ii * blockDim.x * gridDim.x;
if(idx < features * batch_size) {
dX[idx] = r_dy[ii];
}
}
}
}
}
// Pick a 2D launch grid for the pointwise backward kernels.
// block_x / block_y are the number of *elements* a block covers in the
// feature / batch dimension respectively (not thread counts).
void get_biasAddRelu_bprop_grid_size(
    int yfeat,
    int batch_size,
    int block_x,
    int block_y,
    int* grid_x,
    int* grid_y) {
  // Blocks needed to tile the feature dimension (ceiling division).
  const int feature_blocks = (yfeat + block_x - 1) / block_x;
  *grid_x = feature_blocks;
  // Bound grid_y so the total block count stays near 4 resident blocks per
  // SM (hand-tuned for sm_70; an occupancy query could replace the 4).
  const int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  const int y_cap = num_SMs * 4 / feature_blocks;
  // Reduction splits required to cover the whole batch dimension; the kernel
  // itself adjusts per-thread work, so launching fewer blocks is safe.
  const int splits_needed = (batch_size + block_y - 1) / block_y;
  *grid_y = std::min(splits_needed, y_cap);
}
// Addition done deterministically via a 2-pass approach. Each CTA writes out partial
// sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result.
// Bias gradient: db[f] = sum over the batch of dY[f, n], with dY stored
// column-major ([features x batch_size]). Pass 1: every block reduces its
// batch slice into `intermediate`; pass 2: the last block per feature column
// (detected via `semaphores`, which the host must zero before launch)
// serially folds the gridDim.y partials so the result is deterministic.
template <typename T, int UNROLL_FACTOR>
__global__ void biasAdd_bprop(
    T* dY,
    int features,
    int batch_size,
    volatile float* intermediate,
    int* semaphores,
    T* db) {
  // The feature that this thread is responsible for
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  // Compute the span this thread is responsible for
  // For this block
  int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y;
  int b_nStart = blockIdx.y * b_chunkSize;
  int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart;
  // For this thread
  int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y;
  int nStart = threadIdx.y * chunkSize + b_nStart;
  int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart;
  // Per-(blockIdx.y) slice of the partial-sum buffer.
  volatile float* out = intermediate + blockIdx.y * features;
  // Flag to trigger last reduction.
  __shared__ bool isLastBlock;
  // we know block size for now
  __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y];
  // Accumulate db in FP32 always
  float db_local = 0;
  if (f < features) {
    int nidx = 0;
    // Handle non-multiple of UNROLL_FACTOR residue
    for (; nidx < nSpan % UNROLL_FACTOR; nidx++) {
      // 64-bit flat index: col * features can exceed 32 bits.
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
      db_local += (float)dY[flat_idx];
    }
    // Handle meat of work
    for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) {
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
#pragma unroll 4
      for (int u = 0; u < UNROLL_FACTOR; u++) {
        db_local += (float)dY[flat_idx];
        flat_idx += features;
      }
    }
    // naive block reduction on y-dim
    int linear_idx = threadIdx.y * blockDim.x + threadIdx.x;
    smem[linear_idx] = db_local;
  }
  __syncthreads();
  if (f < features) {
    if(threadIdx.y == 0) {
      // Serial fold across the block's y threads (deterministic order).
      for(int yidx = 1; yidx < blockDim.y; yidx++){
        db_local += smem[yidx * blockDim.x + threadIdx.x];
      }
      // block result is in db_local now for all threadIdx.y == 0
      // Write out partial result
      out[f] = db_local;
    }
  }
  // Make the partial visible to other blocks before the semaphore bump.
  __threadfence();
  __syncthreads();
  // Increment semaphore and check if this is the last CTA in the grid_y dimension.
  // Only thread (0,0) calls this
  if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) {
    unsigned int sum_idx;
    sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1);
    isLastBlock = (sum_idx == (gridDim.y - 1));
  }
  __syncthreads();
  db_local = 0;
  // No block reduction for now, only thread (*,0) do grid reduction
  if (isLastBlock && f < features) {
    if(threadIdx.y == 0) {
      // Serial fold of the gridDim.y partials — fixed order, deterministic.
      for (int n = 0; n < gridDim.y; n++) {
        int row, col;
        row = f;
        col = n;
        db_local += (float)(intermediate[col * features + row]);
      }
      db[f] = (T)db_local;
    }
  }
}
// Addition done deterministically via a 2-pass approach. Each CTA writes out partial
// sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result.
// Fused ReLU + bias backward: dX = dY * (Y > 0), db[f] = sum_n dX[f, n],
// with Y/dY stored column-major ([features x batch_size]). Same deterministic
// two-pass semaphore reduction as biasAdd_bprop; `semaphores` must be zeroed
// by the host before launch.
// Fix: flat indices widened from `int` to `int64_t` to match biasAdd_bprop —
// `col * features` overflows 32 bits for large batch*feature products.
template <typename T, int UNROLL_FACTOR>
__global__ void biasAddRelu_bprop(
    T* Y,
    T* dY,
    int features,
    int batch_size,
    T* dX,
    volatile float* intermediate,
    int* semaphores,
    T* db) {
  // The feature that this thread is responsible for
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  // Compute the span this thread is responsible for
  // For this block
  int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y;
  int b_nStart = blockIdx.y * b_chunkSize;
  int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart;
  // For this thread
  int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y;
  int nStart = threadIdx.y * chunkSize + b_nStart;
  int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart;
  // Per-(blockIdx.y) slice of the partial-sum buffer.
  volatile float* out = intermediate + blockIdx.y * features;
  // Flag to trigger last reduction.
  __shared__ bool isLastBlock;
  // we know block size for now
  __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y];
  // Accumulate db in FP32 always
  float db_local = 0;
  if (f < features) {
    int nidx = 0;
    // Handle non-multiple of UNROLL_FACTOR residue
    for (; nidx < nSpan % UNROLL_FACTOR; nidx++) {
      // 64-bit flat index (was int): col * features can exceed 32 bits.
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
      T y_val = Y[flat_idx];
      T dy_val = dY[flat_idx];
      T dx_val;
      // ReLU gate: pass the gradient only where the forward output was > 0.
      if ((float)y_val > 0.f)
        dx_val = dy_val;
      else
        dx_val = 0;
      dX[flat_idx] = dx_val;
      db_local += (float)dx_val;
    }
    // Handle meat of work
    for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) {
      int64_t row, col, flat_idx;
      row = f;
      col = nStart + nidx;
      flat_idx = col * features + row;
#pragma unroll 4
      for (int u = 0; u < UNROLL_FACTOR; u++) {
        T y_val = Y[flat_idx];
        T dy_val = dY[flat_idx];
        T dx_val;
        if ((float)y_val > 0.f)
          dx_val = dy_val;
        else
          dx_val = 0;
        dX[flat_idx] = dx_val;
        db_local += (float)dx_val;
        flat_idx += features;
      }
    }
    // naive block reduction on y-dim
    int linear_idx = threadIdx.y * blockDim.x + threadIdx.x;
    smem[linear_idx] = db_local;
  }
  __syncthreads();
  if (f < features) {
    if(threadIdx.y == 0) {
      // Serial fold across the block's y threads (deterministic order).
      for(int yidx = 1; yidx < blockDim.y; yidx++){
        db_local += smem[yidx * blockDim.x + threadIdx.x];
      }
      // block result is in db_local now for all threadIdx.y == 0
      // Write out partial result
      out[f] = db_local;
    }
  }
  // Make the partial visible to other blocks before the semaphore bump.
  __threadfence();
  __syncthreads();
  // Increment semaphore and check if this is the last CTA in the grid_y dimension.
  // Only thread (0,0) calls this
  if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) {
    unsigned int sum_idx;
    sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1);
    isLastBlock = (sum_idx == (gridDim.y - 1));
  }
  __syncthreads();
  db_local = 0;
  // No block reduction for now, only thread (*,0) do grid reduction
  if (isLastBlock && f < features) {
    if(threadIdx.y == 0) {
      // Serial fold of the gridDim.y partials — fixed order, deterministic.
      for (int n = 0; n < gridDim.y; n++) {
        int row, col;
        row = f;
        col = n;
        db_local += (float)(intermediate[col * features + row]);
      }
      db[f] = (T)db_local;
    }
  }
}
// Addition done deterministically via a 2-pass approach. Each CTA writes out partial
// sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result.
// Vectorized variant of biasAddRelu_bprop: every thread moves ILP elements at
// a time via load_store. Caller guarantees features % (ILP * blockDim.x) == 0
// and that Y/dY/dX/db are vector-aligned, so no f < features guards are
// needed. `intermediate`/`db` are indexed in ILP-wide units here.
template <typename T, int UNROLL_FACTOR>
__global__ void biasAddRelu_bprop_aligned(
    T* Y,
    T* dY,
    int features,
    int batch_size,
    T* dX,
    volatile float* intermediate,
    int* semaphores,
    T* db) {
  // The feature that this thread is responsible for
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  // Compute the span this thread is responsible for
  // For this block
  int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y;
  int b_nStart = blockIdx.y * b_chunkSize;
  int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart;
  // For this thread
  int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y;
  int nStart = threadIdx.y * chunkSize + b_nStart;
  int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart;
  volatile float* out = intermediate + blockIdx.y * features;
  // Flag to trigger last reduction.
  __shared__ bool isLastBlock;
  // Accumulate db in FP32 always
  float db_local[ILP];
  T r_y[ILP];
  T r_dy[ILP];
#pragma unroll
  for(int ii=0;ii<ILP;ii++){
    db_local[ii] = 0.f;
  }
  // f always <= features in this case
  //if (f < features) {
  int nidx = 0;
  // Handle non-multiple of UNROLL_FACTOR residue
  for (; nidx < nSpan % UNROLL_FACTOR; nidx++) {
    int row, col, flat_idx;
    row = f;
    col = nStart + nidx;
    // Index in ILP-element units: there are features/ILP vectors per column.
    flat_idx = col * features / ILP + row;
    load_store(r_y, Y, 0, flat_idx);
    load_store(r_dy, dY, 0, flat_idx);
#pragma unroll
    for(int ii=0;ii<ILP;ii++){
      // ReLU gate: zero the gradient where the forward output was <= 0.
      if ((float)r_y[ii] <= 0.f)
        r_dy[ii] = 0;
      db_local[ii] += (float)r_dy[ii];
    }
    load_store(dX, r_dy, flat_idx, 0);
  }
  // Handle meat of work
  for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) {
    int row, col, flat_idx;
    row = f;
    col = nStart + nidx;
    flat_idx = col * features / ILP + row; // total threads in x == features/ILP
#pragma unroll
    for (int u = 0; u < UNROLL_FACTOR; u++) {
      load_store(r_y, Y, 0, flat_idx);
      load_store(r_dy, dY, 0, flat_idx);
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        if ((float)r_y[ii] <= 0.f)
          r_dy[ii] = 0;
        db_local[ii] += (float)r_dy[ii];
      }
      load_store(dX, r_dy, flat_idx, 0);
      flat_idx += features/ILP;
    }
  }
  // we know block size for now
  __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y*ILP];
  // naive block reduction on y-dim
  int linear_idx = threadIdx.y * blockDim.x + threadIdx.x;
  float* smem_out = smem + ILP * linear_idx;
#pragma unroll
  for(int ii=0;ii<ILP;ii++){
    smem_out[ii] = db_local[ii]; // reuse local dy buffer
  }
  __syncthreads();
  if(threadIdx.y == 0) {
    // Serial fold across the block's y threads (deterministic order).
    for(int yidx = 1; yidx < blockDim.y; yidx++){
      float* smem_in = smem + ILP * (yidx * blockDim.x + threadIdx.x);
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        db_local[ii] += smem_in[ii]; // reuse local dy buffer
      }
    }
    // block result is in db_local now for all threadIdx.y == 0
    if(gridDim.y == 1) {
      // Single-split case: write db directly and skip the semaphore pass.
      // NOTE(review): threadIdx.y == 0 threads return here while the rest
      // fall through to __syncthreads and read an unwritten isLastBlock;
      // the inner threadIdx.y == 0 guard keeps that benign — confirm.
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        r_dy[ii] = db_local[ii]; // reuse local dy buffer
      }
      load_store(db, r_dy, f, 0);
      return;
    }
    // Write out partial result
    load_store(out, db_local, f, 0);
  }
  // Make the partial visible to other blocks before the semaphore bump.
  __threadfence();
  __syncthreads();
  // Increment semaphore and check if this is the last CTA in the grid_y dimension.
  // Only thread (0,0) calls this
  if (threadIdx.x == 0 && threadIdx.y == 0) {
    unsigned int sum_idx;
    sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1);
    isLastBlock = (sum_idx == (gridDim.y - 1));
  }
  __syncthreads();
#pragma unroll
  for(int ii=0;ii<ILP;ii++){
    db_local[ii] = 0.f;
  }
  float r_db[ILP];
  // No block reduction for now, only thread (*,0) do grid reduction
  if (isLastBlock) {
    if(threadIdx.y == 0){
      // Serial fold of the gridDim.y partials — fixed order, deterministic.
      for (int n = 0; n < gridDim.y; n++) {
        int row, col;
        row = f;
        col = n;
        load_store(r_db, intermediate, 0, col * features / ILP + row);
#pragma unroll
        for(int ii=0;ii<ILP;ii++){
          db_local[ii] += r_db[ii];
        }
      }
#pragma unroll
      for(int ii=0;ii<ILP;ii++){
        r_dy[ii] = db_local[ii]; // reuse local dy buffer
      }
      load_store(db, r_dy, f, 0);
    }
  }
}
// Lists where the num_layers-1 intermediate Y buffers start in reserved space on fprop,
// starting at offset 0. The last Y value is stored in the user provided output buffer.
void get_y_offsets(
    int batch_size,
    int num_layers,
    const int* output_features,
    int* y_start_offsets) {
  // Running prefix sum of per-layer activation sizes.
  int offset = 0;
  y_start_offsets[0] = offset;
  for (int l = 1; l < num_layers; ++l) {
    offset += batch_size * output_features[l - 1];
    y_start_offsets[l] = offset;
  }
}
// Returns the reserved space (in elements) needed for the MLP fprop:
// one [output_features[l] x batch_size] activation buffer per layer.
size_t get_mlp_reserved_space(int64_t batch_size, int num_layers, const int* output_features) {
  size_t res_space = 0;
  const int* feat_end = output_features + num_layers;
  for (const int* feat = output_features; feat != feat_end; ++feat) {
    res_space += (*feat) * batch_size;
  }
  return res_space;
}
// Returns the size (in elements) of all fprop activations combined:
// sum over layers of output_features[l] * batch_size.
size_t get_all_activations_size(int64_t batch_size, int num_layers, const int* output_features) {
  size_t total = 0;
  int l = num_layers;
  while (l-- > 0) {
    total += output_features[l] * batch_size;
  }
  return total;
}
#if 0
// Returns the work space (in elements) needed for the MLP bprop.
size_t get_mlp_bp_workspace (int batch_size, int num_layers, const int* output_features) {
/*
Workspace is partitioned as
DY_GEMMs : DX_GEMMs
*/
size_t work_space = 0;
// Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p
// of biasReLU_bp and one for o/p of dgrad GEMM).
work_space += 2*get_all_activations_size(batch_size, num_layers, output_features);
return work_space;
}
#endif
// Scratch space (in elements) for the partial-sum buffers used by the
// bias/ReLU backward reductions: the worst case over all layers and over
// both launch shapes (scalar and ILP-vectorized block widths).
size_t get_reduction_scratch_space(int batch_size, int num_layers, const int* output_features) {
  size_t worst = 0;
  const int red_rows = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
  for (int l = 0; l < num_layers; l++) {
    int grid_x_unused, grid_y;
    // Scalar (non-vectorized) launch shape.
    get_biasAddRelu_bprop_grid_size(
        output_features[l], batch_size, BIAS_RELU_BW_NTHREADS_X, red_rows, &grid_x_unused, &grid_y);
    worst = std::max(worst, (size_t)(output_features[l] * grid_y));
    // ILP-vectorized launch shape (wider feature coverage per block).
    get_biasAddRelu_bprop_grid_size(
        output_features[l], batch_size, ILP * BIAS_RELU_BW_NTHREADS_X, red_rows, &grid_x_unused, &grid_y);
    worst = std::max(worst, (size_t)(output_features[l] * grid_y));
  }
  return worst;
}
// Buffer size (in ints) for the reduction semaphores.
// One semaphore per feature of the widest layer is an upper bound.
size_t get_semaphores_size(int num_layers, const int* output_features) {
  int widest = 0;
  for (int l = 0; l < num_layers; ++l) {
    if (output_features[l] > widest) {
      widest = output_features[l];
    }
  }
  return (size_t)widest;
}
// Returns the work space (in bytes) needed for the MLP bprop:
// two activation-sized staging buffers of T (dY and dX for the GEMMs),
// plus fp32 reduction scratch, plus int semaphores.
template <typename T>
size_t get_mlp_bp_workspace_in_bytes(int batch_size, int num_layers, const int* output_features) {
  const size_t gemm_staging =
      2 * get_all_activations_size(batch_size, num_layers, output_features) * sizeof(T);
  const size_t red_scratch =
      get_reduction_scratch_space(batch_size, num_layers, output_features) * sizeof(float);
  const size_t sem_bytes = get_semaphores_size(num_layers, output_features) * sizeof(int);
  return gemm_staging + red_scratch + sem_bytes;
}
// Returns pointers to each segment of the bprop workspace.
// Layout (in order): [DY_GEMMs | DX_GEMMs | DB_SCRATCH | SEMAPHORES]
template <typename T>
void partition_mlp_bp_workspace(
    int batch_size,
    int num_layers,
    const int* output_features,
    void* work_space,
    T** dy_gemms,
    T** dx_gemms,
    float** db_scratch,
    int** semaphores) {
  const size_t acts = get_all_activations_size(batch_size, num_layers, output_features);
  // dy_gemm tensors start at the base of the workspace.
  T* base = reinterpret_cast<T*>(work_space);
  *dy_gemms = base;
  // dx_gemm tensors follow the dy_gemm region.
  *dx_gemms = base + acts;
  // fp32 partial-reduction scratch follows the dx_gemm region.
  *db_scratch = reinterpret_cast<float*>(*dx_gemms + acts);
  // Semaphores come last, after the reduction scratch.
  *semaphores = reinterpret_cast<int*>(
      *db_scratch + get_reduction_scratch_space(batch_size, num_layers, output_features));
}
// Does a simple MLP fprop (GEMM+bias+ReLU).
// Can handle num_layers number of layers, each with its own shape. Output of layer i is assumed
// to be input of layer i+1. output_features, WPtr and BPtr are arrays of length num_layers, and
// must be in the same order i.e. WPtr[i] and BPtr[i] are respectively the weight and bias of layer
// 'i'.
// Returns 0 on success, 1 on a GEMM failure.
// Fixes: (1) the cublasLtCreate call was garbled ("<Handle" from an
// HTML-entity mangling of "&ltHandle") and did not compile; (2) `bias` was
// passed to mlp_gemm_lt uninitialized when use_bias == 0.
template <typename T>
int mlp_fp(
    T* X,
    int input_features,
    int batch_size,
    T** WPtr,
    int num_layers,
    int* output_features,
    T** BPtr,
    T* Y,
    T* reserved_space,
    int use_bias,
    int activation,
    void* lt_workspace) {
  T *weight, *input, *output, *bias;
  T *reserved_space_x, *reserved_space_y;
  reserved_space_x = NULL;
  reserved_space_y = reserved_space;
  // Keep the no-bias case well-defined: mlp_gemm_lt receives this pointer
  // even when use_bias == 0.
  bias = NULL;
  // Get cublas handle from Pytorch
  cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  cublasLtHandle_t ltHandle;
  cublasStatus_t lthandle_status;
  lthandle_status = cublasLtCreate(&ltHandle);
  // Get the stream from cublas handle to reuse for biasReLU kernel.
  cudaStream_t stream;
  cublasGetStream(handle, &stream);
  for (int layer = 0; layer < num_layers; layer++) {
    weight = WPtr[layer];
    input = (layer == 0) ? X : reserved_space_x;
    output = (layer == num_layers - 1) ? Y : reserved_space_y;
    if (use_bias) {
      bias = BPtr[layer];
    }
    int ifeat = (layer == 0) ? input_features : output_features[layer - 1];
    int ofeat = output_features[layer];
    float one = 1.f;
    float zero = 0.f;
    // Try cublasLt first; it can fuse bias and ReLU (activation < 2) into
    // the GEMM epilogue when the handle is valid.
    int cublaslt_status = 1;
    if (lthandle_status == CUBLAS_STATUS_SUCCESS && activation < 2) {
      cublaslt_status = mlp_gemm_lt(
          ltHandle,
          CUBLAS_OP_T,
          CUBLAS_OP_N,
          ofeat,
          batch_size,
          ifeat,
          &one,
          weight,
          ifeat,
          input,
          ifeat,
          &zero,
          output,
          ofeat,
          lt_workspace,
          1 << 22,
          stream,
          use_bias == 1,
          activation == 1,
          bias);
    }
    // if cublaslt failed or not executed, fallback to cublas
    if (cublaslt_status != 0) {
      cublasStatus_t cublas_status;
      // Call GEMM: fprop is Y = W'X
      cublas_status = mlp_gemm(
          handle,
          CUBLAS_OP_T,
          CUBLAS_OP_N,
          ofeat,
          batch_size,
          ifeat,
          &one,
          weight,
          ifeat,
          input,
          ifeat,
          &zero,
          output,
          ofeat);
      if (cublas_status != CUBLAS_STATUS_SUCCESS) {
        printf("GEMM fprop failed with %d\n", cublas_status);
        return 1;
      }
      const uint &input_size = ofeat;
      int num_blocks = 0;
      int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
      // Epilogue (bias and/or activation) as separate elementwise kernels,
      // sized by occupancy so the whole device is covered.
      if (use_bias == 1) {
        if (activation == 0) { // no activation
          cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          biasAdd_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size);
        } else if (activation == 1) { // relu
          cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAddRelu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          biasAddRelu_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size);
        } else if (activation == 2) { // sigmoid
          cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          biasAdd_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size);
          cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          Sigmoid_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size);
        }
      } else {
        // don't need to do anything in case of no activation and no bias
        if (activation == 1) { // relu
          cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          Relu_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size);
        } else if (activation == 2) { // sigmoid
          cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0);
          Sigmoid_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size);
        }
      }
    }
    // Set current output as next layer input
    reserved_space_x = reserved_space_y;
    // Set next layer output
    reserved_space_y += ofeat * batch_size;
  }
  if (lthandle_status == CUBLAS_STATUS_SUCCESS) cublasLtDestroy(ltHandle);
  return 0;
}
// Does a simple MLP bprop (GEMM+bias+ReLU).
// Needs reserved space to come back exactly as it was populated in fprop.
// Does dgrad and wgrad sequentially.
// Returns 0 on success, 1 on a GEMM failure.
// Fix: the malloc'd y_offsets array was leaked on every exit path; it is now
// freed before each return.
template <typename T>
int mlp_bp(
    T* X,
    T* Y,
    int input_features,
    int batch_size,
    T** WPtr,
    int num_layers,
    int* output_features,
    T* dY,
    T* reserved_space,
    T* work_space,
    T* dX,
    T** dwPtr,
    T** dbPtr,
    bool requires_grad,
    int use_bias,
    int activation) {
  T* weight;
  T *dweight, *dx, *dy, *dbias;
  T *x, *y;
  // Where the dx of the biasReLU (== dy of gemm) is stored. Can be thrown away
  // after bp call.
  T* dy_gemm_base;
  // Where the dx after GEMM is stored.
  T* dx_gemm_base;
  // Where partial reduction results are stored.
  float* db_scratch;
  // Semaphores for reduction.
  int* semaphores;
  partition_mlp_bp_workspace<T>(
      batch_size,
      num_layers,
      output_features,
      work_space,
      &dy_gemm_base,
      &dx_gemm_base,
      &db_scratch,
      &semaphores);
  size_t semaphore_size = get_semaphores_size(num_layers, output_features) * sizeof(int);
  // Get cublas handle from Pytorch
  cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  // Get the stream from cublas handle to reuse for biasReLU kernel.
  cudaStream_t stream;
  cublasGetStream(handle, &stream);
  int* y_offsets = (int*)malloc(num_layers * sizeof(int));
  get_y_offsets(batch_size, num_layers, output_features, y_offsets);
  // Walk layers back-to-front: epilogue backward, then dgrad GEMM, then wgrad GEMM.
  for (int layer = num_layers - 1; layer >= 0; layer--) {
    weight = WPtr[layer];
    dweight = dwPtr[layer];
    // x is read from reserved space
    x = (layer == 0) ? X : reserved_space + y_offsets[layer - 1];
    // dx is written in workspace for all but layer==0
    dx = (layer == 0) ? dX : dx_gemm_base + y_offsets[layer - 1];
    // y is read from reserved space
    y = (layer == num_layers - 1) ? Y : reserved_space + y_offsets[layer];
    // dx from layer+1
    dy = (layer == num_layers - 1) ? dY : dx_gemm_base + y_offsets[layer];
    // dy_gemm is written to and read immediately
    T* dy_gemm = dy_gemm_base + y_offsets[layer];
    dbias = dbPtr[layer];
    int xfeat = (layer == 0) ? input_features : output_features[layer - 1];
    int yfeat = output_features[layer];
    float one = 1.f;
    float zero = 0.f;
    if (use_bias == 1) {
      if (activation == 0) { // no activation
        // bgrad only; semaphores must be zeroed before each two-pass reduction.
        dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y);
        int grid_x, grid_y;
        cudaMemsetAsync(semaphores, 0, semaphore_size, stream);
        int block_x = BIAS_RELU_BW_NTHREADS_X;
        int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
        get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
        dim3 grid(grid_x, grid_y);
        biasAdd_bprop<T, 4><<<grid, block, 0, stream>>>(
            dy, yfeat, batch_size, db_scratch, semaphores, dbias);
        // bypass dgrad through reset pointer
        dy_gemm = dy;
      } else if (activation == 1) { // relu
        dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y);
        int grid_x, grid_y;
        cudaMemsetAsync(semaphores, 0, semaphore_size, stream);
        // Vectorized kernel requires divisibility and aligned pointers.
        if (yfeat % (ILP * BIAS_RELU_BW_NTHREADS_X) == 0 &&
            is_aligned(y) &&
            is_aligned(dy) &&
            is_aligned(dy_gemm) &&
            is_aligned(dbias)) {
          int block_x = ILP * BIAS_RELU_BW_NTHREADS_X;
          int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
          get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
          dim3 grid(grid_x, grid_y);
          biasAddRelu_bprop_aligned<T, 4><<<grid, block, 0, stream>>>(
              y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias);
        } else {
          int block_x = BIAS_RELU_BW_NTHREADS_X;
          int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
          get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
          dim3 grid(grid_x, grid_y);
          biasAddRelu_bprop<T, 4><<<grid, block, 0, stream>>>(
              y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias);
        }
      } else if (activation == 2) { // sigmoid
        // activation backward
        int num_blocks = 0;
        int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0);
        Sigmoid_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm);
        // bgrad, from dy_gemm
        dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y);
        int grid_x, grid_y;
        cudaMemsetAsync(semaphores, 0, semaphore_size, stream);
        int block_x = BIAS_RELU_BW_NTHREADS_X;
        int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y;
        get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y);
        dim3 grid(grid_x, grid_y);
        biasAdd_bprop<T, 4><<<grid, block, 0, stream>>>(
            dy_gemm, yfeat, batch_size, db_scratch, semaphores, dbias);
      }
    } else { // no bias below
      if (activation == 0) {
        // bypass dgrad through reset pointer
        dy_gemm = dy;
      } else if (activation == 1) { // relu
        int num_blocks = 0;
        int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_bprop<T>, BIAS_RELU_FW_NTHREADS, 0);
        Relu_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm);
      } else if (activation == 2) { // sigmoid
        int num_blocks = 0;
        int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0);
        Sigmoid_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm);
      }
    }
    cublasStatus_t cublas_status;
    // Call GEMM dgrad (skipped for layer 0 when the input grad is not needed)
    if (layer > 0 || requires_grad == 1) {
      cublas_status = mlp_gemm(
          handle,
          CUBLAS_OP_N,
          CUBLAS_OP_N,
          xfeat,
          batch_size,
          yfeat,
          &one,
          weight,
          xfeat,
          dy_gemm,
          yfeat,
          &zero,
          dx,
          xfeat);
      if (cublas_status != CUBLAS_STATUS_SUCCESS) {
        printf("GEMM dgrad failed with %d\n", cublas_status);
        free(y_offsets);  // fix: don't leak the offsets array on error
        return 1;
      }
    }
    // Call GEMM wgrad
    cublas_status = mlp_gemm(
        handle,
        CUBLAS_OP_N,
        CUBLAS_OP_T,
        xfeat,
        yfeat,
        batch_size,
        &one,
        x,
        xfeat,
        dy_gemm,
        yfeat,
        &zero,
        dweight,
        xfeat);
    if (cublas_status != CUBLAS_STATUS_SUCCESS) {
      printf("GEMM wgrad failed with %d\n", cublas_status);
      free(y_offsets);  // fix: don't leak the offsets array on error
      return 1;
    }
  }
  free(y_offsets);  // fix: was leaked on the success path too
  return 0;
}
// Instantiate for floating point types
template int mlp_fp<float>(
float* X,
int input_features,
int batch_size,
float** WPtr,
int num_layers,
int* output_features,
float** BPtr,
float* Y,
float* reserved_space,
int use_bias,
int activation,
void* lt_workspace);
template int mlp_bp<float>(
float* X,
float* Y,
int input_features,
int batch_size,
float** WPtr,
int num_layers,
int* output_features,
float* dY,
float* reserved_space,
float* work_space,
float* dX,
float** dwPtr,
float** dbPtr,
bool requires_grad,
int use_bias,
int activation);
template int mlp_fp<at::Half>(
at::Half* X,
int input_features,
int batch_size,
at::Half** WPtr,
int num_layers,
int* output_features,
at::Half** BPtr,
at::Half* Y,
at::Half* reserved_space,
int use_bias,
int activation,
void* lt_workspace);
template int mlp_bp<at::Half>(
at::Half* X,
at::Half* Y,
int input_features,
int batch_size,
at::Half** WPtr,
int num_layers,
int* output_features,
at::Half* dY,
at::Half* reserved_space,
at::Half* work_space,
at::Half* dX,
at::Half** dwPtr,
at::Half** dbPtr,
bool requires_grad,
int use_bias,
int activation);
template int mlp_fp<double>(
double* X,
int input_features,
int batch_size,
double** WPtr,
int num_layers,
int* output_features,
double** BPtr,
double* Y,
double* reserved_space,
int use_bias,
int activation,
void* lt_workspace);
template int mlp_bp<double>(
double* X,
double* Y,
int input_features,
int batch_size,
double** WPtr,
int num_layers,
int* output_features,
double* dY,
double* reserved_space,
double* work_space,
double* dX,
double** dwPtr,
double** dbPtr,
bool requires_grad,
int use_bias,
int activation);
template size_t get_mlp_bp_workspace_in_bytes<float>(
int batch_size,
int num_layers,
const int* output_features);
template size_t get_mlp_bp_workspace_in_bytes<at::Half>(
int batch_size,
int num_layers,
const int* output_features);
template size_t get_mlp_bp_workspace_in_bytes<double>(
int batch_size,
int num_layers,
const int* output_features);
|
1ac2d0c5576ffb1495c24a6205799cb8ac0ff86f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Reverse the first *len characters of a into b.
// Launched with one thread per character (block of *len threads); thread i
// is solely responsible for b[i]. The original also stored the mirrored
// element, so every byte was written twice by two different threads — a
// redundant same-value store removed here.
__global__ void rev(char *a, char *b, int *len)
{
    int id = threadIdx.x;
    b[id] = a[*len - id - 1];
}
// Read a word, reverse it on the GPU, print the result.
// Fixes: the original computed size = sizeof(int)*strlen(a), copying up to
// 4*len bytes from/to 20-byte stack buffers (out-of-bounds read and stack
// smash for len > 5), printed b without a NUL terminator, and used an
// unbounded scanf("%s").
int main(void)
{
    char a[20], b[20];
    int *d_m;
    char *d_a, *d_b;
    printf("Enter String:");
    scanf("%19s", a);              // bound the read to the buffer size
    int len = strlen(a);
    int size = len + 1;            // string bytes plus the terminator
    hipMalloc((void**)&d_a, size);
    hipMalloc((void**)&d_b, size);
    hipMalloc((void**)&d_m, sizeof(int));
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_m, &len, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(rev, dim3(1), dim3(len), 0, 0, d_a, d_b, d_m);
    hipMemcpy(b, d_b, len, hipMemcpyDeviceToHost);
    b[len] = '\0';                 // kernel writes only len bytes; terminate here
    printf("%s\n", b);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_m);
}
| 1ac2d0c5576ffb1495c24a6205799cb8ac0ff86f.cu | #include <stdio.h>
// Reverse the first *len characters of a into b.
// Launched with one thread per character (<<<1, len>>>); thread i is solely
// responsible for b[i]. The original also stored the mirrored element, so
// every byte was written twice by two different threads — a redundant
// same-value store removed here.
__global__ void rev(char *a, char *b, int *len)
{
    int id = threadIdx.x;
    b[id] = a[*len - id - 1];
}
// Read a word, reverse it on the GPU, print the result.
// Fixes: the original computed size = sizeof(int)*strlen(a), copying up to
// 4*len bytes from/to 20-byte stack buffers (out-of-bounds read and stack
// smash for len > 5), printed b without a NUL terminator, and used an
// unbounded scanf("%s").
int main(void)
{
    char a[20], b[20];
    int *d_m;
    char *d_a, *d_b;
    printf("Enter String:");
    scanf("%19s", a);              // bound the read to the buffer size
    int len = strlen(a);
    int size = len + 1;            // string bytes plus the terminator
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_m, sizeof(int));
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_m, &len, sizeof(int), cudaMemcpyHostToDevice);
    rev<<<1, len>>>(d_a, d_b, d_m);
    cudaMemcpy(b, d_b, len, cudaMemcpyDeviceToHost);
    b[len] = '\0';                 // kernel writes only len bytes; terminate here
    printf("%s\n", b);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_m);
}
|
d96e1f26fddfdf127318c58ef6ec4a9779467dd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "custom_cuda_layers.h"
const int unroll_factor = 4;
// Forward dropout for fp32.
// Keeps each of the N elements with probability (1 - ratio), scales survivors
// by 1/(1 - ratio), and records each keep/drop decision (1/0) in `mask`.
// Uses the counter-based Philox generator; `seed` is the (seed, offset) pair
// supplied by the caller so sequences are reproducible.
__global__ void dropout_kernel(const int N,
                               const float ratio,
                               float* out,
                               const float* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandStatePhilox4_32_10_t state;
    hiprand_init(seed.first, idx, seed.second, &state);

    // Main loop over 4-element chunks: one float4 of uniforms drives
    // unroll_factor consecutive elements per iteration.
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float4 rand = hiprand_uniform4(&state);
        uint8_t m[unroll_factor];

        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        int i = j * unroll_factor;

        mask[i] = (uint8_t)m[0];
        mask[i + 1] = (uint8_t)m[1];
        mask[i + 2] = (uint8_t)m[2];
        mask[i + 3] = (uint8_t)m[3];

        out[i] = Xdata[i] * scale * m[0];
        out[i + 1] = Xdata[i + 1] * scale * m[1];
        out[i + 2] = Xdata[i + 2] * scale * m[2];
        out[i + 3] = Xdata[i + 3] * scale * m[3];
    }
    // Tail: elements past the last full 4-element chunk; each surviving
    // thread pulls fresh uniforms and handles the remainder scalar-wise.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = hiprand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            out[i] = Xdata[i] * scale * m;
            mask[i] = m;
        }
    }
}
// Forward dropout for fp16 (__half).
// Same contract as the fp32 overload: keep with probability (1 - ratio),
// scale survivors by 1/(1 - ratio), record decisions in `mask`. The math is
// done in fp32 and converted back to half on store. The __STOCHASTIC_MODE__
// build does the whole thing with vectorized half2 loads/stores.
__global__ void dropout_kernel(const int N,
                               const float ratio,
                               __half* out,
                               const __half* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);

    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandStatePhilox4_32_10_t state;
    hiprand_init(seed.first, idx, seed.second, &state);

#ifdef __STOCHASTIC_MODE__

    const __half2 h_scale = __float2half2_rn(scale);
    // Reinterpret as float2 so each access moves four halves / four mask
    // bytes at once.
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);

    uint32_t m_32;
    uint8_t* m = reinterpret_cast<uint8_t*>(&m_32);

    float2 result_f;
    __half2* result_h = reinterpret_cast<__half2*>(&result_f);

    __half2 mask_h[2];
    float2 mask_f[2];

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_f = x_cast[j];
        __half2* x_h = reinterpret_cast<__half2*>(&x_f);

        float4 rand = hiprand_uniform4(&state);

        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        // Build a half2 mask so the scale-and-mask multiply stays vectorized.
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]);

        mask_h[0] = __float22half2_rn(mask_f[0]);
        mask_h[1] = __float22half2_rn(mask_f[1]);

        result_h[0] = x_h[0] * h_scale * mask_h[0];
        result_h[1] = x_h[1] * h_scale * mask_h[1];

        out_cast[j] = result_f;
        mask_cast[j] = m_32;
    }

#else

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;

        // Load four halves, widen to fp32 for the arithmetic.
        const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i);
        float2 vals_half_f[2];
        vals_half_f[0] = __half22float2(vals_half[0]);
        vals_half_f[1] = __half22float2(vals_half[1]);

        uint8_t m[unroll_factor];
        float4 rand = hiprand_uniform4(&state);
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        out[i] = __float2half(vals_half_f[0].x * scale * m[0]);
        out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]);
        out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]);
        out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]);

        mask[i] = m[0];
        mask[i + 1] = m[1];
        mask[i + 2] = m[2];
        mask[i + 3] = m[3];
    }

#endif
    // Tail: elements past the last full 4-element chunk, handled scalar-wise.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = hiprand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            out[i] = __float2half((float)Xdata[i] * scale * m);
            mask[i] = m;
        }
    }
}
// Backward dropout for fp32: replay the recorded mask on the incoming
// gradient, out[i] = mask[i] ? Xdata[i] / (1 - ratio) : 0.
// `seed` is accepted for signature symmetry with the forward kernel but is
// unused here — the decisions come entirely from `mask`.
__global__ void dropout_kernel_bwd(const int N,
                                   const float ratio,
                                   const float* Xdata,
                                   float* out,
                                   uint8_t* mask,
                                   std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    // Main loop over full 4-element chunks.
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;

        out[i] = mask[i] ? Xdata[i] * scale : 0.0;
        out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0;
        out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0;
        out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0;
    }
    // Tail: remaining elements past the last full chunk.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; }
    }
}
// Backward fp16 dropout: out[i] = Xdata[i] * scale when mask[i] != 0, else 0.
// `seed` is unused on this path; the signature mirrors the forward kernel.
__global__ void dropout_kernel_bwd(const int N,
                                   const float ratio,
                                   const __half* Xdata,
                                   __half* out,
                                   uint8_t* mask,
                                   std::pair<uint64_t, uint64_t> seed)
{
    // Inverted-dropout rescale factor applied to kept elements.
    const float scale = 1. / (1. - ratio);
#ifdef __STOCHASTIC_MODE__
    // Vector path: 4 halfs move as one float2 load/store; math stays in __half2.
    const __half2 h_scale = __float2half2_rn(scale);
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_f = x_cast[j];
        __half2* x_h = reinterpret_cast<__half2*>(&x_f);
        // One 32-bit load fetches all 4 mask bytes for this group.
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;
        __half2 mask_h[2];
        float2 mask_f[2];
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]);
#pragma unroll
        for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]);
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
        result_h[0] = x_h[0] * h_scale * mask_h[0];
        result_h[1] = x_h[1] * h_scale * mask_h[1];
        out_cast[j] = result_f;
    }
#else
    // Default path: widen to fp32, scale/mask there, round back to fp16.
    const __half h_scale = __float2half(scale);  // NOTE(review): unused in this branch
    const __half h_zero = __float2half(0.0);     // NOTE(review): unused in this branch
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;
        const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i);
        uint8_t* m = mask + i;
        float2 vals_half_f[2];
        vals_half_f[0] = __half22float2(vals_half[0]);
        vals_half_f[1] = __half22float2(vals_half[1]);
        out[i] = __float2half(vals_half_f[0].x * scale * m[0]);
        out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]);
        out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]);
        out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]);
    }
#endif
    // Scalar tail for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            out[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
// Host launcher for plain dropout over `total_count` elements.
// bwd == false: draw a fresh mask and apply it (dropout_kernel).
// bwd == true : replay the stored mask on the gradient (dropout_kernel_bwd).
template <typename T>
void launch_dropout(T* out,
                    const T* vals,
                    uint8_t* mask,
                    int total_count,
                    int dim,
                    float ratio,
                    hipStream_t stream,
                    bool bwd)
{
    // The kernels above hard-code 4-wide unrolling.
    assert(unroll_factor == 4);

    dim3 grid = DS_GET_BLOCKS(total_count / unroll_factor);
    dim3 block = DS_CUDA_NUM_THREADS;
    // Wide rows: halve the block and double the grid (same thread count).
    if (dim > 512) {
        block.x >>= 1;
        grid.x <<= 1;
    }

    // Advance the shared Philox offset so successive launches draw fresh numbers.
    const uint64_t inc = total_count / grid.x / block.x;
    const std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);

    if (bwd) {
        hipLaunchKernelGGL((dropout_kernel_bwd), grid, block, 0, stream,
                           total_count, ratio, vals, out, mask, seed);
    } else {
        hipLaunchKernelGGL((dropout_kernel), grid, block, 0, stream,
                           total_count, ratio, out, vals, mask, seed);
    }
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout(float* out,
                             const float* vals,
                             uint8_t* mask,
                             int total_count,
                             int dim,
                             float ratio,
                             hipStream_t stream,
                             bool);
template void launch_dropout(__half* out,
                             const __half* vals,
                             uint8_t* mask,
                             int total_count,
                             int dim,
                             float ratio,
                             hipStream_t stream,
                             bool);
// In-place fp32 dropout backward: kept elements (mask[i] == 1) are rescaled by
// `scale`; dropped elements (mask[i] == 0) become zero.
__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask)
{
    CUDA_1D_KERNEL_LOOP(i, N)
    {
        const float keep = scale * mask[i];
        Xdata[i] *= keep;
    }
}
// In-place fp16 dropout backward: Xdata[i] *= scale * mask[i], 4 elements per
// loop iteration via a single float2 (= 4 packed halfs) load/store.
__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask)
{
    const __half2 h_scale = __float2half2_rn(scale);  // used only in stochastic branch
    float2* x_cast = reinterpret_cast<float2*>(Xdata);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_data = x_cast[j];
        // One 32-bit load fetches all 4 mask bytes for this group.
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
#ifdef __STOCHASTIC_MODE__
        // Stochastic build: keep the arithmetic in __half2.
        __half2* x_data_h = reinterpret_cast<__half2*>(&x_data);
        __half2 mask_h[2];
        float2 mask_f[2];
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]);
        mask_h[0] = __float22half2_rn(mask_f[0]);
        mask_h[1] = __float22half2_rn(mask_f[1]);
        result_h[0] = x_data_h[0] * h_scale * mask_h[0];
        result_h[1] = x_data_h[1] * h_scale * mask_h[1];
#else
        // Default build: widen each half to fp32, scale/mask, round back.
        __half* x_data_h = reinterpret_cast<__half*>(&x_data);
        float2 result[2];
        result[0].x = (float)x_data_h[0] * scale * m[0];
        result[0].y = (float)x_data_h[1] * scale * m[1];
        result[1].x = (float)x_data_h[2] * scale * m[2];
        result[1].y = (float)x_data_h[3] * scale * m[3];
        result_h[0] = __float22half2_rn(result[0]);
        result_h[1] = __float22half2_rn(result[1]);
#endif
        x_cast[j] = result_f;
    }
    // Scalar tail for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
// In-place dropout backward launcher: vals[i] *= mask[i] / (1 - ratio).
template <typename T>
void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream)
{
    assert(unroll_factor == 4);
    const float keep_scale = 1. / (1. - ratio);
    const dim3 grid = DS_GET_BLOCKS(total_count / unroll_factor);
    const dim3 block = DS_CUDA_NUM_THREADS;
    hipLaunchKernelGGL((dropout_grad_kernel), grid, block, 0, stream,
                       total_count, keep_scale, vals, mask);
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout_grad(float* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  hipStream_t stream);
template void launch_dropout_grad(__half* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  hipStream_t stream);
// Out-of-place fp32 dropout backward: out[i] = Xdata[i] * scale * mask[i]
// (kept elements rescaled, dropped elements zeroed).
__global__ void dropout_grad_kernel(const int N,
                                    const float scale,
                                    const float* Xdata,
                                    float* out,
                                    uint8_t* mask)
{
    CUDA_1D_KERNEL_LOOP(i, N)
    {
        const float scaled = Xdata[i] * scale;
        out[i] = scaled * mask[i];
    }
}
// Out-of-place fp16 dropout backward: out[i] = Xdata[i] * scale * mask[i],
// 4 halfs per iteration via float2 vector loads/stores.
// NOTE(review): result_f/result_h are declared outside the loop but are fully
// rewritten each iteration before being stored.
__global__ void dropout_grad_kernel(const int N,
                                    const float scale,
                                    const __half* Xdata,
                                    __half* out,
                                    uint8_t* mask)
{
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    const uint32_t* mask_cast = reinterpret_cast<const uint32_t*>(mask);
    float2 result_f;
    __half2* result_h = reinterpret_cast<__half2*>(&result_f);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_data = x_cast[j];
        // One 32-bit load fetches all 4 mask bytes for this group.
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;
        // Treat the float2 as 4 packed halfs; compute in fp32, round back.
        __half* x_data_h = reinterpret_cast<__half*>(&x_data);
        float2 result[2];
        result[0].x = (float)x_data_h[0] * scale * m[0];
        result[0].y = (float)x_data_h[1] * scale * m[1];
        result[1].x = (float)x_data_h[2] * scale * m[2];
        result[1].y = (float)x_data_h[3] * scale * m[3];
        result_h[0] = __float22half2_rn(result[0]);
        result_h[1] = __float22half2_rn(result[1]);
        out_cast[j] = result_f;
    }
    // Scalar tail for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            out[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
// Out-of-place dropout backward launcher: vals_out = vals * mask / (1 - ratio).
template <typename T>
void launch_dropout_grad(T* vals_out,
                         const T* vals,
                         uint8_t* mask,
                         int total_count,
                         float ratio,
                         hipStream_t stream)
{
    assert(unroll_factor == 4);
    const float keep_scale = 1. / (1. - ratio);
    const dim3 grid = DS_GET_BLOCKS(total_count / unroll_factor);
    const dim3 block = DS_CUDA_NUM_THREADS;
    hipLaunchKernelGGL((dropout_grad_kernel), grid, block, 0, stream,
                       total_count, keep_scale, vals, vals_out, mask);
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout_grad(float*,
                                  const float* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  hipStream_t stream);
template void launch_dropout_grad(__half*,
                                  const __half* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  hipStream_t stream);
// Forward fp32 dropout fused with bias-add, in-place on Xdata.
// N counts float4 groups (total elements / unroll_factor); `bias` is a row of
// `dim` elements broadcast across the batch via the `j % (dim/unroll_factor)`
// index. The keep mask (1 byte per element) is stored through `mask`.
// Fix: removed the unused local `tid` (dead code in the original).
// NOTE(review): the tail loop indexes scalars in [high_index, N) while the
// main loop treats N as a group count — callers appear to size N so the tail
// never triggers (dim divisible by 4); confirm against launch_dropout.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const float* bias,
                               float* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    // Inverted dropout: survivors are rescaled so the expectation is preserved.
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    hiprand_init(seed.first, idx, seed.second, &state);
    float4* Xdata_cast = reinterpret_cast<float4*>(Xdata);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        // Four Bernoulli(1 - ratio) keep decisions per group.
        float4 rand = hiprand_uniform4(&state);
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        float4 x_data = Xdata_cast[j];
        float4 b_data = bias_cast[j % (dim / unroll_factor)];
        x_data.x += b_data.x;
        x_data.y += b_data.y;
        x_data.z += b_data.z;
        x_data.w += b_data.w;
        x_data.x = x_data.x * scale * m[0];
        x_data.y = x_data.y * scale * m[1];
        x_data.z = x_data.z * scale * m[2];
        x_data.w = x_data.w * scale * m[3];
        // Store 4 mask bytes with one 32-bit write, then the float4 result.
        mask_32[j] = m_32;
        Xdata_cast[j] = x_data;
    }
    // Scalar tail with a fresh draw for the remainder elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = hiprand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = Xdata[i] + bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            Xdata[i] = x_data * scale * m;
            mask[i] = m;
        }
    }
}
// Forward fp16 dropout fused with bias-add, in-place on Xdata.
// N counts 4-element groups; `bias` is a row of `dim` halfs broadcast across
// the batch.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const __half* bias,
                               __half* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x % (dim / unroll_factor);  // NOTE(review): unused
    hiprandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    hiprand_init(seed.first, idx, seed.second, &state);
    float2* Xdata_cast = reinterpret_cast<float2*>(Xdata);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = hiprand_uniform4(&state);
        float2 data_f;
        __half2* data_h = reinterpret_cast<__half2*>(&data_f);
        float2 bias_f;
        __half2* bias_h = reinterpret_cast<__half2*>(&bias_f);
        data_f = Xdata_cast[j];
        bias_f = bias_cast[j % (dim / unroll_factor)];
        // Widen the 4 data halfs and 4 bias halfs to fp32 pairs.
        float2 data_h_0 = __half22float2(data_h[0]);
        float2 data_h_1 = __half22float2(data_h[1]);
        float2 bias_h_0 = __half22float2(bias_h[0]);
        float2 bias_h_1 = __half22float2(bias_h[1]);
        data_h_0.x += bias_h_0.x;
        data_h_0.y += bias_h_0.y;
        data_h_1.x += bias_h_1.x;
        data_h_1.y += bias_h_1.y;
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        // NOTE(review): __float2half here is converted straight back to float
        // on assignment to a float field; the round-trip is redundant (final
        // rounding happens in __float22half2_rn below) but not harmful.
        data_h_0.x = __float2half(data_h_0.x * scale * m[0]);
        data_h_0.y = __float2half(data_h_0.y * scale * m[1]);
        data_h_1.x = __float2half(data_h_1.x * scale * m[2]);
        data_h_1.y = __float2half(data_h_1.y * scale * m[3]);
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
        result_h[0] = __float22half2_rn(data_h_0);
        result_h[1] = __float22half2_rn(data_h_1);
        Xdata_cast[j] = result_f;
        mask_32[j] = m_32;
    }
    // Scalar tail with a fresh draw for the remainder elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = hiprand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = (float)Xdata[i] + (float)bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            Xdata[i] = __float2half(x_data * scale * m);
            mask[i] = m;
        }
    }
}
// Bias-add + dropout launcher, in-place on `out`: out = dropout(out + bias).
template <typename T>
void launch_dropout(T* out,
                    const T* bias,
                    uint8_t* mask,
                    int batch,
                    int dim,
                    float ratio,
                    hipStream_t stream)
{
    assert(unroll_factor == 4);
    // Kernel iterates over 4-wide groups, so divide the element count by 4.
    const int total_count = batch * dim / unroll_factor;
    const dim3 grid = DS_GET_BLOCKS(total_count);
    const dim3 block = DS_CUDA_NUM_THREADS;
    // Advance the shared Philox offset by the per-thread consumption.
    const uint64_t inc = (batch * dim) / grid.x / block.x;
    const std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
    hipLaunchKernelGGL((dropout_kernel), grid, block, 0, stream,
                       total_count, dim, ratio, bias, out, mask, seed);
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout(float*,
                             const float* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             hipStream_t stream);
template void launch_dropout(__half*,
                             const __half* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             hipStream_t stream);
// Forward fp32 dropout fused with bias-add and residual-add:
//   out = dropout(input + bias) + residual
// Note the residual is added AFTER masking/rescaling, so it is never dropped.
// N counts float4 groups; `bias` is a row of `dim` elements broadcast across
// the batch. Keep mask is stored one byte per element through `mask`.
// Fix: removed the unused local `tid` (dead code in the original).
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const float* input,
                               const float* residual,
                               const float* bias,
                               float* out,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    // Inverted dropout: survivors are rescaled so the expectation is preserved.
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    hiprand_init(seed.first, idx, seed.second, &state);
    float4* out_cast = reinterpret_cast<float4*>(out);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);
    const float4* residual_cast = reinterpret_cast<const float4*>(residual);
    const float4* input_cast = reinterpret_cast<const float4*>(input);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        // Four Bernoulli(1 - ratio) keep decisions per group.
        float4 rand = hiprand_uniform4(&state);
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        float4 out_data;
        float4 b_data = bias_cast[j % (dim / unroll_factor)];
        float4 res_data = residual_cast[j];
        float4 inp_data = input_cast[j];
        out_data.x = (b_data.x + inp_data.x);
        out_data.y = (b_data.y + inp_data.y);
        out_data.z = (b_data.z + inp_data.z);
        out_data.w = (b_data.w + inp_data.w);
        out_data.x = out_data.x * scale * m[0];
        out_data.y = out_data.y * scale * m[1];
        out_data.z = out_data.z * scale * m[2];
        out_data.w = out_data.w * scale * m[3];
        // Residual joins after the mask so it always survives.
        out_data.x += res_data.x;
        out_data.y += res_data.y;
        out_data.z += res_data.z;
        out_data.w += res_data.w;
        mask_32[j] = m_32;
        out_cast[j] = out_data;
    }
    // Scalar tail with a fresh draw for the remainder elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = hiprand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = input[i] + bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            x_data = x_data * scale * m;
            x_data += residual[i];
            out[i] = x_data;
            mask[i] = m;
        }
    }
}
// Forward fp16 dropout fused with bias-add and residual-add:
//   out = dropout(input + bias) + residual  (residual is never dropped).
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const __half* input,
                               const __half* residual,
                               const __half* bias,
                               __half* out,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x % (dim / unroll_factor);  // NOTE(review): unused
    hiprandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    hiprand_init(seed.first, idx, seed.second, &state);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);
    const float2* residual_cast = reinterpret_cast<const float2*>(residual);
    const float2* input_cast = reinterpret_cast<const float2*>(input);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = hiprand_uniform4(&state);
        float2 data_f;
        __half2* data_h = reinterpret_cast<__half2*>(&data_f);
        float2 bias_f;
        __half2* bias_h = reinterpret_cast<__half2*>(&bias_f);
        float2 residual_f;
        __half2* residual_h = reinterpret_cast<__half2*>(&residual_f);
        float2 input_f;
        __half2* input_h = reinterpret_cast<__half2*>(&input_f);
        bias_f = bias_cast[j % (dim / unroll_factor)];
        residual_f = residual_cast[j];
        input_f = input_cast[j];
        // NOTE(review): data_f is never loaded, so these two conversions read
        // uninitialized stack memory; the results are fully overwritten below
        // (dead reads — candidates for removal).
        float2 data_h_0 = __half22float2(data_h[0]);
        float2 data_h_1 = __half22float2(data_h[1]);
        float2 bias_h_0 = __half22float2(bias_h[0]);
        float2 bias_h_1 = __half22float2(bias_h[1]);
        float2 residual_h_0 = __half22float2(residual_h[0]);
        float2 residual_h_1 = __half22float2(residual_h[1]);
        float2 input_h_0 = __half22float2(input_h[0]);
        float2 input_h_1 = __half22float2(input_h[1]);
        data_h_0.x = (bias_h_0.x + input_h_0.x);
        data_h_0.y = (bias_h_0.y + input_h_0.y);
        data_h_1.x = (bias_h_1.x + input_h_1.x);
        data_h_1.y = (bias_h_1.y + input_h_1.y);
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        // NOTE(review): __float2half converted straight back to float on
        // assignment — redundant round-trip; final rounding happens below.
        data_h_0.x = __float2half(data_h_0.x * scale * m[0]);
        data_h_0.y = __float2half(data_h_0.y * scale * m[1]);
        data_h_1.x = __float2half(data_h_1.x * scale * m[2]);
        data_h_1.y = __float2half(data_h_1.y * scale * m[3]);
        // Residual joins after the mask so it always survives.
        data_h_0.x += residual_h_0.x;
        data_h_0.y += residual_h_0.y;
        data_h_1.x += residual_h_1.x;
        data_h_1.y += residual_h_1.y;
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
        result_h[0] = __float22half2_rn(data_h_0);
        result_h[1] = __float22half2_rn(data_h_1);
        out_cast[j] = result_f;
        mask_32[j] = m_32;
    }
    // Scalar tail with a fresh draw for the remainder elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = hiprand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = (float)input[i] + (float)bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            x_data = x_data * scale * m;
            x_data += (float)residual[i];
            out[i] = __float2half(x_data);
            mask[i] = m;
        }
    }
}
// Launcher for the fused bias-add + dropout + residual-add kernel:
//   out = dropout(input + bias) + residual
template <typename T>
void launch_dropout(T* out,
                    const T* input,
                    const T* residual,
                    const T* bias,
                    uint8_t* mask,
                    int batch,
                    int dim,
                    float ratio,
                    hipStream_t stream)
{
    assert(unroll_factor == 4);
    // Kernel iterates over 4-wide groups, so divide the element count by 4.
    const int total_count = batch * dim / unroll_factor;
    const dim3 grid = DS_GET_BLOCKS(total_count);
    const dim3 block = DS_CUDA_NUM_THREADS;
    // Advance the shared Philox offset by the per-thread consumption.
    const uint64_t inc = (batch * dim) / grid.x / block.x;
    const std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
    hipLaunchKernelGGL((dropout_kernel), grid, block, 0, stream,
                       total_count, dim, ratio, input, residual, bias, out, mask, seed);
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout(float*,
                             const float*,
                             const float* residual,
                             const float* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             hipStream_t stream);
template void launch_dropout(__half*,
                             const __half*,
                             const __half* residual,
                             const __half* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             hipStream_t stream);
| d96e1f26fddfdf127318c58ef6ec4a9779467dd5.cu | #include "custom_cuda_layers.h"
// Elements processed per loop iteration in every kernel below; the launchers
// assert this stays 4 (float4 / 4x__half vector width).
const int unroll_factor = 4;
// Forward fp32 dropout: draws 4 uniforms per group, stores the keep mask and
// writes out[i] = Xdata[i] * scale for kept elements, 0 for dropped ones.
__global__ void dropout_kernel(const int N,
                               const float ratio,
                               float* out,
                               const float* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    // Inverted dropout: survivors are rescaled so the expectation is preserved.
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    curand_init(seed.first, idx, seed.second, &state);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float4 rand = curand_uniform4(&state);
        uint8_t m[unroll_factor];
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        int i = j * unroll_factor;
        mask[i] = (uint8_t)m[0];
        mask[i + 1] = (uint8_t)m[1];
        mask[i + 2] = (uint8_t)m[2];
        mask[i + 3] = (uint8_t)m[3];
        out[i] = Xdata[i] * scale * m[0];
        out[i + 1] = Xdata[i + 1] * scale * m[1];
        out[i + 2] = Xdata[i + 2] * scale * m[2];
        out[i + 3] = Xdata[i + 3] * scale * m[3];
    }
    // Scalar tail: threads just past the unrolled range finish the last
    // N % unroll_factor elements with a fresh draw.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            out[i] = Xdata[i] * scale * m;
            mask[i] = m;
        }
    }
}
// Forward fp16 dropout: draws 4 uniforms per group, stores the keep mask and
// writes out[i] = Xdata[i] * scale for kept elements, 0 for dropped ones.
__global__ void dropout_kernel(const int N,
                               const float ratio,
                               __half* out,
                               const __half* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    curand_init(seed.first, idx, seed.second, &state);
#ifdef __STOCHASTIC_MODE__
    // Vector path: 4 halfs move as one float2 load/store; math stays in __half2.
    const __half2 h_scale = __float2half2_rn(scale);
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);
    uint32_t m_32;
    uint8_t* m = reinterpret_cast<uint8_t*>(&m_32);
    float2 result_f;
    __half2* result_h = reinterpret_cast<__half2*>(&result_f);
    __half2 mask_h[2];
    float2 mask_f[2];
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_f = x_cast[j];
        __half2* x_h = reinterpret_cast<__half2*>(&x_f);
        float4 rand = curand_uniform4(&state);
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]);
        mask_h[0] = __float22half2_rn(mask_f[0]);
        mask_h[1] = __float22half2_rn(mask_f[1]);
        result_h[0] = x_h[0] * h_scale * mask_h[0];
        result_h[1] = x_h[1] * h_scale * mask_h[1];
        out_cast[j] = result_f;
        mask_cast[j] = m_32;  // 4 mask bytes stored with one 32-bit write
    }
#else
    // Default path: widen to fp32, scale/mask there, round back to fp16.
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;
        const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i);
        float2 vals_half_f[2];
        vals_half_f[0] = __half22float2(vals_half[0]);
        vals_half_f[1] = __half22float2(vals_half[1]);
        uint8_t m[unroll_factor];
        float4 rand = curand_uniform4(&state);
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        out[i] = __float2half(vals_half_f[0].x * scale * m[0]);
        out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]);
        out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]);
        out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]);
        mask[i] = m[0];
        mask[i + 1] = m[1];
        mask[i + 2] = m[2];
        mask[i + 3] = m[3];
    }
#endif
    // Scalar tail with a fresh draw for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            out[i] = __float2half((float)Xdata[i] * scale * m);
            mask[i] = m;
        }
    }
}
// Backward dropout (fp32): rescale kept elements by scale = 1/(1-ratio) and
// zero dropped ones, replaying the keep/drop decision from `mask`.
// `seed` is unused on this path; the signature mirrors the forward kernel so
// the launcher can pass an identical argument list.
__global__ void dropout_kernel_bwd(const int N,
                                   const float ratio,
                                   const float* Xdata,
                                   float* out,
                                   uint8_t* mask,
                                   std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);

    // Main body: each loop iteration covers `unroll_factor` consecutive elements.
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        const int base = j * unroll_factor;
#pragma unroll
        for (int k = 0; k < unroll_factor; k++) {
            out[base + k] = mask[base + k] ? Xdata[base + k] * scale : 0.0;
        }
    }

    // Tail: the last N % unroll_factor elements, handled one at a time by the
    // threads immediately past the unrolled range.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; }
    }
}
// Backward fp16 dropout: out[i] = Xdata[i] * scale when mask[i] != 0, else 0.
// `seed` is unused on this path; the signature mirrors the forward kernel.
__global__ void dropout_kernel_bwd(const int N,
                                   const float ratio,
                                   const __half* Xdata,
                                   __half* out,
                                   uint8_t* mask,
                                   std::pair<uint64_t, uint64_t> seed)
{
    // Inverted-dropout rescale factor applied to kept elements.
    const float scale = 1. / (1. - ratio);
#ifdef __STOCHASTIC_MODE__
    // Vector path: 4 halfs move as one float2 load/store; math stays in __half2.
    const __half2 h_scale = __float2half2_rn(scale);
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_f = x_cast[j];
        __half2* x_h = reinterpret_cast<__half2*>(&x_f);
        // One 32-bit load fetches all 4 mask bytes for this group.
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;
        __half2 mask_h[2];
        float2 mask_f[2];
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]);
#pragma unroll
        for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]);
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
        result_h[0] = x_h[0] * h_scale * mask_h[0];
        result_h[1] = x_h[1] * h_scale * mask_h[1];
        out_cast[j] = result_f;
    }
#else
    // Default path: widen to fp32, scale/mask there, round back to fp16.
    const __half h_scale = __float2half(scale);  // NOTE(review): unused in this branch
    const __half h_zero = __float2half(0.0);     // NOTE(review): unused in this branch
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;
        const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i);
        uint8_t* m = mask + i;
        float2 vals_half_f[2];
        vals_half_f[0] = __half22float2(vals_half[0]);
        vals_half_f[1] = __half22float2(vals_half[1]);
        out[i] = __float2half(vals_half_f[0].x * scale * m[0]);
        out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]);
        out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]);
        out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]);
    }
#endif
    // Scalar tail for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            out[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
// Host launcher for plain dropout over `total_count` elements.
// bwd == false: draw a fresh mask and apply it (dropout_kernel).
// bwd == true : replay the stored mask on the gradient (dropout_kernel_bwd).
template <typename T>
void launch_dropout(T* out,
                    const T* vals,
                    uint8_t* mask,
                    int total_count,
                    int dim,
                    float ratio,
                    cudaStream_t stream,
                    bool bwd)
{
    // The kernels above hard-code 4-wide unrolling.
    assert(unroll_factor == 4);

    dim3 grid = DS_GET_BLOCKS(total_count / unroll_factor);
    dim3 block = DS_CUDA_NUM_THREADS;
    // Wide rows: halve the block and double the grid (same thread count).
    if (dim > 512) {
        block.x >>= 1;
        grid.x <<= 1;
    }

    // Advance the shared Philox offset so successive launches draw fresh numbers.
    const uint64_t inc = total_count / grid.x / block.x;
    const std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);

    if (bwd) {
        dropout_kernel_bwd<<<grid, block, 0, stream>>>(
            total_count, ratio, vals, out, mask, seed);
    } else {
        dropout_kernel<<<grid, block, 0, stream>>>(
            total_count, ratio, out, vals, mask, seed);
    }
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout(float* out,
                             const float* vals,
                             uint8_t* mask,
                             int total_count,
                             int dim,
                             float ratio,
                             cudaStream_t stream,
                             bool);
template void launch_dropout(__half* out,
                             const __half* vals,
                             uint8_t* mask,
                             int total_count,
                             int dim,
                             float ratio,
                             cudaStream_t stream,
                             bool);
// In-place fp32 dropout backward: kept elements (mask[i] == 1) are rescaled by
// `scale`; dropped elements (mask[i] == 0) become zero.
__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask)
{
    CUDA_1D_KERNEL_LOOP(i, N)
    {
        const float keep = scale * mask[i];
        Xdata[i] *= keep;
    }
}
// In-place fp16 dropout backward: Xdata[i] *= scale * mask[i], 4 elements per
// loop iteration via a single float2 (= 4 packed halfs) load/store.
__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask)
{
    const __half2 h_scale = __float2half2_rn(scale);  // used only in stochastic branch
    float2* x_cast = reinterpret_cast<float2*>(Xdata);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_data = x_cast[j];
        // One 32-bit load fetches all 4 mask bytes for this group.
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
#ifdef __STOCHASTIC_MODE__
        // Stochastic build: keep the arithmetic in __half2.
        __half2* x_data_h = reinterpret_cast<__half2*>(&x_data);
        __half2 mask_h[2];
        float2 mask_f[2];
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]);
        mask_h[0] = __float22half2_rn(mask_f[0]);
        mask_h[1] = __float22half2_rn(mask_f[1]);
        result_h[0] = x_data_h[0] * h_scale * mask_h[0];
        result_h[1] = x_data_h[1] * h_scale * mask_h[1];
#else
        // Default build: widen each half to fp32, scale/mask, round back.
        __half* x_data_h = reinterpret_cast<__half*>(&x_data);
        float2 result[2];
        result[0].x = (float)x_data_h[0] * scale * m[0];
        result[0].y = (float)x_data_h[1] * scale * m[1];
        result[1].x = (float)x_data_h[2] * scale * m[2];
        result[1].y = (float)x_data_h[3] * scale * m[3];
        result_h[0] = __float22half2_rn(result[0]);
        result_h[1] = __float22half2_rn(result[1]);
#endif
        x_cast[j] = result_f;
    }
    // Scalar tail for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
// In-place dropout backward launcher: vals[i] *= mask[i] / (1 - ratio).
template <typename T>
void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream)
{
    assert(unroll_factor == 4);
    const float keep_scale = 1. / (1. - ratio);
    const dim3 grid = DS_GET_BLOCKS(total_count / unroll_factor);
    const dim3 block = DS_CUDA_NUM_THREADS;
    dropout_grad_kernel<<<grid, block, 0, stream>>>(total_count, keep_scale, vals, mask);
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout_grad(float* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
template void launch_dropout_grad(__half* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
// Out-of-place fp32 dropout backward: out[i] = Xdata[i] * scale * mask[i]
// (kept elements rescaled, dropped elements zeroed).
__global__ void dropout_grad_kernel(const int N,
                                    const float scale,
                                    const float* Xdata,
                                    float* out,
                                    uint8_t* mask)
{
    CUDA_1D_KERNEL_LOOP(i, N)
    {
        const float scaled = Xdata[i] * scale;
        out[i] = scaled * mask[i];
    }
}
// Out-of-place fp16 dropout backward: out[i] = Xdata[i] * scale * mask[i],
// 4 halfs per iteration via float2 vector loads/stores.
// NOTE(review): result_f/result_h are declared outside the loop but are fully
// rewritten each iteration before being stored.
__global__ void dropout_grad_kernel(const int N,
                                    const float scale,
                                    const __half* Xdata,
                                    __half* out,
                                    uint8_t* mask)
{
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    const uint32_t* mask_cast = reinterpret_cast<const uint32_t*>(mask);
    float2 result_f;
    __half2* result_h = reinterpret_cast<__half2*>(&result_f);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_data = x_cast[j];
        // One 32-bit load fetches all 4 mask bytes for this group.
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;
        // Treat the float2 as 4 packed halfs; compute in fp32, round back.
        __half* x_data_h = reinterpret_cast<__half*>(&x_data);
        float2 result[2];
        result[0].x = (float)x_data_h[0] * scale * m[0];
        result[0].y = (float)x_data_h[1] * scale * m[1];
        result[1].x = (float)x_data_h[2] * scale * m[2];
        result[1].y = (float)x_data_h[3] * scale * m[3];
        result_h[0] = __float22half2_rn(result[0]);
        result_h[1] = __float22half2_rn(result[1]);
        out_cast[j] = result_f;
    }
    // Scalar tail for the last N % unroll_factor elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            out[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
// Out-of-place dropout backward launcher: vals_out = vals * mask / (1 - ratio).
template <typename T>
void launch_dropout_grad(T* vals_out,
                         const T* vals,
                         uint8_t* mask,
                         int total_count,
                         float ratio,
                         cudaStream_t stream)
{
    assert(unroll_factor == 4);
    const float keep_scale = 1. / (1. - ratio);
    const dim3 grid = DS_GET_BLOCKS(total_count / unroll_factor);
    const dim3 block = DS_CUDA_NUM_THREADS;
    dropout_grad_kernel<<<grid, block, 0, stream>>>(
        total_count, keep_scale, vals, vals_out, mask);
}
// Explicit instantiations: fp32 and fp16 element types.
template void launch_dropout_grad(float*,
                                  const float* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
template void launch_dropout_grad(__half*,
                                  const __half* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
// Forward fp32 dropout fused with bias-add, in-place on Xdata.
// N counts float4 groups (total elements / unroll_factor); `bias` is a row of
// `dim` elements broadcast across the batch via the `j % (dim/unroll_factor)`
// index. The keep mask (1 byte per element) is stored through `mask`.
// Fix: removed the unused local `tid` (dead code in the original).
// NOTE(review): the tail loop indexes scalars in [high_index, N) while the
// main loop treats N as a group count — callers appear to size N so the tail
// never triggers (dim divisible by 4); confirm against launch_dropout.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const float* bias,
                               float* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    // Inverted dropout: survivors are rescaled so the expectation is preserved.
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    curand_init(seed.first, idx, seed.second, &state);
    float4* Xdata_cast = reinterpret_cast<float4*>(Xdata);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        // Four Bernoulli(1 - ratio) keep decisions per group.
        float4 rand = curand_uniform4(&state);
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        float4 x_data = Xdata_cast[j];
        float4 b_data = bias_cast[j % (dim / unroll_factor)];
        x_data.x += b_data.x;
        x_data.y += b_data.y;
        x_data.z += b_data.z;
        x_data.w += b_data.w;
        x_data.x = x_data.x * scale * m[0];
        x_data.y = x_data.y * scale * m[1];
        x_data.z = x_data.z * scale * m[2];
        x_data.w = x_data.w * scale * m[3];
        // Store 4 mask bytes with one 32-bit write, then the float4 result.
        mask_32[j] = m_32;
        Xdata_cast[j] = x_data;
    }
    // Scalar tail with a fresh draw for the remainder elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = Xdata[i] + bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            Xdata[i] = x_data * scale * m;
            mask[i] = m;
        }
    }
}
// Forward fp16 dropout fused with bias-add, in-place on Xdata.
// N counts 4-element groups; `bias` is a row of `dim` halfs broadcast across
// the batch.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const __half* bias,
                               __half* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x % (dim / unroll_factor);  // NOTE(review): unused
    curandStatePhilox4_32_10_t state;
    // Philox: (seed, subsequence=idx, offset) gives each thread its own stream.
    curand_init(seed.first, idx, seed.second, &state);
    float2* Xdata_cast = reinterpret_cast<float2*>(Xdata);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);
        float2 data_f;
        __half2* data_h = reinterpret_cast<__half2*>(&data_f);
        float2 bias_f;
        __half2* bias_h = reinterpret_cast<__half2*>(&bias_f);
        data_f = Xdata_cast[j];
        bias_f = bias_cast[j % (dim / unroll_factor)];
        // Widen the 4 data halfs and 4 bias halfs to fp32 pairs.
        float2 data_h_0 = __half22float2(data_h[0]);
        float2 data_h_1 = __half22float2(data_h[1]);
        float2 bias_h_0 = __half22float2(bias_h[0]);
        float2 bias_h_1 = __half22float2(bias_h[1]);
        data_h_0.x += bias_h_0.x;
        data_h_0.y += bias_h_0.y;
        data_h_1.x += bias_h_1.x;
        data_h_1.y += bias_h_1.y;
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        // NOTE(review): __float2half here is converted straight back to float
        // on assignment to a float field; the round-trip is redundant (final
        // rounding happens in __float22half2_rn below) but not harmful.
        data_h_0.x = __float2half(data_h_0.x * scale * m[0]);
        data_h_0.y = __float2half(data_h_0.y * scale * m[1]);
        data_h_1.x = __float2half(data_h_1.x * scale * m[2]);
        data_h_1.y = __float2half(data_h_1.y * scale * m[3]);
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
        result_h[0] = __float22half2_rn(data_h_0);
        result_h[1] = __float22half2_rn(data_h_1);
        Xdata_cast[j] = result_f;
        mask_32[j] = m_32;
    }
    // Scalar tail with a fresh draw for the remainder elements.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = (float)Xdata[i] + (float)bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            Xdata[i] = __float2half(x_data * scale * m);
            mask[i] = m;
        }
    }
}
/**
 * Launches the fused bias-add + dropout kernel in place on `out`.
 * The per-element keep mask is written to `mask`; `ratio` is the drop
 * probability. Interface is unchanged; callers are unaffected.
 */
template <typename T>
void launch_dropout(T* out,
                    const T* bias,
                    uint8_t* mask,
                    int batch,
                    int dim,
                    float ratio,
                    cudaStream_t stream)
{
    // The kernels process unroll_factor elements per thread iteration.
    assert(unroll_factor == 4);
    const int vectorized_count = batch * dim / unroll_factor;
    const dim3 grid_dim = DS_GET_BLOCKS(vectorized_count);
    const dim3 block_dim = DS_CUDA_NUM_THREADS;
    // Advance the shared Philox offset by the per-thread draw count so
    // subsequent launches get fresh random numbers.
    const uint64_t rng_increment = (batch * dim) / grid_dim.x / block_dim.x;
    std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(rng_increment);
    dropout_kernel<<<grid_dim, block_dim, 0, stream>>>(
        vectorized_count, dim, ratio, bias, out, mask, seed);
}
// Explicit instantiations of the in-place bias + dropout launcher for the
// two supported element types (fp32 and fp16).
template void launch_dropout(float*,
                             const float* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             cudaStream_t stream);
template void launch_dropout(__half*,
                             const __half* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             cudaStream_t stream);
// Fused bias-add + inverted dropout + residual-add for float tensors,
// vectorized with float4 (4 elements per loop iteration).
// out[i] = dropout(input[i] + bias[i % dim]) + residual[i]; the residual is
// added AFTER the dropout, so it is never dropped or scaled. The 4 keep
// flags per iteration are packed into one uint32_t of `mask`.
// N is the vectorized count (total elements / unroll_factor); dim must be
// divisible by unroll_factor for the float4 bias loads to be valid.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const float* input,
                               const float* residual,
                               const float* bias,
                               float* out,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);  // inverted-dropout scale
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // NOTE(review): tid is unused in this kernel -- TODO confirm it can go.
    int tid = threadIdx.x % (dim / unroll_factor);
    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);
    // float4 / uint32_t views process 4 elements per iteration.
    float4* out_cast = reinterpret_cast<float4*>(out);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);
    const float4* residual_cast = reinterpret_cast<const float4*>(residual);
    const float4* input_cast = reinterpret_cast<const float4*>(input);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);
        // Pack the 4 per-element keep flags into one 32-bit mask word.
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        float4 out_data;
        float4 b_data = bias_cast[j % (dim / unroll_factor)];
        float4 res_data = residual_cast[j];
        float4 inp_data = input_cast[j];
        // Bias-add ...
        out_data.x = (b_data.x + inp_data.x);
        out_data.y = (b_data.y + inp_data.y);
        out_data.z = (b_data.z + inp_data.z);
        out_data.w = (b_data.w + inp_data.w);
        // ... then dropout ...
        out_data.x = out_data.x * scale * m[0];
        out_data.y = out_data.y * scale * m[1];
        out_data.z = out_data.z * scale * m[2];
        out_data.w = out_data.w * scale * m[3];
        // ... then the residual, untouched by dropout.
        out_data.x += res_data.x;
        out_data.y += res_data.y;
        out_data.z += res_data.z;
        out_data.w += res_data.w;
        mask_32[j] = m_32;
        out_cast[j] = out_data;
    }
    // Scalar tail for elements beyond the last full vectorized chunk.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = input[i] + bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            x_data = x_data * scale * m;
            x_data += residual[i];
            out[i] = x_data;
            mask[i] = m;
        }
    }
}
// Fused bias-add + inverted dropout + residual-add for __half tensors,
// 4 elements per loop iteration (one float2 = four packed halves).
// out[i] = dropout(input[i] + bias[i % dim]) + residual[i]; the residual is
// added AFTER dropout, so it is never dropped or scaled. The 4 keep flags
// per iteration are packed into one uint32_t of `mask`.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const __half* input,
                               const __half* residual,
                               const __half* bias,
                               __half* out,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);  // inverted-dropout scale
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // NOTE(review): tid is unused in this kernel -- TODO confirm it can go.
    int tid = threadIdx.x % (dim / unroll_factor);
    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);
    // Vectorized aliases: one float2 carries 4 packed halves; one uint32_t
    // carries 4 mask bytes.
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);
    const float2* residual_cast = reinterpret_cast<const float2*>(residual);
    const float2* input_cast = reinterpret_cast<const float2*>(input);
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);
        // Raw 64-bit loads, each aliased as two __half2 values.
        float2 data_f;
        __half2* data_h = reinterpret_cast<__half2*>(&data_f);
        float2 bias_f;
        __half2* bias_h = reinterpret_cast<__half2*>(&bias_f);
        float2 residual_f;
        __half2* residual_h = reinterpret_cast<__half2*>(&residual_f);
        float2 input_f;
        __half2* input_h = reinterpret_cast<__half2*>(&input_f);
        bias_f = bias_cast[j % (dim / unroll_factor)];
        residual_f = residual_cast[j];
        input_f = input_cast[j];
        // NOTE(review): data_f is never loaded, so these two reads start
        // from indeterminate values; both are fully overwritten by the
        // bias+input sums below before any use -- confirm this is intended.
        float2 data_h_0 = __half22float2(data_h[0]);
        float2 data_h_1 = __half22float2(data_h[1]);
        float2 bias_h_0 = __half22float2(bias_h[0]);
        float2 bias_h_1 = __half22float2(bias_h[1]);
        float2 residual_h_0 = __half22float2(residual_h[0]);
        float2 residual_h_1 = __half22float2(residual_h[1]);
        float2 input_h_0 = __half22float2(input_h[0]);
        float2 input_h_1 = __half22float2(input_h[1]);
        // bias + input in fp32.
        data_h_0.x = (bias_h_0.x + input_h_0.x);
        data_h_0.y = (bias_h_0.y + input_h_0.y);
        data_h_1.x = (bias_h_1.x + input_h_1.x);
        data_h_1.y = (bias_h_1.y + input_h_1.y);
        // One keep/drop flag per element, packed into 4 bytes.
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);
        // NOTE(review): the __float2half result is implicitly converted back
        // to float (float2 members), adding an extra fp16 rounding step
        // before the final __float22half2_rn pack.
        data_h_0.x = __float2half(data_h_0.x * scale * m[0]);
        data_h_0.y = __float2half(data_h_0.y * scale * m[1]);
        data_h_1.x = __float2half(data_h_1.x * scale * m[2]);
        data_h_1.y = __float2half(data_h_1.y * scale * m[3]);
        // Residual is added after dropout, so it is never dropped.
        data_h_0.x += residual_h_0.x;
        data_h_0.y += residual_h_0.y;
        data_h_1.x += residual_h_1.x;
        data_h_1.y += residual_h_1.y;
        // Repack the 4 fp32 results into one float2 (4 halves) store.
        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);
        result_h[0] = __float22half2_rn(data_h_0);
        result_h[1] = __float22half2_rn(data_h_1);
        out_cast[j] = result_f;
        mask_32[j] = m_32;
    }
    // Scalar tail for elements beyond the last full vectorized chunk.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = (float)input[i] + (float)bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            x_data = x_data * scale * m;
            x_data += (float)residual[i];
            out[i] = __float2half(x_data);
            mask[i] = m;
        }
    }
}
/**
 * Launches the fused bias + dropout + residual kernel:
 * out = dropout(input + bias) + residual, with the per-element keep mask
 * written to `mask`. `ratio` is the drop probability. Interface is
 * unchanged; callers are unaffected.
 */
template <typename T>
void launch_dropout(T* out,
                    const T* input,
                    const T* residual,
                    const T* bias,
                    uint8_t* mask,
                    int batch,
                    int dim,
                    float ratio,
                    cudaStream_t stream)
{
    // The kernels process unroll_factor elements per thread iteration.
    assert(unroll_factor == 4);
    const int vectorized_count = batch * dim / unroll_factor;
    const dim3 grid_dim = DS_GET_BLOCKS(vectorized_count);
    const dim3 block_dim = DS_CUDA_NUM_THREADS;
    // Advance the shared Philox offset by the per-thread draw count so
    // subsequent launches get fresh random numbers.
    const uint64_t rng_increment = (batch * dim) / grid_dim.x / block_dim.x;
    std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(rng_increment);
    dropout_kernel<<<grid_dim, block_dim, 0, stream>>>(
        vectorized_count, dim, ratio, input, residual, bias, out, mask, seed);
}
// Explicit instantiations of the bias + dropout + residual launcher for the
// two supported element types (fp32 and fp16).
template void launch_dropout(float*,
                             const float*,
                             const float* residual,
                             const float* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             cudaStream_t stream);
template void launch_dropout(__half*,
                             const __half*,
                             const __half* residual,
                             const __half* bias,
                             uint8_t* mask,
                             int batch,
                             int dim,
                             float ratio,
                             cudaStream_t stream);
|
3ce95dc2d5e440b4d4cf3137b0de4d9994832991.hip | // !!! This is a file automatically generated by hipify!!!
#include "caffe/FRCNN/frcnn_vid_data_layer.hpp"
namespace caffe {
namespace Frcnn {
// Moves one prefetched batch into the top blobs on the GPU.
// top[0]: image data; when output_labels_ is set, top[1]: image info and
// top[2]: the remaining label rows (per the comment below these are
// ground-truth boxes; the original comment read "gx_bpxes", presumably a
// typo for "gt_boxes" -- confirm).
template <typename Dtype>
void FrcnnVidDataLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  Batch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty");
  // Reshape to loaded data.
  top[0]->ReshapeLike(batch->data_);
  // Copy the data (image blob), device-to-device.
  caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data());
  if (this->output_labels_) {
    // top[1] is image_info, top[2] is the ground-truth boxes.
    caffe_copy(3, batch->label_.gpu_data(), top[1]->mutable_gpu_data());
    // Reshape to the loaded labels (first label row is the header, hence num()-1).
    top[2]->Reshape(batch->label_.num()-1, batch->label_.channels(), batch->label_.height(), batch->label_.width());
    // Copy the labels. The first five label values are the image_info header
    // and are skipped here.
    // NOTE(review): only 3 of those 5 values are copied to top[1] above --
    // verify the 3-vs-5 offsets are intentional.
    caffe_copy(batch->label_.count() - 5, batch->label_.gpu_data() + 5, top[2]->mutable_gpu_data());
  }
  // Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in meanwhile.
  CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
  this->prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(FrcnnVidDataLayer);
} // namespace Frcnn
} // namespace caffe
| 3ce95dc2d5e440b4d4cf3137b0de4d9994832991.cu | #include "caffe/FRCNN/frcnn_vid_data_layer.hpp"
namespace caffe {
namespace Frcnn {
// Moves one prefetched batch into the top blobs on the GPU.
// top[0]: image data; when output_labels_ is set, top[1]: image info and
// top[2]: the remaining label rows (per the comment below these are
// ground-truth boxes; the original comment read "gx_bpxes", presumably a
// typo for "gt_boxes" -- confirm).
template <typename Dtype>
void FrcnnVidDataLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  Batch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty");
  // Reshape to loaded data.
  top[0]->ReshapeLike(batch->data_);
  // Copy the data (image blob), device-to-device.
  caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data());
  if (this->output_labels_) {
    // top[1] is image_info, top[2] is the ground-truth boxes.
    caffe_copy(3, batch->label_.gpu_data(), top[1]->mutable_gpu_data());
    // Reshape to the loaded labels (first label row is the header, hence num()-1).
    top[2]->Reshape(batch->label_.num()-1, batch->label_.channels(), batch->label_.height(), batch->label_.width());
    // Copy the labels. The first five label values are the image_info header
    // and are skipped here.
    // NOTE(review): only 3 of those 5 values are copied to top[1] above --
    // verify the 3-vs-5 offsets are intentional.
    caffe_copy(batch->label_.count() - 5, batch->label_.gpu_data() + 5, top[2]->mutable_gpu_data());
  }
  // Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in meanwhile.
  CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
  this->prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(FrcnnVidDataLayer);
} // namespace Frcnn
} // namespace caffe
|
05aec68e54248ff65477f9075fb695c7bdbe8390.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018 ETH Zürich, Thomas Schöps
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// Avoid warnings in Qt includes with CUDA compiler
#pragma GCC diagnostic ignored "-Wattributes"
// Avoid warnings in Eigen includes with CUDA compiler
#pragma diag_suppress code_is_unreachable
#include "surfel_meshing/cuda_surfel_reconstruction.cuh"
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <libvis/point_cloud.h>
#include <math_constants.h>
#include "surfel_meshing/cuda_matrix.cuh"
#include "surfel_meshing/cuda_util.cuh"
#include "surfel_meshing/surfel.h"
// Uncomment this to run CUDA kernels sequentially for debugging.
// #define CUDA_SEQUENTIAL_CHECKS
namespace vis {
// This threshold is not exposed as a program argument since I am not sure
// whether any other value than 0 would be useful.
constexpr float kSurfelNormalToViewingDirThreshold = 0;
// For a surfel with a given radius, the observation radius can be up to this
// factor worse (larger) while the observation is still integrated into the
// surfel. Observations with larger radii than that are discarded.
// TODO: Expose as a program argument?
constexpr float kMaxObservationRadiusFactorForIntegration = 1.5f;
// Not exposed as a program argument since it did not seem to work well.
constexpr bool kCheckScaleCompatibilityForIntegration = false;
// Not exposed as a program argument since disabling it might not make sense.
constexpr bool kCheckScaleCompatibilityForNeighborAssignment = true;
// If this is set to true, slightly occluded surfels will be protected better,
// but the surfel integration will be unable to merge duplicate surfaces after
// loop closures.
constexpr bool kProtectSlightlyOccludedSurfels = false;
constexpr float kOcclusionDepthFactor = 0.01f;
// Returns whether a surfel participates in measurement integration: true if
// its last-update stamp lies within the trailing window of size
// surfel_integration_active_window_size that ends at frame_index.
// Alternatives considered in the original code:
// kSurfelCreationStamp --> surfels are always deactivated after a certain time and never reactivated. Creates the least artifacts during deformations, but leads to many surfels.
// kSurfelLastUpdateStamp --> surfels stay active. Leads to problems during deformation at observation boundaries (where the surfels are next to each other, but kSurfelLastUpdateStamp differs strongly).
__forceinline__ __device__ bool IsSurfelActiveForIntegration(
    u32 surfel_index,
    const CUDABuffer_<float>& surfels,
    u32 frame_index,
    int surfel_integration_active_window_size) {
  // The stamp is a u32 frame index bit-packed into a float slot.
  const int last_update_stamp = static_cast<int>(
      *reinterpret_cast<const u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)));
  const int window_start =
      static_cast<int>(frame_index) - surfel_integration_active_window_size;
  return last_update_stamp > window_start;
}
// For every pixel, writes 1 into the sequential (non-pitched) flag vector if
// a new surfel should be created there, else 0. A pixel spawns a surfel when
// it lies inside the image border, has a valid depth measurement, and is
// associated with neither a supporting nor a conflicting surfel.
__global__ void CreateNewSurfelsCUDASerializingKernel(
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<u8> new_surfel_flag_vector) {
  const unsigned int px = blockIdx.x * blockDim.x + threadIdx.x;
  const unsigned int py = blockIdx.y * blockDim.y + threadIdx.y;
  if (px >= depth_buffer.width() || py >= depth_buffer.height()) {
    return;
  }
  // TODO: Is this border necessary here, or should it rather be integrated
  // into the depth map erosion?
  constexpr int kBorder = 1;
  const bool inside_border =
      px >= kBorder &&
      py >= kBorder &&
      px < depth_buffer.width() - kBorder &&
      py < depth_buffer.height() - kBorder;
  const bool creates_surfel =
      inside_border &&
      depth_buffer(py, px) > 0 &&
      supporting_surfels(py, px) == Surfel::kInvalidIndex &&
      conflicting_surfels(py, px) == Surfel::kInvalidIndex;
  // Flatten the pixel coordinate into the sequential flag vector.
  new_surfel_flag_vector(0, px + py * depth_buffer.width()) = creates_surfel ? 1 : 0;
}
// Creates one new surfel for every pixel flagged by the serializing kernel.
// The surfel's output slot is surfel_count + exclusive-prefix-sum of the flag
// vector (new_surfel_indices). Initializes position, normal, color,
// confidence, timestamps, squared radius, and up to 4 regularization
// neighbors taken from the pixel's 4-neighborhood.
__global__ void CreateNewSurfelsCUDACreationKernel(
    u32 frame_index,
    float inv_depth_scaling,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    CUDAMatrix3x4 global_T_local,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<uchar3> color_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u8> new_surfel_flag_vector,
    CUDABuffer_<u32> new_surfel_indices,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float radius_factor_for_regularization_neighbors_squared) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < depth_buffer.width() && y < depth_buffer.height()) {
    u32 seq_index = x + y * depth_buffer.width();
    // Only pixels flagged for surfel creation do any work.
    if (new_surfel_flag_vector(0, seq_index) != 1) {
      return;
    }
    // Slot of the new surfel: appended after the existing surfels.
    u32 surfel_index = surfel_count + new_surfel_indices(0, seq_index);
    // Unproject the depth measurement and transform it to the global frame.
    float depth = inv_depth_scaling * depth_buffer(y, x);
    float3 local_position;
    UnprojectPoint(x, y, depth, fx_inv, fy_inv, cx_inv, cy_inv, &local_position);
    float3 global_position = global_T_local * local_position;
    surfels(kSurfelX, surfel_index) = global_position.x;
    surfels(kSurfelY, surfel_index) = global_position.y;
    surfels(kSurfelZ, surfel_index) = global_position.z;
    // The smoothed (regularized) position starts at the raw position and is
    // refined below using the neighbors.
    surfels(kSurfelSmoothX, surfel_index) = global_position.x;
    surfels(kSurfelSmoothY, surfel_index) = global_position.y;
    surfels(kSurfelSmoothZ, surfel_index) = global_position.z;
    // Normal: z is reconstructed from the stored (x, y) components assuming a
    // unit-length, camera-facing (negative z) normal, then rotated to global.
    float2 normal_xy = normals_buffer(y, x);
    const float normal_z = -sqrtf(::max(0.f, 1 - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y));
    float3 global_normal = global_T_local.Rotate(make_float3(normal_xy.x, normal_xy.y, normal_z));
    surfels(kSurfelNormalX, surfel_index) = global_normal.x;
    surfels(kSurfelNormalY, surfel_index) = global_normal.y;
    surfels(kSurfelNormalZ, surfel_index) = global_normal.z;
    // Color is stored as a uchar4 bit-packed into a float slot.
    uchar3 color = color_buffer(y, x);
    *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))) = make_uchar4(color.x, color.y, color.z, 0);
    surfels(kSurfelConfidence, surfel_index) = 1;
    // Timestamps are u32 frame indices bit-packed into float slots.
    *reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index)) = frame_index;
    *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = frame_index;
    const float radius_squared = radius_buffer(y, x);
    surfels(kSurfelRadiusSquared, surfel_index) = radius_squared;
    // Determine initial neighbors from the pixel's 4-neighborhood.
    float3 neighbor_position_sum = make_float3(0, 0, 0);
    int existing_neighbor_count_plus_1 = 1;
    constexpr int kDirectionsX[4] = {-1, 1, 0, 0};
    constexpr int kDirectionsY[4] = {0, 0, -1, 1};
    for (int direction = 0; direction < 4; ++ direction) {
      u32 neighbor_index = supporting_surfels(y + kDirectionsY[direction], x + kDirectionsX[direction]);
      if (neighbor_index != Surfel::kInvalidIndex) {
        // Existing surfel at the neighbor pixel: accept it only if it lies
        // within the regularization radius of the new surfel.
        float3 this_to_neighbor = make_float3(surfels(kSurfelX, neighbor_index) - global_position.x,
                                              surfels(kSurfelY, neighbor_index) - global_position.y,
                                              surfels(kSurfelZ, neighbor_index) - global_position.z);
        float distance_squared =
            this_to_neighbor.x * this_to_neighbor.x + this_to_neighbor.y * this_to_neighbor.y + this_to_neighbor.z * this_to_neighbor.z;
        if (distance_squared > radius_factor_for_regularization_neighbors_squared * radius_squared) {
          neighbor_index = Surfel::kInvalidIndex;
        } else {
          // Accumulate the neighbor's smoothed position for the averaged
          // initialization below.
          neighbor_position_sum = make_float3(
              neighbor_position_sum.x + surfels(kSurfelSmoothX, neighbor_index),
              neighbor_position_sum.y + surfels(kSurfelSmoothY, neighbor_index),
              neighbor_position_sum.z + surfels(kSurfelSmoothZ, neighbor_index));
          ++ existing_neighbor_count_plus_1;
        }
      } else {
        // No existing surfel there: the neighbor pixel may itself spawn a new
        // surfel this frame; link to it if the depth difference (used as a
        // cheap distance proxy) is small enough.
        u32 seq_neighbor_index = (x + kDirectionsX[direction]) + (y + kDirectionsY[direction]) * depth_buffer.width();
        if (new_surfel_flag_vector(0, seq_neighbor_index) == 1) {
          float other_depth = inv_depth_scaling * depth_buffer(y + kDirectionsY[direction], x + kDirectionsX[direction]);
          float approximate_distance_squared = (depth - other_depth) * (depth - other_depth);
          if (approximate_distance_squared <= radius_factor_for_regularization_neighbors_squared * radius_squared) {
            neighbor_index = surfel_count + new_surfel_indices(0, seq_neighbor_index);
          }
        }
      }
      *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + direction, surfel_index)) = neighbor_index;
    }
    // Try to get a better initialization for the regularized surfel position:
    // average of this surfel's own position and the smoothed positions of the
    // accepted existing neighbors.
    surfels(kSurfelSmoothX, surfel_index) = (surfels(kSurfelSmoothX, surfel_index) + neighbor_position_sum.x) / existing_neighbor_count_plus_1;
    surfels(kSurfelSmoothY, surfel_index) = (surfels(kSurfelSmoothY, surfel_index) + neighbor_position_sum.y) / existing_neighbor_count_plus_1;
    surfels(kSurfelSmoothZ, surfel_index) = (surfels(kSurfelSmoothZ, surfel_index) + neighbor_position_sum.z) / existing_neighbor_count_plus_1;
  }
}
// Host-side orchestration of new-surfel creation:
//   1) flag every pixel that should spawn a surfel (serializing kernel),
//   2) exclusive prefix-sum of the flags (CUB) to assign contiguous slots,
//   3) asynchronously read back the new-surfel count (last prefix-sum value
//      and the last flag byte),
//   4) run the creation kernel that fills the surfel buffer.
// All device work is enqueued on `stream`; the count read-backs are async,
// so the caller must synchronize the stream before using *new_surfel_count
// and *new_surfel_count_2.
void CreateNewSurfelsCUDA(
    hipStream_t stream,
    u32 frame_index,
    const SE3f& global_T_local,
    float depth_scaling,
    float radius_factor_for_regularization_neighbors,
    const PinholeCamera4f& depth_camera,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    const CUDABuffer<Vec3u8>& color_buffer,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& conflicting_surfels,
    void** new_surfels_temp_storage,
    usize* new_surfels_temp_storage_bytes,
    CUDABuffer<u8>* new_surfel_flag_vector,
    CUDABuffer<u32>* new_surfel_indices,
    u32 surfel_count,
    CUDABuffer<float>* surfels,
    u32* new_surfel_count,
    u8* new_surfel_count_2) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  // Unprojection intrinsics for pixel center convention.
  const float fx_inv = 1.0f / fx;
  const float fy_inv = 1.0f / fy;
  const float cx_pixel_center = cx - 0.5f;
  const float cy_pixel_center = cy - 0.5f;
  const float cx_inv_pixel_center = -cx_pixel_center / fx;
  const float cy_inv_pixel_center = -cy_pixel_center / fy;
  // The first kernel marks in a sequential (non-pitched) vector whether a new
  // surfel is created for the corresponding pixel or not.
  constexpr int kBlockWidth = 32;
  constexpr int kBlockHeight = 32;
  dim3 grid_dim(GetBlockCount(depth_buffer.width(), kBlockWidth),
                GetBlockCount(depth_buffer.height(), kBlockHeight));
  dim3 block_dim(kBlockWidth, kBlockHeight);
  hipLaunchKernelGGL(( CreateNewSurfelsCUDASerializingKernel)
      , dim3(grid_dim), dim3(block_dim), 0, stream,
      depth_buffer.ToCUDA(),
      supporting_surfels.ToCUDA(),
      conflicting_surfels.ToCUDA(),
      new_surfel_flag_vector->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // Indices for the new surfels are computed with a parallel exclusive prefix
  // sum from CUB. Per the CUB convention, the first call with a null temp
  // storage pointer only queries the required temp storage size.
  // NOTE(review): the temp storage is sized once (first call) and never
  // regrown, which assumes the image resolution stays constant; also the
  // hipMalloc return value is unchecked -- confirm both are acceptable.
  if (*new_surfels_temp_storage_bytes == 0) {
    hipcub::DeviceScan::ExclusiveSum(
        *new_surfels_temp_storage,
        *new_surfels_temp_storage_bytes,
        new_surfel_flag_vector->ToCUDA().address(),
        new_surfel_indices->ToCUDA().address(),
        depth_buffer.width() * depth_buffer.height(),
        stream);
    hipMalloc(new_surfels_temp_storage, *new_surfels_temp_storage_bytes);
  }
  hipcub::DeviceScan::ExclusiveSum(
      *new_surfels_temp_storage,
      *new_surfels_temp_storage_bytes,
      new_surfel_flag_vector->ToCUDA().address(),
      new_surfel_indices->ToCUDA().address(),
      depth_buffer.width() * depth_buffer.height(),
      stream);
  // Read back the number of new surfels to the CPU by reading the last element
  // in new_surfel_indices and new_surfel_flag_vector. (The exclusive sum does
  // not include the last pixel's own flag, hence the extra flag read-back;
  // presumably the caller adds the two values -- confirm.)
  // TODO: Do this concurrently with the next kernel call?
  new_surfel_indices->DownloadPartAsync(
      (depth_buffer.width() * depth_buffer.height() - 1) * sizeof(u32),
      1 * sizeof(u32),
      stream,
      new_surfel_count);
  new_surfel_flag_vector->DownloadPartAsync(
      (depth_buffer.width() * depth_buffer.height() - 1) * sizeof(u8),
      1 * sizeof(u8),
      stream,
      new_surfel_count_2);
  // Now that the indices are known, the actual surfel creation is done.
  hipLaunchKernelGGL(( CreateNewSurfelsCUDACreationKernel)
      , dim3(grid_dim), dim3(block_dim), 0, stream,
      frame_index,
      1.0f / depth_scaling,
      fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center,
      CUDAMatrix3x4(global_T_local.matrix3x4()),
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      *reinterpret_cast<const CUDABuffer_<uchar3>*>(&color_buffer.ToCUDA()),
      supporting_surfels.ToCUDA(),
      new_surfel_flag_vector->ToCUDA(),
      new_surfel_indices->ToCUDA(),
      surfel_count,
      surfels->ToCUDA(),
      radius_factor_for_regularization_neighbors * radius_factor_for_regularization_neighbors);
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
// Writes one Point3fC3u8 vertex per surfel into the mapped GL vertex buffer.
// The template flags select the color scheme; with all flags false the
// surfel's stored color is used. Surfels replaced since the last
// triangulation get NaN as x so their adjacent triangles are not drawn.
template <bool visualize_last_update_timestamp,
          bool visualize_creation_timestamp,
          bool visualize_radii,
          bool visualize_normals>
__global__ void UpdateSurfelVertexBufferCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    u32 point_size_in_floats,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    u32 latest_triangulated_frame_index,
    u32 latest_mesh_surfel_count,
    float* vertex_buffer_ptr) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    const u32 surfel_creation_stamp = *reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index));
    // Only output if it is an old surfel that has not been replaced since the
    // last mesh was created, or if it is a new surfel which does not appear
    // in the mesh yet.
    const bool output_vertex = surfel_creation_stamp <= latest_triangulated_frame_index ||
                               surfel_index >= latest_mesh_surfel_count;
    // Vertex layout (Point3fC3u8):
    // float x, float y, float z, u8 r, u8 g, u8 b, u8 unused;
    // Using NaN for one of the vertex coordinates to prevent it from being
    // drawn if the surfel was replaced recently and the triangulation not
    // adjusted yet. This makes the adjacent triangles disappear. Not sure
    // whether that is portable, but it works as intended on my system ...
    vertex_buffer_ptr[surfel_index * point_size_in_floats + 0] = output_vertex ? surfels(kSurfelSmoothX, surfel_index) : CUDART_NAN_F;
    vertex_buffer_ptr[surfel_index * point_size_in_floats + 1] = surfels(kSurfelSmoothY, surfel_index);
    vertex_buffer_ptr[surfel_index * point_size_in_floats + 2] = surfels(kSurfelSmoothZ, surfel_index);
    if (visualize_last_update_timestamp || visualize_creation_timestamp) {
      // Age-based coloring: red for surfels touched this frame, blue for
      // surfels older than the window, grayscale ramp in between.
      const u32 last_update_timestamp = *reinterpret_cast<u32*>(&surfels(visualize_creation_timestamp ? kSurfelCreationStamp : kSurfelLastUpdateStamp, surfel_index));
      const int age = frame_index - last_update_timestamp;
      constexpr int kVisualizationMinAge = 1;
      const int kVisualizationMaxAge = visualize_creation_timestamp ? 3000 : surfel_integration_active_window_size;
      if (age < kVisualizationMinAge) {
        // Special color for surfels updated in the last frame: red.
        uchar4 color = make_uchar4(255, 80, 80, 0);
        vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
      } else if (age > kVisualizationMaxAge) {
        // Old surfels: blue
        uchar4 color = make_uchar4(40, 40, 255, 0);
        vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
      } else {
        float blend_factor = (age - kVisualizationMinAge) * 1.0f / (kVisualizationMaxAge - kVisualizationMinAge);
        blend_factor = ::min(1.0f, ::max(0.0f, blend_factor));
        u8 intensity = 255 - static_cast<u8>(255.99f * blend_factor);
        uchar4 color = make_uchar4(intensity, intensity, intensity, 0);
        vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
      }
    } else if (visualize_radii) {
      // Radius visualization: green (small) blending to red (large).
      const float radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
      const float radius = sqrtf(radius_squared);
      constexpr float kVisualizationMinRadius = 0.0005f;  // 0.5 mm
      constexpr float kVisualizationMaxRadius = 0.01f;  // 1 cm
      float blend_factor = (radius - kVisualizationMinRadius) / (kVisualizationMaxRadius - kVisualizationMinRadius);
      blend_factor = ::min(1.0f, ::max(0.0f, blend_factor));
      u8 red = 255.99f * blend_factor;
      u8 green = 255 - red;
      u8 blue = 80;
      uchar4 color = make_uchar4(red, green, blue, 0);
      vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
    } else if (visualize_normals) {
      // Normal visualization: map each component from [-1, 1] to [0, 255].
      float3 normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                  surfels(kSurfelNormalY, surfel_index),
                                  surfels(kSurfelNormalZ, surfel_index));
      uchar4 color = make_uchar4(255.99f / 2.0f * (normal.x + 1.0f),
                                 255.99f / 2.0f * (normal.y + 1.0f),
                                 255.99f / 2.0f * (normal.z + 1.0f),
                                 0);
      vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
    } else {
      // Default: the surfel's stored RGB color (uchar4 bit-packed in a float).
      vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = surfels(kSurfelColor, surfel_index);
    }
  }
}
// Fills the OpenGL vertex buffer (interleaved Point3fC3u8) from the surfel
// buffer: maps the GL resource into CUDA, dispatches the kernel template
// specialization matching the selected visualization mode, and unmaps.
// NOTE(review): the hipGraphicsMapResources / GetMappedPointer return codes
// are unchecked; errors only surface via the next CHECK_CUDA_NO_ERROR.
void UpdateSurfelVertexBufferCUDA(
    hipStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    u32 latest_triangulated_frame_index,
    u32 latest_mesh_surfel_count,
    cudaGraphicsResource_t vertex_buffer_resource,
    bool visualize_last_update_timestamp,
    bool visualize_creation_timestamp,
    bool visualize_radii,
    bool visualize_normals) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Map OpenGL buffer object for writing from CUDA.
  hipGraphicsMapResources(1, &vertex_buffer_resource, stream);
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  usize num_bytes;
  float* vertex_buffer_ptr;
  hipGraphicsResourceGetMappedPointer((void**)&vertex_buffer_ptr, &num_bytes, vertex_buffer_resource);
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // One thread per surfel.
  constexpr int kBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kBlockWidth));
  dim3 block_dim(kBlockWidth);
  // The vertex stride must be an integer number of floats for the kernel's
  // float-indexed writes.
  CHECK(sizeof(Point3fC3u8) % sizeof(float) == 0);
  u32 point_size_in_floats = sizeof(Point3fC3u8) / sizeof(float);
  // Dispatch to the template specialization for the selected visualization
  // mode (at most one mode flag is expected to be set).
#define CALL_KERNEL(visualize_last_update_timestamp, \
                    visualize_creation_timestamp, \
                    visualize_radii, \
                    visualize_normals) \
  hipLaunchKernelGGL(( UpdateSurfelVertexBufferCUDAKernel \
      <visualize_last_update_timestamp, \
       visualize_creation_timestamp, \
       visualize_radii, \
       visualize_normals>) \
      , dim3(grid_dim), dim3(block_dim), 0, stream, \
      frame_index, \
      surfel_integration_active_window_size, \
      point_size_in_floats, \
      surfel_count, \
      surfels.ToCUDA(), \
      latest_triangulated_frame_index, \
      latest_mesh_surfel_count, \
      vertex_buffer_ptr)
  if (visualize_last_update_timestamp) {
    CALL_KERNEL(true, false, false, false);
  } else if (visualize_creation_timestamp) {
    CALL_KERNEL(false, true, false, false);
  } else if (visualize_radii) {
    CALL_KERNEL(false, false, true, false);
  } else if (visualize_normals) {
    CALL_KERNEL(false, false, false, true);
  } else {
    CALL_KERNEL(false, false, false, false);
  }
#undef CALL_KERNEL
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  hipGraphicsUnmapResources(1, &vertex_buffer_resource, stream);
}
// Writes one line segment (a pair of vertex indices) per surfel neighbor
// into the mapped GL index buffer. Invalid neighbors become degenerate
// segments (surfel -> itself) so they are not visible when rendered.
__global__ void UpdateNeighborIndexBufferCUDAKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    unsigned int* neighbor_index_buffer_ptr) {
  const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index >= surfel_count) {
    return;
  }
  // Base of this surfel's segment slots in the index buffer.
  unsigned int* out = neighbor_index_buffer_ptr + surfel_index * 2 * kSurfelNeighborCount;
  #pragma unroll
  for (int i = 0; i < kSurfelNeighborCount; ++ i) {
    // Neighbor slots are u32 indices bit-packed into float surfel attributes.
    const u32 neighbor_index =
        *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + i, surfel_index));
    out[2 * i + 0] = surfel_index;
    out[2 * i + 1] =
        (neighbor_index == Surfel::kInvalidIndex) ? surfel_index : neighbor_index;
  }
}
// Fills the OpenGL neighbor index buffer from the surfel buffer: maps the GL
// resource into CUDA, launches one thread per surfel, and unmaps.
void UpdateNeighborIndexBufferCUDA(
    hipStream_t stream,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    cudaGraphicsResource_t neighbor_index_buffer_resource) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Map the OpenGL buffer object so CUDA can write into it.
  hipGraphicsMapResources(1, &neighbor_index_buffer_resource, stream);
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  unsigned int* mapped_indices;
  usize mapped_size;
  hipGraphicsResourceGetMappedPointer((void**)&mapped_indices, &mapped_size, neighbor_index_buffer_resource);
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // One thread per surfel.
  constexpr int kThreadsPerBlock = 1024;
  const dim3 grid_dim(GetBlockCount(surfel_count, kThreadsPerBlock));
  const dim3 block_dim(kThreadsPerBlock);
  hipLaunchKernelGGL(( UpdateNeighborIndexBufferCUDAKernel)
      , dim3(grid_dim), dim3(block_dim), 0, stream,
      surfel_count,
      surfels.ToCUDA(),
      mapped_indices);
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  hipGraphicsUnmapResources(1, &neighbor_index_buffer_resource, stream);
}
// Writes a line segment per surfel for normal visualization: the segment
// starts at the surfel's smoothed position and ends one radius along the
// surfel normal. Output layout: 6 floats (start xyz, end xyz) per surfel.
__global__ void UpdateNormalVertexBufferCUDAKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float* normal_vertex_buffer_ptr) {
  const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index >= surfel_count) {
    return;
  }
  
  const float px = surfels(kSurfelSmoothX, surfel_index);
  const float py = surfels(kSurfelSmoothY, surfel_index);
  const float pz = surfels(kSurfelSmoothZ, surfel_index);
  // The surfel stores the squared radius; the segment length is the radius.
  const float radius = sqrtf(surfels(kSurfelRadiusSquared, surfel_index));
  
  float* out = normal_vertex_buffer_ptr + 6 * surfel_index;
  out[0] = px;
  out[1] = py;
  out[2] = pz;
  out[3] = px + radius * surfels(kSurfelNormalX, surfel_index);
  out[4] = py + radius * surfels(kSurfelNormalY, surfel_index);
  out[5] = pz + radius * surfels(kSurfelNormalZ, surfel_index);
}
// Host wrapper: maps the OpenGL normal-visualization vertex buffer (given as
// a CUDA graphics resource), fills it with one line segment per surfel via
// UpdateNormalVertexBufferCUDAKernel, and unmaps it again. No-op if there
// are no surfels.
void UpdateNormalVertexBufferCUDA(
    hipStream_t stream,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    cudaGraphicsResource_t normal_vertex_buffer_resource) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  if (surfel_count == 0) {
    return;
  }
  
  // Make the OpenGL buffer object writable from CUDA.
  hipGraphicsMapResources(1, &normal_vertex_buffer_resource, stream);
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  usize mapped_byte_count;
  float* mapped_vertices;
  hipGraphicsResourceGetMappedPointer((void**)&mapped_vertices, &mapped_byte_count, normal_vertex_buffer_resource);
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // One thread per surfel.
  constexpr int kThreadsPerBlock = 1024;
  const dim3 kernel_grid(GetBlockCount(surfel_count, kThreadsPerBlock));
  const dim3 kernel_block(kThreadsPerBlock);
  hipLaunchKernelGGL(
      UpdateNormalVertexBufferCUDAKernel,
      kernel_grid, kernel_block, 0, stream,
      surfel_count,
      surfels.ToCUDA(),
      mapped_vertices);
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  hipGraphicsUnmapResources(1, &normal_vertex_buffer_resource, stream);
}
// Seeds the measurement-blending distance maps (see BlendMeasurementsCUDA).
// For every pixel that has both a valid depth measurement and a supporting
// surfel, this detects whether it borders the measurement region or the
// surfel-supported region, and writes distance 1 plus the surfel-average
// minus measurement depth delta into the corresponding map. Pixels without
// an assigned distance get 255 ("unknown") in distance_map.
//
// depth_scaling converts metric depths to raw depth-buffer units (the host
// passes 1 / depth_correction_factor); dividing a raw value by it goes the
// other way.
// NOTE(review): distance_map / depth_buffer are written while neighboring
// threads may read the same cells — acknowledged by the TODOs below; the
// result is order-dependent.
__global__ void BlendMeasurementsCUDAStartKernel(
    float depth_scaling,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<float> supporting_surfel_depth_sums,
    CUDABuffer_<u8> distance_map,
    CUDABuffer_<float> surfel_depth_average_deltas,
    CUDABuffer_<u8> new_distance_map,
    CUDABuffer_<float> new_surfel_depth_average_deltas) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  
  // 1-pixel border so the 3x3 neighborhood below stays in bounds.
  constexpr int kBorder = 1;
  if (x >= kBorder && y >= kBorder && x < supporting_surfels.width() - kBorder && y < supporting_surfels.height() - kBorder) {
    // Only consider pixels with valid measurement depth and supporting surfels.
    if (depth_buffer(y, x) == 0 || supporting_surfels(y, x) == Surfel::kInvalidIndex) {
      return;
    }
    
    // Inspect the 3x3 neighborhood to classify the pixel as lying at the
    // border of the measurement region and/or the surfel-supported region.
    bool measurement_border_pixel = false;
    bool surfel_border_pixel = false;
    for (int wy = y - 1, wy_end = y + 1; wy <= wy_end; ++ wy) {
      for (int wx = x - 1, wx_end = x + 1; wx <= wx_end; ++ wx) {
        if (depth_buffer(wy, wx) == 0) {
          measurement_border_pixel = true;
        } else if (supporting_surfels(wy, wx) == Surfel::kInvalidIndex) {
          surfel_border_pixel = true;
        }
      }
    }
    
    if (surfel_border_pixel) {
      // Seed for growing outside the surfel-supported region.
      // TODO: Interpolation should start at the depth after this iteration's integration in this case
      new_distance_map(y, x) = 1;
      float surfel_depth_average = supporting_surfel_depth_sums(y, x) / supporting_surfel_counts(y, x);
      new_surfel_depth_average_deltas(y, x) = surfel_depth_average - depth_buffer(y, x) / depth_scaling;
    }
    
    if (measurement_border_pixel) {
      // Seed for growing inside the measurement region; additionally snap the
      // measurement at this border pixel to the surfel depth average.
      distance_map(y, x) = 1;
      float surfel_depth_average = supporting_surfel_depth_sums(y, x) / supporting_surfel_counts(y, x);
      surfel_depth_average_deltas(y, x) = surfel_depth_average - depth_buffer(y, x) / depth_scaling;
      depth_buffer(y, x) = depth_scaling * surfel_depth_average + 0.5f;  // TODO: This assignment can happen while other threads read, does it matter?
    } else {
      distance_map(y, x) = 255;  // unknown distance
    }
  }
}
// One region-growing step of measurement blending: pixels adjacent (3x3) to a
// pixel with distance == iteration - 1 receive distance == iteration, the
// average of their neighbors' depth deltas, and an additive depth adjustment
// that fades out linearly with distance (interpolation_factor_term is
// 1 / (measurement_blending_radius - 1), passed by the host).
// The first branch grows inside the measurement region (distance_map, seeded
// at measurement borders with 255 meaning "unknown"); the second grows
// outside the surfel-supported region (new_distance_map, 0 meaning unset).
// NOTE(review): distance maps are updated while neighboring threads read
// them — acknowledged by the TODOs below; results are order-dependent.
__global__ void BlendMeasurementsCUDAIterationKernel(
    int iteration,
    float interpolation_factor_term,
    float depth_scaling,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> /*supporting_surfel_counts*/,
    CUDABuffer_<float> /*supporting_surfel_depth_sums*/,
    CUDABuffer_<u8> distance_map,
    CUDABuffer_<float> surfel_depth_average_deltas,
    CUDABuffer_<u8> new_distance_map,
    CUDABuffer_<float> new_surfel_depth_average_deltas) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  
  // 1-pixel border so the 3x3 neighborhoods below stay in bounds.
  constexpr int kBorder = 1;
  if (x >= kBorder && y >= kBorder && x < supporting_surfels.width() - kBorder && y < supporting_surfels.height() - kBorder) {
    if (distance_map(y, x) == 255) {  // unknown distance
      // Average the deltas of all neighbors assigned in the previous step.
      float delta_sum = 0;
      int count = 0;
      for (int wy = y - 1, wy_end = y + 1; wy <= wy_end; ++ wy) {
        for (int wx = x - 1, wx_end = x + 1; wx <= wx_end; ++ wx) {
          if (distance_map(wy, wx) == iteration - 1) {
            delta_sum += surfel_depth_average_deltas(wy, wx);
            ++ count;
          }
        }
      }
      
      if (count > 0) {
        distance_map(y, x) = iteration;  // TODO: This assignment can happen while other threads read, does it matter?
        float surfel_delta_average = delta_sum / count;
        surfel_depth_average_deltas(y, x) = surfel_delta_average;
        // Blend weight decreases from ~1 at the border towards 0 at the
        // blending radius.
        float interpolation_factor = (iteration - 1) * interpolation_factor_term;
        depth_buffer(y, x) += depth_scaling * (1 - interpolation_factor) * surfel_delta_average + 0.5f;
      }
    }
    
    // Same growing scheme for pixels with valid depth but no supporting
    // surfel (outside the surfel-supported region).
    if (depth_buffer(y, x) != 0 && supporting_surfels(y, x) == Surfel::kInvalidIndex && new_distance_map(y, x) == 0) {
      float delta_sum = 0;
      int count = 0;
      for (int wy = y - 1, wy_end = y + 1; wy <= wy_end; ++ wy) {
        for (int wx = x - 1, wx_end = x + 1; wx <= wx_end; ++ wx) {
          if (new_distance_map(wy, wx) == iteration - 1) {
            delta_sum += new_surfel_depth_average_deltas(wy, wx);
            ++ count;
          }
        }
      }
      
      if (count > 0) {
        new_distance_map(y, x) = iteration;  // TODO: This assignment can happen while other threads read, does it matter?
        float surfel_delta_average = delta_sum / count;
        new_surfel_depth_average_deltas(y, x) = surfel_delta_average;
        float interpolation_factor = (iteration - 1) * interpolation_factor_term;
        depth_buffer(y, x) += depth_scaling * (1 - interpolation_factor) * surfel_delta_average + 0.5f;
      }
    }
  }
}
// Blends depth measurements towards the depths predicted by supporting
// surfels near region borders: first seeds border pixels (distance 1) with
// the surfel-average vs. measurement depth delta, then grows the blended
// region outwards one pixel ring at a time for distances
// 2 .. measurement_blending_radius - 1, with the applied delta fading out
// linearly with distance.
void BlendMeasurementsCUDA(
    hipStream_t stream,
    int measurement_blending_radius,
    float depth_correction_factor,
    CUDABuffer<u16>* depth_buffer,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& supporting_surfel_counts,
    const CUDABuffer<float>& supporting_surfel_depth_sums,
    CUDABuffer<u8>* distance_map,
    CUDABuffer<float>* surfel_depth_average_deltas,
    CUDABuffer<u8>* new_distance_map,
    CUDABuffer<float>* new_surfel_depth_average_deltas) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Distance 0 means "no distance assigned yet".
  distance_map->Clear(0, stream);
  new_distance_map->Clear(0, stream);
  
  constexpr int kTileWidth = 32;
  constexpr int kTileHeight = 32;
  const dim3 kernel_grid(GetBlockCount(supporting_surfels.width(), kTileWidth),
                         GetBlockCount(supporting_surfels.height(), kTileHeight));
  const dim3 kernel_block(kTileWidth, kTileHeight);
  
  // Seed distance-1 pixels at the measurement / surfel region borders.
  hipLaunchKernelGGL(
      BlendMeasurementsCUDAStartKernel,
      kernel_grid, kernel_block, 0, stream,
      1.0f / depth_correction_factor,
      depth_buffer->ToCUDA(),
      supporting_surfels.ToCUDA(),
      supporting_surfel_counts.ToCUDA(),
      supporting_surfel_depth_sums.ToCUDA(),
      distance_map->ToCUDA(),
      surfel_depth_average_deltas->ToCUDA(),
      new_distance_map->ToCUDA(),
      new_surfel_depth_average_deltas->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Grow the blended region by one pixel ring per iteration.
  for (int iteration = 2; iteration < measurement_blending_radius; ++ iteration) {
    hipLaunchKernelGGL(
        BlendMeasurementsCUDAIterationKernel,
        kernel_grid, kernel_block, 0, stream,
        iteration,
        1.0f / (measurement_blending_radius - 1.0f),
        1.0f / depth_correction_factor,
        depth_buffer->ToCUDA(),
        supporting_surfels.ToCUDA(),
        supporting_surfel_counts.ToCUDA(),
        supporting_surfel_depth_sums.ToCUDA(),
        distance_map->ToCUDA(),
        surfel_depth_average_deltas->ToCUDA(),
        new_distance_map->ToCUDA(),
        new_surfel_depth_average_deltas->ToCUDA());
    #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
    #endif
    CHECK_CUDA_NO_ERROR();
  }
}
// Either integrates the depth measurement at pixel (x, y) into the given
// surfel (weighted averaging of position / normal / color, confidence
// increase), or handles a free-space conflict between the measurement and the
// surfel (confidence decrease; replacement by a fresh surfel once confidence
// drops to zero).
//
// Warp-cooperation contract: all threads of a warp must call this together
// (threads with nothing to do pass integrate == false), because it uses
// warp-wide votes (__any) for early exits and __syncthreads() inside the
// critical-section loops. Exclusive access to a surfel is obtained by
// atomicCAS-swapping its x coordinate with NaN ("locked") and restoring it
// afterwards.
// NOTE(review): __any is the legacy mask-less warp vote — valid in HIP, but
// removed for CUDA on Volta+ (independent thread scheduling), where
// __any_sync would be required; verify the targeted architectures.
// NOTE(review): warps can exit through the early returns at different points
// while other warps of the block still reach the __syncthreads() calls below;
// confirm this barrier-divergence pattern is acceptable for the target HW.
__device__ void IntegrateOrConflictSurfel(
    bool integrate, u32 frame_index, int x, int y,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    const float3& cam_space_surfel_pos,
    unsigned int surfel_index,
    CUDABuffer_<float>& surfels,
    const CUDAMatrix3x4& local_T_global,
    const CUDAMatrix3x4& global_T_local,
    float max_surfel_confidence,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float depth_correction_factor,
    const CUDABuffer_<u16>& depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<uchar3> color_buffer,
    CUDABuffer_<u32>& /*supporting_surfels*/,
    CUDABuffer_<u32>& supporting_surfel_counts,
    CUDABuffer_<u32>& conflicting_surfels,
    CUDABuffer_<float>& first_surfel_depth) {
  // Check whether the surfel falls on a depth pixel.
  float measurement_depth = depth_correction_factor * depth_buffer(y, x);
  if (measurement_depth <= 0) {
    integrate = false;
  }
  if (!__any(integrate)) {
    return;
  }
  
  // Check if this or another surfel is conflicting.
  // A conflict exists if the closest surfel on this pixel's ray lies clearly
  // in front of the measurement (free-space violation).
  bool conflicting = false;
  const float first_surfel_depth_value = first_surfel_depth(y, x);
  if (first_surfel_depth_value < (1 - sensor_noise_factor) * measurement_depth) {
    // This or another surfel is conflicting.
    if (first_surfel_depth_value == cam_space_surfel_pos.z) {
      // This surfel is conflicting with the measurement.
      if (conflicting_surfels(y, x) == surfel_index) {
        conflicting = integrate;
      }
    }
    integrate = false;
  }
  if (!__any(integrate || conflicting)) {
    return;
  }
  
  // Determine the depth from which on surfels are considered to be occluded.
  float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
  if (kProtectSlightlyOccludedSurfels && first_surfel_depth_value < occlusion_depth) {
    // TODO: Would it be better to use the front surfel's radius for that?
    occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
  }
  
  // Check whether this surfel is occluded.
  if (cam_space_surfel_pos.z > occlusion_depth) {
    // Surfel is occluded.
    integrate = false;
  }
  if (!__any(integrate || conflicting)) {
    return;
  }
  
  // Read data.
  // Unproject the measurement to a global-frame point and rotate its normal
  // into the global frame (the z sign convention makes normals face the
  // camera in the local frame).
  float depth = depth_correction_factor * depth_buffer(y, x);
  float3 local_position;
  UnprojectPoint(x, y, depth, fx_inv, fy_inv, cx_inv, cy_inv, &local_position);
  float3 global_position = global_T_local * local_position;
  
  float2 normal_xy = normals_buffer(y, x);
  const float normal_z = -sqrtf(::max(0.f, 1 - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y));
  float3 global_normal = global_T_local.Rotate(make_float3(normal_xy.x, normal_xy.y, normal_z));
  
  uchar3 color = color_buffer(y, x);
  
  // Handle conflicts.
  // Critical section. HACK: replace surfel x coordinate with NaN to signal locked state.
  __syncthreads();  // Not sure if necessary
  while (__any(conflicting)) {
    // Only the thread that wins the atomicCAS (and thus holds the lock on
    // this surfel's x coordinate) executes the conflict handling.
    float assumed_x = surfels(kSurfelX, surfel_index);
    if (conflicting &&
        !::isnan(assumed_x) &&
        atomicCAS(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)),
                  __float_as_int(assumed_x),
                  __float_as_int(CUDART_NAN_F)) == __float_as_int(assumed_x)) {
      // Handle the conflict with surfel_index.
      float confidence = surfels(kSurfelConfidence, surfel_index);
      confidence -= 1;
      if (confidence <= 0) {
        // Delete the old surfel by replacing it with a new one.
        assumed_x = global_position.x;
        surfels(kSurfelY, surfel_index) = global_position.y;
        surfels(kSurfelZ, surfel_index) = global_position.z;
        surfels(kSurfelSmoothX, surfel_index) = global_position.x;
        surfels(kSurfelSmoothY, surfel_index) = global_position.y;
        surfels(kSurfelSmoothZ, surfel_index) = global_position.z;
        surfels(kSurfelNormalX, surfel_index) = global_normal.x;
        surfels(kSurfelNormalY, surfel_index) = global_normal.y;
        surfels(kSurfelNormalZ, surfel_index) = global_normal.z;
        *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))) = make_uchar4(color.x, color.y, color.z, 1);  // Sets the neighbor detach request flag.
        surfels(kSurfelRadiusSquared, surfel_index) = radius_buffer(y, x);
        #pragma unroll
        for (int i = 0; i < kSurfelNeighborCount; ++ i) {
          // TODO: (Sh/c)ould the neighbors be initialized to something here instead of being removed completely?
          *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + i, surfel_index)) = Surfel::kInvalidIndex;
        }
        surfels(kSurfelConfidence, surfel_index) = 1;
        *reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index)) = frame_index;
        *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = frame_index;
      } else {
        surfels(kSurfelConfidence, surfel_index) = confidence;
      }
      
      // Release lock by setting x coordinate.
      // Not sure whether the atomicExch is necessary here, an atomic assignment would suffice.
      atomicExch(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)), __float_as_int(assumed_x));
      conflicting = false;
    }
    
    // Force execution of the if case to avoid hang coming from the fact that
    // only the threads which don't go into the if case are executed otherwise.
    __syncthreads();
  }
  
  // Early exit if none of the threads in the warp needs to integrate data.
  if (!__any(integrate)) {
    return;
  }
  
  // The measurement supports the surfel. Determine whether they belong to the
  // same surface (then the measurement should be integrated into the surfel),
  // or to different surfaces (then the measurement must not be integrated).
  
  // Check whether the surfel normal looks towards the camera (instead of away from it).
  float surfel_distance = Norm(cam_space_surfel_pos);
  float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                            surfels(kSurfelNormalY, surfel_index),
                                            surfels(kSurfelNormalZ, surfel_index));
  float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
  // Cosine between the viewing direction and the surfel normal.
  float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                cam_space_surfel_pos.y * local_surfel_normal.y +
                                                cam_space_surfel_pos.z * local_surfel_normal.z);
  if (dot_angle > kSurfelNormalToViewingDirThreshold) {
    integrate = false;
  }
  if (!__any(integrate)) {
    return;
  }
  
  // Check whether the surfel normal is compatible with the measurement normal.
  // (Only applied when the measurement is in front of the surfel.)
  if (measurement_depth < cam_space_surfel_pos.z) {
    float dot_angle = global_surfel_normal.x * global_normal.x +
                      global_surfel_normal.y * global_normal.y +
                      global_surfel_normal.z * global_normal.z;
    if (dot_angle < cos_normal_compatibility_threshold) {
      integrate = false;
    }
  }
  
  // Check whether the observation scale is compatible with the surfel scale.
  const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
  if (surfel_radius_squared < 0) {
    integrate = false;
  }
  if (kCheckScaleCompatibilityForIntegration) {
    const float observation_radius_squared = radius_buffer(y, x);
    if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
      integrate = false;
    }
    if (!__any(integrate)) {
      return;
    }
  }
  
  // Integrate.
  // Critical section. HACK: replace surfel x coordinate with NaN to signal locked state.
  __syncthreads();  // Not sure if necessary
  while (__any(integrate)) {
    const float assumed_x = surfels(kSurfelX, surfel_index);
    if (integrate &&
        !::isnan(assumed_x) &&
        atomicCAS(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)),
                  __float_as_int(assumed_x),
                  __float_as_int(CUDART_NAN_F)) == __float_as_int(assumed_x)) {
      // TODO: Check why this max(1, ...) is necessary
      // Measurements shared by several surfels get proportionally less weight.
      const float weight = 1.0f / ::max(1, supporting_surfel_counts(y, x));
      
      float new_surfel_x = assumed_x;
      
      // If the surfel has been created (i.e., replaced) in this iteration, do not
      // integrate the data, since the association is probably not valid anymore.
      // Also, the neighbor detach request flag should be kept in that case.
      if (*reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index)) < frame_index) {
        // Confidence-weighted running average of position, normal and color;
        // confidence is clamped to max_surfel_confidence.
        const float confidence = surfels(kSurfelConfidence, surfel_index);
        surfels(kSurfelConfidence, surfel_index) =
            (confidence + weight < max_surfel_confidence) ?
            (confidence + weight) :
            max_surfel_confidence;
        float normalization_factor = 1.0f / (confidence + weight);
        
        new_surfel_x = (confidence * assumed_x + weight * global_position.x) * normalization_factor;  // assumed_x is the old surfel x value.
        surfels(kSurfelY, surfel_index) = (confidence * surfels(kSurfelY, surfel_index) + weight * global_position.y) * normalization_factor;
        surfels(kSurfelZ, surfel_index) = (confidence * surfels(kSurfelZ, surfel_index) + weight * global_position.z) * normalization_factor;
        
        float3 new_normal = make_float3(confidence * surfels(kSurfelNormalX, surfel_index) + weight * global_normal.x,
                                        confidence * surfels(kSurfelNormalY, surfel_index) + weight * global_normal.y,
                                        confidence * surfels(kSurfelNormalZ, surfel_index) + weight * global_normal.z);
        float normal_normalization = 1.0f / sqrtf(new_normal.x * new_normal.x + new_normal.y * new_normal.y + new_normal.z * new_normal.z);
        surfels(kSurfelNormalX, surfel_index) = normal_normalization * new_normal.x;
        surfels(kSurfelNormalY, surfel_index) = normal_normalization * new_normal.y;
        surfels(kSurfelNormalZ, surfel_index) = normal_normalization * new_normal.z;
        
        surfels(kSurfelRadiusSquared, surfel_index) = ::min(surfels(kSurfelRadiusSquared, surfel_index), radius_buffer(y, x));
        
        const uchar4 old_color = *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index)));
        const uchar3 new_color = make_uchar3(
            (confidence * old_color.x + weight * color.x) * normalization_factor + 0.5f,
            (confidence * old_color.y + weight * color.y) * normalization_factor + 0.5f,
            (confidence * old_color.z + weight * color.z) * normalization_factor + 0.5f);
        *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))) = make_uchar4(new_color.x, new_color.y, new_color.z, 0);  // NOTE: Unsets the neighbor detach request flag
        
        *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = frame_index;
      }
      
      // Release lock by setting x coordinate.
      // Not sure whether the atomicExch is necessary here, an atomic assignment would suffice.
      atomicExch(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)), __float_as_int(new_surfel_x));
      integrate = false;
    }
    
    // Force execution of the if case to avoid hang coming from the fact that
    // only the threads which don't go into the if case are executed otherwise.
    __syncthreads();
  }
}
// One thread per surfel: projects the surfel into the current depth image and
// delegates to IntegrateOrConflictSurfel for up to two pixels — the pixel the
// surfel projects into, and the neighboring pixel closest to the sub-pixel
// projection (selected by the triangle-half tests below).
//
// Out-of-range / inactive threads clamp surfel_index to 0 and carry
// integrate == false instead of returning, so the whole warp stays together
// for the __any votes and the __syncthreads() inside
// IntegrateOrConflictSurfel.
// NOTE(review): __any is the legacy mask-less warp vote (valid in HIP,
// removed for CUDA Volta+) — verify the targeted architectures.
__global__ void IntegrateMeasurementsCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float max_surfel_confidence,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float inv_depth_scaling,
    float fx, float fy, float cx, float cy,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    CUDAMatrix3x4 local_T_global,
    CUDAMatrix3x4 global_T_local,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<uchar3> color_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<float> first_surfel_depth,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  bool integrate = true;
  
  // Check whether the surfel projects onto the image. Keep all threads active
  // such that the __syncthreads() later will work.
  if (surfel_index >= surfel_count) {
    surfel_index = 0;
    integrate = false;
  }
  if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
    integrate = false;
  }
  if (!__any(integrate)) {
    return;
  }
  
  // Transform the surfel into the camera frame; discard surfels behind the
  // camera.
  float3 global_position =
      make_float3(surfels(kSurfelX, surfel_index),
                  surfels(kSurfelY, surfel_index),
                  surfels(kSurfelZ, surfel_index));
  float3 local_position = local_T_global * global_position;
  if (local_position.z <= 0) {
    // TODO: Compute z before x and y such that this early exit decision can be done earlier?
    integrate = false;
  }
  
  // Early exit?
  if (!__any(integrate)) {
    return;
  }
  
  // Pinhole projection; px / py is the containing pixel.
  float2 pixel_pos =
      make_float2(fx * (local_position.x / local_position.z) + cx,
                  fy * (local_position.y / local_position.z) + cy);
  int px = static_cast<int>(pixel_pos.x);
  int py = static_cast<int>(pixel_pos.y);
  if (pixel_pos.x < 0 || pixel_pos.y < 0 ||
      px < 0 || py < 0 ||
      px >= depth_buffer.width() || py >= depth_buffer.height()) {
    px = 0;
    py = 0;
    integrate = false;
  }
  
  if (surfels(kSurfelRadiusSquared, surfel_index) < 0) {
    integrate = false;
  }
  
  // Early exit?
  if (!__any(integrate)) {
    return;
  }
  
  // First association: the pixel the surfel projects into. Note that
  // `integrate` is passed by value, so this call does not change it here.
  IntegrateOrConflictSurfel(
      integrate, frame_index, px, py,
      fx_inv, fy_inv, cx_inv, cy_inv,
      local_position,
      surfel_index, surfels,
      local_T_global,
      global_T_local,
      max_surfel_confidence,
      sensor_noise_factor,
      cos_normal_compatibility_threshold,
      inv_depth_scaling, depth_buffer,
      normals_buffer,
      radius_buffer,
      color_buffer,
      supporting_surfels,
      supporting_surfel_counts,
      conflicting_surfels,
      first_surfel_depth);
  
  // Second association: pick the 4-neighbor pixel the sub-pixel projection is
  // closest to, based on which triangle half of the pixel it falls into.
  float x_frac = pixel_pos.x - px;
  float y_frac = pixel_pos.y - py;
  
  int offset_x = 0;
  int offset_y = 0;
  if (x_frac < y_frac) {
    // Surfel is within the bottom-left triangle half of the pixel.
    if (x_frac < 1 - y_frac) {
      // Surfel is on the left side of the pixel.
      // NOTE(review): `px > 1` looks stricter than needed — accessing px - 1
      // only requires px > 0; possibly an intentional border convention, but
      // it skips the px == 1 column. Verify.
      if (px > 1) {
        offset_x = px - 1;
        offset_y = py;
      } else {
        integrate = false;
      }
    } else {
      // Surfel is on the bottom side of the pixel.
      if (py < depth_buffer.height() - 1) {
        offset_x = px;
        offset_y = py + 1;
      } else {
        integrate = false;
      }
    }
  } else {
    // Surfel is within the top-right triangle half of the pixel.
    if (x_frac < 1 - y_frac) {
      // Surfel is on the top side of the pixel.
      if (py > 0) {
        offset_x = px;
        offset_y = py - 1;
      } else {
        integrate = false;
      }
    } else {
      // Surfel is on the right side of the pixel.
      if (px < depth_buffer.width() - 1) {
        offset_x = px + 1;
        offset_y = py;
      } else {
        integrate = false;
      }
    }
  }
  
  IntegrateOrConflictSurfel(
      integrate, frame_index, offset_x, offset_y,
      fx_inv, fy_inv, cx_inv, cy_inv,
      local_position,
      surfel_index, surfels,
      local_T_global,
      global_T_local,
      max_surfel_confidence,
      sensor_noise_factor,
      cos_normal_compatibility_threshold,
      inv_depth_scaling, depth_buffer,
      normals_buffer,
      radius_buffer,
      color_buffer,
      supporting_surfels,
      supporting_surfel_counts,
      conflicting_surfels,
      first_surfel_depth);
  
  // TODO: use half integration weight if the surfel is associated to two pixels?
}
// Host wrapper for IntegrateMeasurementsCUDAKernel: derives the unprojection
// intrinsics (pixel-center convention) from the pinhole camera, then launches
// one thread per surfel to integrate the current frame's measurements into
// the surfel map. No-op if there are no surfels.
void IntegrateMeasurementsCUDA(
    hipStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float max_surfel_confidence,
    float sensor_noise_factor,
    float normal_compatibility_threshold_deg,
    const SE3f& global_T_local,
    float depth_scaling,
    const PinholeCamera4f& depth_camera,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    const CUDABuffer<Vec3u8>& color_buffer,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& supporting_surfel_counts,
    const CUDABuffer<u32>& conflicting_surfels,
    const CUDABuffer<float>& first_surfel_depth,
    u32 surfel_count,
    CUDABuffer<float>* surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  if (surfel_count == 0) {
    return;
  }
  
  // Projection intrinsics.
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  
  // Unprojection intrinsics for pixel center convention.
  const float fx_inv = 1.0f / fx;
  const float fy_inv = 1.0f / fy;
  const float cx_pixel_center = cx - 0.5f;
  const float cy_pixel_center = cy - 0.5f;
  const float cx_inv_pixel_center = -cx_pixel_center / fx;
  const float cy_inv_pixel_center = -cy_pixel_center / fy;
  
  // One thread per surfel.
  constexpr int kThreadsPerBlock = 1024;
  const dim3 kernel_grid(GetBlockCount(surfel_count, kThreadsPerBlock));
  const dim3 kernel_block(kThreadsPerBlock);
  hipLaunchKernelGGL(
      IntegrateMeasurementsCUDAKernel,
      kernel_grid, kernel_block, 0, stream,
      frame_index,
      surfel_integration_active_window_size,
      max_surfel_confidence,
      sensor_noise_factor,
      cosf(M_PI / 180.0f * normal_compatibility_threshold_deg),
      1.0f / depth_scaling,
      fx, fy, cx, cy,
      fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center,
      CUDAMatrix3x4(global_T_local.inverse().matrix3x4()),
      CUDAMatrix3x4(global_T_local.matrix3x4()),
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      // Vec3u8 and uchar3 share the same byte layout for the kernel's purposes.
      *reinterpret_cast<const CUDABuffer_<uchar3>*>(&color_buffer.ToCUDA()),
      supporting_surfels.ToCUDA(),
      supporting_surfel_counts.ToCUDA(),
      conflicting_surfels.ToCUDA(),
      first_surfel_depth.ToCUDA(),
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// One thread per surfel: if the surfel is active and judged visible in the
// current frame (projects into the image, not occluded, normal facing the
// camera, scale-compatible), updates its regularization-neighbor list from
// the surfels supported by the 4-neighborhood of its projection pixel.
// A candidate replaces the currently farthest neighbor slot if it is closer,
// within the allowed radius factor, and has a compatible normal.
__global__ void UpdateNeighborsCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float radius_factor_for_regularization_neighbors_squared,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> /*conflicting_surfels*/,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    float sensor_noise_factor,
    float depth_correction_factor,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> /*normals_buffer*/,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<float> first_surfel_depth,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
      return;
    }
    
    // Project the surfel into the image.
    float3 global_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 cam_space_surfel_pos = local_T_global * global_position;
    if (cam_space_surfel_pos.z <= 0) {
      // TODO: Compute z before x and y such that this early exit decision can be done earlier?
      return;
    }
    float2 pixel_pos =
        make_float2(fx * (cam_space_surfel_pos.x / cam_space_surfel_pos.z) + cx,
                    fy * (cam_space_surfel_pos.y / cam_space_surfel_pos.z) + cy);
    int x = static_cast<int>(pixel_pos.x);
    int y = static_cast<int>(pixel_pos.y);
    
    // Use 1 pixel border.
    // (The 4-neighborhood accesses below need x +/- 1 and y +/- 1 in bounds.)
    constexpr int kBorder = 1;
    if (x < kBorder || y < kBorder ||
        x >= supporting_surfels.width() - kBorder || y >= supporting_surfels.height() - kBorder) {
      return;
    }
    
    // Is the surfel occluded?
    float measurement_depth = depth_correction_factor * depth_buffer(y, x);
    float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
    if (kProtectSlightlyOccludedSurfels) {
      const float first_surfel_depth_value = first_surfel_depth(y, x);
      if (first_surfel_depth_value < occlusion_depth) {
        // TODO: Would it be better to use the front surfel's radius for that?
        occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
      }
    }
    if (cam_space_surfel_pos.z > occlusion_depth) {
      return;
    }
    
    // Check whether the surfel normal looks towards the camera (instead of away from it).
    float surfel_distance = Norm(cam_space_surfel_pos);
    float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                              surfels(kSurfelNormalY, surfel_index),
                                              surfels(kSurfelNormalZ, surfel_index));
    float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
    // Cosine between the viewing direction and the surfel normal.
    float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                  cam_space_surfel_pos.y * local_surfel_normal.y +
                                                  cam_space_surfel_pos.z * local_surfel_normal.z);
    if (dot_angle > kSurfelNormalToViewingDirThreshold) {
      return;
    }
    
    // Check whether the surfel normal is compatible with the measurement normal (if enabled).
    /*if (measurement_depth < cam_space_surfel_pos.z) {
      float2 normal = normals_buffer(y, x);
      float3 local_normal = make_float3(normal.x, normal.y, -sqrtf(::max(0.f, 1 - normal.x * normal.x - normal.y * normal.y)));
      float dot_angle = local_surfel_normal.x * local_normal.x +
                        local_surfel_normal.y * local_normal.y +
                        local_surfel_normal.z * local_normal.z;
      if (dot_angle < kNormalCompatibilityThreshold) {
        return;
      }
    }*/
    
    const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
    if (surfel_radius_squared < 0) {
      return;
    }
    if (kCheckScaleCompatibilityForNeighborAssignment) {
      const float observation_radius_squared = radius_buffer(y, x);
      if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
        return;
      }
    }
    
    // We think that the surfel is visible, update its neighbors.
    float radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
    float3 global_normal =
        make_float3(surfels(kSurfelNormalX, surfel_index),
                    surfels(kSurfelNormalY, surfel_index),
                    surfels(kSurfelNormalZ, surfel_index));
    
    // Compute distances to existing neighbors.
    // (Empty slots get infinite distance so they are filled first.)
    float neighbor_distances_squared[kSurfelNeighborCount];
    u32 neighbor_surfel_indices[kSurfelNeighborCount];
    for (int n = 0; n < kSurfelNeighborCount; ++ n) {
      neighbor_surfel_indices[n] = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, surfel_index));
      if (neighbor_surfel_indices[n] == Surfel::kInvalidIndex) {
        neighbor_distances_squared[n] = CUDART_INF_F;
      } else {
        float3 neighbor_position =
            make_float3(surfels(kSurfelX, neighbor_surfel_indices[n]),
                        surfels(kSurfelY, neighbor_surfel_indices[n]),
                        surfels(kSurfelZ, neighbor_surfel_indices[n]));
        float3 surfel_to_neighbor = make_float3(
            global_position.x - neighbor_position.x,
            global_position.y - neighbor_position.y,
            global_position.z - neighbor_position.z);
        neighbor_distances_squared[n] = surfel_to_neighbor.x * surfel_to_neighbor.x + surfel_to_neighbor.y * surfel_to_neighbor.y + surfel_to_neighbor.z * surfel_to_neighbor.z;
      }
    }
    
    // Candidate neighbors: the surfels supporting the 4-neighborhood pixels
    // of the projection.
    constexpr int kDirectionsX[4] = {-1, 1, 0, 0};
    constexpr int kDirectionsY[4] = {0, 0, -1, 1};
    for (int direction = 0; direction < 4; ++ direction) {
      u32 neighbor_index = supporting_surfels(y + kDirectionsY[direction], x + kDirectionsX[direction]);
      if (neighbor_index != Surfel::kInvalidIndex &&
          neighbor_index != surfel_index) {
        // Check for closeness.
        float3 this_to_neighbor = make_float3(surfels(kSurfelX, neighbor_index) - global_position.x,
                                              surfels(kSurfelY, neighbor_index) - global_position.y,
                                              surfels(kSurfelZ, neighbor_index) - global_position.z);
        float distance_squared =
            this_to_neighbor.x * this_to_neighbor.x + this_to_neighbor.y * this_to_neighbor.y + this_to_neighbor.z * this_to_neighbor.z;
        if (distance_squared > radius_factor_for_regularization_neighbors_squared * radius_squared) {
          neighbor_index = Surfel::kInvalidIndex;
        }
        
        if (neighbor_index != Surfel::kInvalidIndex) {
          // Check for compatible normal.
          // (Rejects candidates whose normal points away, i.e. opposite surface side.)
          float3 neighbor_normal =
              make_float3(surfels(kSurfelNormalX, neighbor_index),
                          surfels(kSurfelNormalY, neighbor_index),
                          surfels(kSurfelNormalZ, neighbor_index));
          float normal_dot = global_normal.x * neighbor_normal.x +
                             global_normal.y * neighbor_normal.y +
                             global_normal.z * neighbor_normal.z;
          if (normal_dot <= 0) {
            neighbor_index = Surfel::kInvalidIndex;
          }
          
          if (neighbor_index != Surfel::kInvalidIndex) {
            // Check whether it is already a neighbor, or find the best insertion slot.
            // (best_n ends up as the farthest current slot; -1 if the
            // candidate is already present.)
            int best_n = -1;
            float best_distance_squared = -1;
            for (int n = 0; n < kSurfelNeighborCount; ++ n) {
              if (neighbor_index == neighbor_surfel_indices[n]) {
                best_n = -1;
                break;
              } else if (neighbor_distances_squared[n] > best_distance_squared) {
                best_n = n;
                best_distance_squared = neighbor_distances_squared[n];
              }
            }
            if (best_n >= 0 && distance_squared < best_distance_squared) {
              neighbor_surfel_indices[best_n] = neighbor_index;
              neighbor_distances_squared[best_n] = distance_squared;
            }
          }
        }
      }
    }
    
    // Write the neighbor indices back to global memory.
    for (int n = 0; n < kSurfelNeighborCount; ++ n) {
      *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, surfel_index)) = neighbor_surfel_indices[n];
    }
  }
}
// Drops neighbor entries that point to surfels which have requested
// detachment (e.g., because they were merged away).
// One thread per surfel; each thread scans its own kSurfelNeighborCount
// neighbor slots and invalidates any slot whose target surfel carries the
// detach-request flag in the alpha byte of its packed color.
__global__ void UpdateNeighborsCUDARemoveReplacedNeighborsKernel(
    u32 frame_index,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  for (int n = 0; n < kSurfelNeighborCount; ++ n) {
    // The neighbor slot stores a u32 surfel index in a float-typed buffer.
    u32* neighbor_slot = reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, index));
    const u32 neighbor = *neighbor_slot;
    if (neighbor == Surfel::kInvalidIndex) {
      continue;
    }
    // The neighbor detach request flag lives in the w (alpha) byte of the
    // neighbor's packed color.
    const bool detach_requested =
        (*reinterpret_cast<u8*>(&reinterpret_cast<uchar4*>(&surfels(kSurfelColor, neighbor))->w) == 1);
    if (detach_requested) {
      *neighbor_slot = Surfel::kInvalidIndex;
    }
  }
}
// Host wrapper: updates the regularization-neighbor lists of all surfels.
//
// Enqueues two kernels on the given stream:
//  1. UpdateNeighborsCUDAKernel: (re-)selects up to kSurfelNeighborCount
//     neighbors per surfel using the per-pixel supporting-surfel buffers.
//  2. UpdateNeighborsCUDARemoveReplacedNeighborsKernel: removes neighbor
//     entries whose target surfel has its detach-request flag set.
// Both launches are asynchronous; no synchronization happens here unless
// CUDA_SEQUENTIAL_CHECKS is defined (debug mode).
void UpdateNeighborsCUDA(
    hipStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float radius_factor_for_regularization_neighbors,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& conflicting_surfels,
    const PinholeCamera4f& depth_camera,
    const SE3f& local_T_global,
    float sensor_noise_factor,
    float depth_correction_factor,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    const CUDABuffer<float>& first_surfel_depth,
    usize surfel_count,
    CUDABuffer<float>* surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Nothing to do without surfels (also avoids a zero-sized grid launch).
  if (surfel_count == 0) {
    return;
  }
  
  // Pinhole intrinsics in the order (fx, fy, cx, cy).
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  
  // 1D launch, one thread per surfel.
  constexpr int kSurfelsBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kSurfelsBlockWidth));
  dim3 block_dim(kSurfelsBlockWidth);
  
  // NOTE: The radius factor is squared here so the kernel can compare
  // squared distances directly.
  hipLaunchKernelGGL(( UpdateNeighborsCUDAKernel)
  , dim3(grid_dim), dim3(block_dim), 0, stream, 
      frame_index,
      surfel_integration_active_window_size,
      radius_factor_for_regularization_neighbors * radius_factor_for_regularization_neighbors,
      supporting_surfels.ToCUDA(),
      conflicting_surfels.ToCUDA(),
      fx, fy, cx, cy,
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      sensor_noise_factor,
      depth_correction_factor,
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      first_surfel_depth.ToCUDA(),
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Second pass: prune neighbors that were merged away in earlier steps.
  // Stream-ordered after the first kernel, so it sees its results.
  hipLaunchKernelGGL(( UpdateNeighborsCUDARemoveReplacedNeighborsKernel)
  , dim3(grid_dim), dim3(block_dim), 0, stream, 
      frame_index,
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// Atomically lowers first_surfel_depth(y, x) to the surfel's camera-space
// depth (cam_space_surfel_pos.z) if that is smaller than the current value.
//
// The float depth is reinterpreted as a signed int for atomicMin. This is
// correct because non-negative IEEE-754 floats have the same ordering as
// their bit patterns interpreted as signed integers — so it only works while
// every value stored in the buffer (including its initialization value) is
// positive, which callers ensure by rejecting surfels with z <= 0.
__forceinline__ __device__ void RenderMinDepthAtPixel(
    int x, int y,
    const float3& cam_space_surfel_pos,
    CUDABuffer_<float>& first_surfel_depth) {
  // Should behave properly as long as all the floats are positive.
  atomicMin(reinterpret_cast<int*>(&first_surfel_depth(y, x)), __float_as_int(cam_space_surfel_pos.z));
}
// Renders, per depth-image pixel, the minimum camera-space depth over all
// active surfels that project onto it, into first_surfel_depth.
//
// Launch layout: 1D grid, one thread per surfel. Each surfel splats its depth
// into the pixel it projects to and additionally into the closest neighbor
// pixel (left/right/top/bottom, chosen by which triangle quarter of the pixel
// the sub-pixel projection falls into).
// NOTE(review): first_surfel_depth must be pre-initialized to a large
// positive value by the caller since only min-updates happen here — confirm.
__global__ void RenderMinDepthCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    CUDABuffer_<float> first_surfel_depth) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    // Only surfels within the active integration window contribute.
    if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
      return;
    }
    
    float3 global_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 local_position = local_T_global * global_position;
    if (local_position.z <= 0) {
      // Surfel is behind the camera; also keeps the atomicMin float-as-int
      // trick valid (positive depths only).
      // TODO: Compute z before x and y such that this early exit can be done earlier?
      return;
    }
    
    // Pinhole projection to continuous pixel coordinates.
    float2 pixel_pos =
        make_float2(fx * (local_position.x / local_position.z) + cx,
                    fy * (local_position.y / local_position.z) + cy);
    int px = static_cast<int>(pixel_pos.x);
    int py = static_cast<int>(pixel_pos.y);
    if (pixel_pos.x < 0 || pixel_pos.y < 0 ||
        px < 0 || py < 0 ||
        px >= first_surfel_depth.width() || py >= first_surfel_depth.height()) {
      return;
    }
    
    RenderMinDepthAtPixel(
        px, py, local_position,
        first_surfel_depth);
    
    // Select the neighboring pixel closest to the sub-pixel position and
    // splat into it as well (if it lies inside the image).
    float x_frac = pixel_pos.x - px;
    float y_frac = pixel_pos.y - py;
    
    bool integrate = true;
    int offset_x;
    int offset_y;
    if (x_frac < y_frac) {
      // Surfel is within the bottom-left triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the left side of the pixel.
        // BUGFIX: this guard was (px > 1), which needlessly skipped px == 1
        // even though column 0 is valid; (px > 0) matches the symmetric
        // (py > 0) guard used for the top side below.
        if (px > 0) {
          offset_x = px - 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the bottom side of the pixel.
        if (py < first_surfel_depth.height() - 1) {
          offset_x = px;
          offset_y = py + 1;
        } else {
          integrate = false;
        }
      }
    } else {
      // Surfel is within the top-right triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the top side of the pixel.
        if (py > 0) {
          offset_x = px;
          offset_y = py - 1;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the right side of the pixel.
        if (px < first_surfel_depth.width() - 1) {
          offset_x = px + 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      }
    }
    
    if (integrate) {
      RenderMinDepthAtPixel(
          offset_x, offset_y, local_position,
          first_surfel_depth);
    }
  }
}
// Host wrapper: launches RenderMinDepthCUDAKernel to render the per-pixel
// minimum active-surfel depth into first_surfel_depth.
// Asynchronous on the given stream; no synchronization happens here unless
// CUDA_SEQUENTIAL_CHECKS is defined.
// NOTE(review): assumes first_surfel_depth was initialized to a large
// positive value before this call — confirm in caller.
void RenderMinDepthCUDA(
    hipStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    const SE3f& local_T_global,
    const PinholeCamera4f& depth_camera,
    CUDABuffer<float>* first_surfel_depth,
    u32 surfel_count,
    const CUDABuffer<float>& surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Avoid a zero-sized grid launch.
  if (surfel_count == 0) {
    return;
  }
  
  // Pinhole intrinsics in the order (fx, fy, cx, cy).
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  
  // 1D launch, one thread per surfel.
  constexpr int kBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kBlockWidth));
  dim3 block_dim(kBlockWidth);
  hipLaunchKernelGGL(( RenderMinDepthCUDAKernel)
  , dim3(grid_dim), dim3(block_dim), 0, stream, 
      frame_index,
      surfel_integration_active_window_size,
      fx, fy, cx, cy,
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      surfel_count,
      surfels.ToCUDA(),
      first_surfel_depth->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// Classifies the relationship between one surfel (already projected to pixel
// (x, y), camera-space position cam_space_surfel_pos) and the depth
// measurement at that pixel, and updates the per-pixel association buffers:
//  - conflicting_surfels(y, x): set to this surfel if it is the front-most
//    surfel at the pixel and lies clearly in front of the measurement.
//  - supporting_surfels(y, x): set to this surfel via atomicCAS (first
//    writer wins) if the measurement supports it and both appear to belong
//    to the same surface.
//  - supporting_surfel_counts(y, x) / supporting_surfel_depth_sums(y, x):
//    atomically accumulated over all supporting surfels (for later
//    averaging by other code).
// first_surfel_depth must already contain the per-pixel minimum surfel depth
// (see RenderMinDepthCUDA).
__device__ void ConsiderSurfelAssociationToPixel(
    int x, int y,
    const float3& cam_space_surfel_pos,
    unsigned int surfel_index,
    const CUDABuffer_<float>& surfels,
    const CUDAMatrix3x4& local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float depth_correction_factor,
    const CUDABuffer_<u16>& depth_buffer,
    const CUDABuffer_<float2>& normals_buffer,
    const CUDABuffer_<float>& radius_buffer,
    CUDABuffer_<u32>& supporting_surfels,
    CUDABuffer_<u32>& supporting_surfel_counts,
    CUDABuffer_<float>& supporting_surfel_depth_sums,
    CUDABuffer_<u32>& conflicting_surfels,
    CUDABuffer_<float>& first_surfel_depth) {
  // Check whether the surfel falls on a depth pixel.
  float measurement_depth = depth_correction_factor * depth_buffer(y, x);
  if (measurement_depth <= 0) {
    // No valid depth measurement at this pixel.
    return;
  }
  
  // Check if this or another surfel is conflicting.
  const float first_surfel_depth_value = first_surfel_depth(y, x);
  if (first_surfel_depth_value < (1 - sensor_noise_factor) * measurement_depth) {
    // This or another surfel is conflicting.
    // (Exact float comparison identifies the front-most surfel, since its
    // depth value was written verbatim by the min-depth rendering pass.)
    if (first_surfel_depth_value == cam_space_surfel_pos.z) {
      // This surfel is conflicting.
      conflicting_surfels(y, x) = surfel_index;
    }
    return;
  }
  
  // Determine the depth from which on surfels are considered to be occluded.
  float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
  if (kProtectSlightlyOccludedSurfels) {
    if (first_surfel_depth_value < occlusion_depth) {
      // TODO: Would it be better to use the front surfel's radius for that?
      occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
    }
  }
  
  // Check if this surfel is occluded.
  if (cam_space_surfel_pos.z > occlusion_depth) {
    // Surfel is occluded.
    return;
  }
  
  // The measurement supports the surfel. Determine whether they belong to the
  // same surface (then the measurement should be integrated into the surfel),
  // or to different surfaces (then the measurement must not be integrated).
  
  // Check whether the surfel normal looks towards the camera (instead of away from it).
  float surfel_distance = Norm(cam_space_surfel_pos);
  float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                            surfels(kSurfelNormalY, surfel_index),
                                            surfels(kSurfelNormalZ, surfel_index));
  float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
  // Cosine of the angle between the viewing direction and the surfel normal.
  float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                cam_space_surfel_pos.y * local_surfel_normal.y +
                                                cam_space_surfel_pos.z * local_surfel_normal.z);
  if (dot_angle > kSurfelNormalToViewingDirThreshold) {
    return;
  }
  
  // Check whether the surfel normal is compatible with the measurement normal.
  // (Only checked when the measurement lies in front of the surfel.)
  if (measurement_depth < cam_space_surfel_pos.z) {
    float2 normal = normals_buffer(y, x);
    // The measurement normal's z component is reconstructed from the
    // unit-length constraint; the negative sign makes it face the camera
    // (assuming the camera looks along +z — TODO confirm convention).
    float3 local_normal = make_float3(normal.x, normal.y, -sqrtf(::max(0.f, 1 - normal.x * normal.x - normal.y * normal.y)));
    float dot_angle = local_surfel_normal.x * local_normal.x +
                      local_surfel_normal.y * local_normal.y +
                      local_surfel_normal.z * local_normal.z;
    if (dot_angle < cos_normal_compatibility_threshold) {
      // HACK: Avoid creation of a new surfel here in case there is no other conflicting or supporting surfel
      //       by setting conflicting_surfels(y, x) to an invalid index unequal to Surfel::kInvalidIndex.
      // TODO: This can be harmful since it can prevent the creation of valid surfaces. Delete it?
      // atomicCAS(&conflicting_surfels(y, x), Surfel::kInvalidIndex, Surfel::kInvalidIndex - 1);
      return;
    }
  }
  
  // The measurement seems to belong to the same surface as the surfel.
  // Check whether the observation scale is compatible with the surfel scale.
  const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
  if (surfel_radius_squared <= 0) {
    // Non-positive radius marks an invalid / merged-away surfel.
    return;
  }
  if (kCheckScaleCompatibilityForIntegration) {
    const float observation_radius_squared = radius_buffer(y, x);
    if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
      // HACK: Avoid creation of a new surfel here in case there is no other conflicting or supporting surfel
      //       by setting conflicting_surfels(y, x) to an invalid index unequal to Surfel::kInvalidIndex.
      atomicCAS(&conflicting_surfels(y, x), Surfel::kInvalidIndex, Surfel::kInvalidIndex - 1);
      return;
    }
  }
  
  // Replace the supporting surfel entry only if it was previously empty
  atomicCAS(&supporting_surfels(y, x), Surfel::kInvalidIndex, surfel_index);
  
  // Add to supporting surfel count for the pixel
  atomicAdd(&supporting_surfel_counts(y, x), 1);
  
  // Add to the supporting surfel depth sum for the pixel
  atomicAdd(&supporting_surfel_depth_sums(y, x), cam_space_surfel_pos.z);
}
// Associates each active surfel with the depth measurement it projects onto.
//
// Launch layout: 1D grid, one thread per surfel. Each surfel is tested
// against the pixel it projects to and against the neighbor pixel closest to
// its sub-pixel projection (the same splatting scheme as in
// RenderMinDepthCUDAKernel); ConsiderSurfelAssociationToPixel does the
// per-pixel classification and buffer updates.
__global__ void AssociateSurfelsCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float depth_correction_factor,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<float> supporting_surfel_depth_sums,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<float> first_surfel_depth) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    // Only surfels within the active integration window participate.
    if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
      return;
    }
    
    float3 global_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 local_position = local_T_global * global_position;
    if (local_position.z <= 0) {
      // Surfel is behind the camera.
      // TODO: Compute z before x and y such that this early exit can be done earlier?
      return;
    }
    
    // Pinhole projection to continuous pixel coordinates.
    float2 pixel_pos =
        make_float2(fx * (local_position.x / local_position.z) + cx,
                    fy * (local_position.y / local_position.z) + cy);
    int px = static_cast<int>(pixel_pos.x);
    int py = static_cast<int>(pixel_pos.y);
    if (pixel_pos.x < 0 || pixel_pos.y < 0 ||
        px < 0 || py < 0 ||
        px >= depth_buffer.width() || py >= depth_buffer.height()) {
      return;
    }
    
    ConsiderSurfelAssociationToPixel(
        px, py, local_position,
        surfel_index, surfels,
        local_T_global,
        sensor_noise_factor,
        cos_normal_compatibility_threshold,
        depth_correction_factor, depth_buffer, normals_buffer, radius_buffer, supporting_surfels,
        supporting_surfel_counts, supporting_surfel_depth_sums, conflicting_surfels, first_surfel_depth);
    
    // Also consider the neighbor pixel closest to the sub-pixel position.
    float x_frac = pixel_pos.x - px;
    float y_frac = pixel_pos.y - py;
    
    bool integrate = true;
    int offset_x;
    int offset_y;
    if (x_frac < y_frac) {
      // Surfel is within the bottom-left triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the left side of the pixel.
        // BUGFIX: this guard was (px > 1), which needlessly skipped px == 1
        // even though column 0 is valid; (px > 0) matches the symmetric
        // (py > 0) guard used for the top side below.
        if (px > 0) {
          offset_x = px - 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the bottom side of the pixel.
        if (py < depth_buffer.height() - 1) {
          offset_x = px;
          offset_y = py + 1;
        } else {
          integrate = false;
        }
      }
    } else {
      // Surfel is within the top-right triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the top side of the pixel.
        if (py > 0) {
          offset_x = px;
          offset_y = py - 1;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the right side of the pixel.
        if (px < depth_buffer.width() - 1) {
          offset_x = px + 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      }
    }
    
    if (integrate) {
      ConsiderSurfelAssociationToPixel(
          offset_x, offset_y, local_position,
          surfel_index, surfels,
          local_T_global,
          sensor_noise_factor,
          cos_normal_compatibility_threshold,
          depth_correction_factor, depth_buffer, normals_buffer, radius_buffer, supporting_surfels,
          supporting_surfel_counts, supporting_surfel_depth_sums, conflicting_surfels, first_surfel_depth);
    }
  }
}
// Host wrapper: launches AssociateSurfelsCUDAKernel to associate all surfels
// with the current frame's depth measurements, filling the supporting- and
// conflicting-surfel buffers.
// Asynchronous on the given stream; no synchronization happens here unless
// CUDA_SEQUENTIAL_CHECKS is defined.
// NOTE(review): assumes the supporting/conflicting buffers were reset and
// first_surfel_depth was rendered (RenderMinDepthCUDA) beforehand — confirm
// in caller.
void AssociateSurfelsCUDA(
    hipStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float sensor_noise_factor,
    float normal_compatibility_threshold_deg,
    const SE3f& local_T_global,
    const PinholeCamera4f& depth_camera,
    float depth_correction_factor,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    CUDABuffer<u32>* supporting_surfels,
    CUDABuffer<u32>* supporting_surfel_counts,
    CUDABuffer<float>* supporting_surfel_depth_sums,
    CUDABuffer<u32>* conflicting_surfels,
    CUDABuffer<float>* first_surfel_depth,
    u32 surfel_count,
    const CUDABuffer<float>& surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Avoid a zero-sized grid launch.
  if (surfel_count == 0) {
    return;
  }
  
  // Pinhole intrinsics in the order (fx, fy, cx, cy).
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  
  // 1D launch, one thread per surfel. The angle threshold is converted to
  // its cosine here so the kernel can compare dot products directly.
  constexpr int kBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kBlockWidth));
  dim3 block_dim(kBlockWidth);
  hipLaunchKernelGGL(( AssociateSurfelsCUDAKernel)
  , dim3(grid_dim), dim3(block_dim), 0, stream, 
      frame_index,
      surfel_integration_active_window_size,
      fx, fy, cx, cy,
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      sensor_noise_factor,
      cosf(M_PI / 180.0f * normal_compatibility_threshold_deg),
      surfel_count,
      surfels.ToCUDA(),
      depth_correction_factor,
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      supporting_surfels->ToCUDA(),
      supporting_surfel_counts->ToCUDA(),
      supporting_surfel_depth_sums->ToCUDA(),
      conflicting_surfels->ToCUDA(),
      first_surfel_depth->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
constexpr int kMergeBlockWidth = 1024;
// Decides whether the given surfel should be merged into the surfel that
// supports pixel (x, y) (per supporting_surfels), and performs the merge by
// invalidating THIS surfel if so. Returns true iff the surfel was merged.
//
// The first part mirrors ConsiderSurfelAssociationToPixel() (valid
// measurement, conflict, occlusion, normal and scale compatibility checks).
// If all pass and a *different* surfel supports the pixel, the two surfels
// are compared on radius ratio, squared distance, and normal agreement; on a
// match, this surfel is marked merged: its update stamp is reset to 0, its
// squared radius is set to -1 (invalid marker), and the neighbor detach
// request flag is set in the alpha byte of its packed color so that neighbor
// lists drop it in a later pass.
//
// NOTE(review): unlike the association variant, there is no explicit
// (surfel_radius_squared <= 0) guard before the scale-ratio division below;
// the kernel caller filters radius >= 0, but radius == 0 would yield inf in
// the ratio (which harmlessly returns false) — confirm this is intended.
__device__ bool ConsiderSurfelMergeAtPixel(
    int x, int y,
    const float3& cam_space_surfel_pos,
    const float3& global_surfel_pos,
    unsigned int surfel_index,
    CUDABuffer_<float>& surfels,
    const CUDAMatrix3x4& local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float depth_correction_factor,
    const CUDABuffer_<u16>& depth_buffer,
    const CUDABuffer_<float2>& normals_buffer,
    const CUDABuffer_<float>& radius_buffer,
    CUDABuffer_<u32>& supporting_surfels,
    CUDABuffer_<u32>& supporting_surfel_counts,
    CUDABuffer_<float>& supporting_surfel_depth_sums,
    CUDABuffer_<u32>& conflicting_surfels,
    CUDABuffer_<float>& first_surfel_depth) {
  // Check whether the surfel falls on a depth pixel.
  float measurement_depth = depth_correction_factor * depth_buffer(y, x);
  if (measurement_depth <= 0) {
    // No valid depth measurement at this pixel.
    return false;
  }
  
  // Check if this or another surfel is conflicting.
  const float first_surfel_depth_value = first_surfel_depth(y, x);
  if (first_surfel_depth_value < (1 - sensor_noise_factor) * measurement_depth) {
    // This or another surfel is conflicting.
    if (first_surfel_depth_value == cam_space_surfel_pos.z) {
      // This surfel is conflicting.
      conflicting_surfels(y, x) = surfel_index;
    }
    return false;
  }
  
  // Determine the depth from which on surfels are considered to be occluded.
  float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
  if (kProtectSlightlyOccludedSurfels) {
    if (first_surfel_depth_value < occlusion_depth) {
      // TODO: Would it be better to use the front surfel's radius for that?
      occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
    }
  }
  
  // Check if this surfel is occluded.
  if (cam_space_surfel_pos.z > occlusion_depth) {
    // Surfel is occluded.
    return false;
  }
  
  // The measurement supports the surfel. Determine whether they belong to the
  // same surface (then the measurement should be integrated into the surfel),
  // or to different surfaces (then the measurement must not be integrated).
  
  // Check whether the surfel normal looks towards the camera (instead of away from it).
  float surfel_distance = Norm(cam_space_surfel_pos);
  float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                            surfels(kSurfelNormalY, surfel_index),
                                            surfels(kSurfelNormalZ, surfel_index));
  float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
  // Cosine of the angle between the viewing direction and the surfel normal.
  float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                cam_space_surfel_pos.y * local_surfel_normal.y +
                                                cam_space_surfel_pos.z * local_surfel_normal.z);
  if (dot_angle > kSurfelNormalToViewingDirThreshold) {
    return false;
  }
  
  // Check whether the surfel normal is compatible with the measurement normal.
  if (measurement_depth < cam_space_surfel_pos.z) {
    float2 normal = normals_buffer(y, x);
    // z component reconstructed from the unit-length constraint, negated to
    // face the camera.
    float3 local_normal = make_float3(normal.x, normal.y, -sqrtf(::max(0.f, 1 - normal.x * normal.x - normal.y * normal.y)));
    float dot_angle = local_surfel_normal.x * local_normal.x +
                      local_surfel_normal.y * local_normal.y +
                      local_surfel_normal.z * local_normal.z;
    if (dot_angle < cos_normal_compatibility_threshold) {
      return false;
    }
  }
  
  // The measurement seems to belong to the same surface as the surfel.
  // Check whether the observation scale is compatible with the surfel scale.
  const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
  if (kCheckScaleCompatibilityForIntegration) {
    const float observation_radius_squared = radius_buffer(y, x);
    if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
      return false;
    }
  }
  
  // Never merge the supported surfel.
  u32 supported_surfel = supporting_surfels(y, x);
  if (supported_surfel == surfel_index || supported_surfel == Surfel::kInvalidIndex) {
    return false;
  }
  
  // Compare the surfel to the supported surfel. Merge only if very similar.
  // Radius: the squared-radius ratio must lie within
  // [1/kRadiusDiffThresholdSq, kRadiusDiffThresholdSq].
  const float other_radius_squared = surfels(kSurfelRadiusSquared, supported_surfel);
  float radius_diff = surfel_radius_squared / other_radius_squared;
  constexpr float kRadiusDiffThreshold = 1.2f;
  constexpr float kRadiusDiffThresholdSq = kRadiusDiffThreshold * kRadiusDiffThreshold;
  if (radius_diff > kRadiusDiffThresholdSq || radius_diff < 1 / kRadiusDiffThresholdSq) {
    return false;
  }
  
  // Distance: must be small relative to the mean of the two squared radii.
  float3 other_global_position =
      make_float3(surfels(kSurfelX, supported_surfel),
                  surfels(kSurfelY, supported_surfel),
                  surfels(kSurfelZ, supported_surfel));
  float distance_squared = SquaredDistance(global_surfel_pos, other_global_position);
  constexpr float kDistanceThresholdFactor = 0.5f * (0.25f * 0.25f);
  if (distance_squared > kDistanceThresholdFactor * (surfel_radius_squared + other_radius_squared)) {
    return false;
  }
  
  // Normal: both normals must agree within ~20 degrees.
  float3 other_surfel_normal = make_float3(surfels(kSurfelNormalX, supported_surfel),
                                           surfels(kSurfelNormalY, supported_surfel),
                                           surfels(kSurfelNormalZ, supported_surfel));
  dot_angle = Dot(global_surfel_normal, other_surfel_normal);
  constexpr float kCosNormalMergeThreshold = 0.93969f;  // 20 degrees
  if (dot_angle < kCosNormalMergeThreshold) {
    return false;
  }
  
  // Merge the surfel.
  *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = 0;
  surfels(kSurfelRadiusSquared, surfel_index) = -1;
  *reinterpret_cast<u8*>(&reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))->w) = 1;  // Set neighbor detach request flag
  return true;
}
// One thread per surfel: attempts to merge the surfel into the surfel that
// supports the pixel it projects to (ConsiderSurfelMergeAtPixel), then
// block-reduces the merge count and accumulates it into
// num_merges_buffer(0, 0) with a single atomicAdd per block.
//
// NOTE: The CUB BlockReduce at the end must be reached by every thread of
// the block, which is why the per-surfel work is guarded by nested ifs
// instead of early returns.
__global__ void MergeSurfelsCUDAKernel(
    u32 /*frame_index*/,
    int /*surfel_integration_active_window_size*/,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float depth_correction_factor,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<float> supporting_surfel_depth_sums,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<float> first_surfel_depth,
    CUDABuffer_<u32> num_merges_buffer) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  bool merged = false;
  if (surfel_index < surfel_count) {
    // Skip surfels that were already merged away (radius marked negative).
    // if (IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
    if (surfels(kSurfelRadiusSquared, surfel_index) >= 0) {
      float3 global_position =
          make_float3(surfels(kSurfelX, surfel_index),
                      surfels(kSurfelY, surfel_index),
                      surfels(kSurfelZ, surfel_index));
      float3 local_position = local_T_global * global_position;
      if (local_position.z > 0) {  // TODO: Compute z before x and y such that this early exit can be done earlier?
        // Pinhole projection to the surfel's pixel.
        float2 pixel_pos =
            make_float2(fx * (local_position.x / local_position.z) + cx,
                        fy * (local_position.y / local_position.z) + cy);
        int px = static_cast<int>(pixel_pos.x);
        int py = static_cast<int>(pixel_pos.y);
        if (!(pixel_pos.x < 0 || pixel_pos.y < 0 ||
              px < 0 || py < 0 ||
              px >= depth_buffer.width() || py >= depth_buffer.height())) {
          merged = ConsiderSurfelMergeAtPixel(
              px, py, local_position, global_position,
              surfel_index, surfels,
              local_T_global,
              sensor_noise_factor,
              cos_normal_compatibility_threshold,
              depth_correction_factor, depth_buffer, normals_buffer, radius_buffer, supporting_surfels,
              supporting_surfel_counts, supporting_surfel_depth_sums, conflicting_surfels, first_surfel_depth);
        }
      }
    }
  }
  
  // Block-wide sum of merges; executed by all threads (see note above).
  typedef typename hipcub::BlockReduce<int, kMergeBlockWidth, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduceInt;
  __shared__ typename BlockReduceInt::TempStorage temp_storage;
  int num_merges = BlockReduceInt(temp_storage).Sum(merged ? 1 : 0);
  // Only the first thread holds the valid reduction result.
  if (threadIdx.x == 0 && num_merges > 0) {
    atomicAdd(&num_merges_buffer(0, 0), static_cast<u32>(num_merges));
  }
}
// Host wrapper: launches MergeSurfelsCUDAKernel to merge redundant surfels
// into their supporting surfels, then downloads the merge count and adds it
// to *merge_count.
//
// This call BLOCKS on the given stream (hipStreamSynchronize) because the
// merge count must be read back on the host before returning.
void MergeSurfelsCUDA(
    hipStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float sensor_noise_factor,
    float normal_compatibility_threshold_deg,
    const SE3f& local_T_global,
    const PinholeCamera4f& depth_camera,
    float depth_correction_factor,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    CUDABuffer<u32>* supporting_surfels,
    CUDABuffer<u32>* supporting_surfel_counts,
    CUDABuffer<float>* supporting_surfel_depth_sums,
    CUDABuffer<u32>* conflicting_surfels,
    CUDABuffer<float>* first_surfel_depth,
    u32 surfel_count,
    u32* merge_count,
    CUDABuffer<float>* surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Avoid a zero-sized grid launch.
  if (surfel_count == 0) {
    return;
  }
  
  // Pinhole intrinsics in the order (fx, fy, cx, cy).
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  
  dim3 grid_dim(GetBlockCount(surfel_count, kMergeBlockWidth));
  dim3 block_dim(kMergeBlockWidth);
  
  // 1x1 device buffer accumulating the total merge count across all blocks.
  // NOTE(review): the static lifetime means this buffer is destroyed at
  // program exit, possibly after CUDA/HIP context teardown, and it is not
  // safe if this function is called concurrently from multiple threads.
  static CUDABuffer<u32> num_merges_buffer(1, 1);  // TODO: do not use static
  num_merges_buffer.Clear(0, stream);
  
  hipLaunchKernelGGL(( MergeSurfelsCUDAKernel)
  , dim3(grid_dim), dim3(block_dim), 0, stream, 
      frame_index,
      surfel_integration_active_window_size,
      fx, fy, cx, cy,
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      sensor_noise_factor,
      cosf(M_PI / 180.0f * normal_compatibility_threshold_deg),
      surfel_count,
      surfels->ToCUDA(),
      depth_correction_factor,
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      supporting_surfels->ToCUDA(),
      supporting_surfel_counts->ToCUDA(),
      supporting_surfel_depth_sums->ToCUDA(),
      conflicting_surfels->ToCUDA(),
      first_surfel_depth->ToCUDA(),
      num_merges_buffer.ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Read the merge count back; the synchronize makes the async download safe.
  u32 num_merges = 0;
  num_merges_buffer.DownloadAsync(stream, &num_merges);
  hipStreamSynchronize(stream);
  *merge_count += num_merges;
}
// Resets the per-surfel gradient accumulators (x, y, z components and the
// accumulated weight count) to zero in preparation for a regularization
// iteration. One thread per surfel.
__global__ void RegularizeSurfelsCUDAClearGradientsKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  // TODO: Put this in the last kernel of the denoising (and in the
  //       initialization) and expect that it remains zero in-between the
  //       calls in order to save one kernel call? Is this used anywhere else?
  surfels(kSurfelGradientX, index) = 0;
  surfels(kSurfelGradientY, index) = 0;
  surfels(kSurfelGradientZ, index) = 0;
  surfels(kSurfelGradientCount, index) = 0;
}
// Scatter pass of the regularization: for every surfel, adds this surfel's
// plane-projected gradient contribution to each of its active neighbors'
// gradient accumulators (via atomics), and prunes neighbors that have moved
// too far away.
//
// A neighbor is "active" if its last-update stamp lies within
// regularization_frame_window_size frames of frame_index. The contribution
// added to each neighbor is
//   (2 * regularizer_weight / neighbor_count) * (n . (p_neighbor - p_this)) * n
// where n is this surfel's normal and p are the smoothed positions; the
// per-term weight regularizer_weight / neighbor_count is also accumulated
// into kSurfelGradientCount for step-size normalization in the next kernel.
// One thread per surfel.
__global__ void RegularizeSurfelsCUDAAccumulateNeighborGradientsKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    float radius_factor_for_regularization_neighbors_squared,
    float regularizer_weight,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    // Count neighbors.
    // (First pass: neighbor_count is needed before the contributions can be
    // weighted, hence the duplicated activity check below.)
    int neighbor_count = 0;
    for (int neighbor_index = 0; neighbor_index < kSurfelNeighborCount; ++ neighbor_index) {
      u32 neighbor_surfel_index = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index));
      if (neighbor_surfel_index == Surfel::kInvalidIndex) {
        continue;
      }
      if (static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, neighbor_surfel_index))) < static_cast<int>(frame_index - regularization_frame_window_size)) {
        // Neighbor is outside the regularization window (inactive).
        continue;
      }
      ++ neighbor_count;
    }
    if (neighbor_count == 0) {
      return;
    }
    
    float3 smooth_position =
        make_float3(surfels(kSurfelSmoothX, surfel_index),
                    surfels(kSurfelSmoothY, surfel_index),
                    surfels(kSurfelSmoothZ, surfel_index));
    float3 normal =
        make_float3(surfels(kSurfelNormalX, surfel_index),
                    surfels(kSurfelNormalY, surfel_index),
                    surfels(kSurfelNormalZ, surfel_index));
    const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
    
    // Accumulate gradient terms for neighbors.
    float factor = 2 * regularizer_weight / neighbor_count;
    for (int neighbor_index = 0; neighbor_index < kSurfelNeighborCount; ++ neighbor_index) {
      u32 neighbor_surfel_index = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index));
      if (neighbor_surfel_index == Surfel::kInvalidIndex) {
        continue;
      }
      if (static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, neighbor_surfel_index))) < static_cast<int>(frame_index - regularization_frame_window_size)) {
        continue;
      }
      
      float3 neighbor_position =
          make_float3(surfels(kSurfelSmoothX, neighbor_surfel_index),
                      surfels(kSurfelSmoothY, neighbor_surfel_index),
                      surfels(kSurfelSmoothZ, neighbor_surfel_index));
      float3 this_to_neighbor =
          make_float3(neighbor_position.x - smooth_position.x,
                      neighbor_position.y - smooth_position.y,
                      neighbor_position.z - smooth_position.z);
      // Project the offset onto this surfel's normal and scale it into the
      // neighbor's gradient contribution.
      float factor_times_normal_dot_difference = factor * (normal.x * this_to_neighbor.x + normal.y * this_to_neighbor.y + normal.z * this_to_neighbor.z);
      float3 gradient_term_for_neighbor =
          make_float3(factor_times_normal_dot_difference * normal.x,
                      factor_times_normal_dot_difference * normal.y,
                      factor_times_normal_dot_difference * normal.z);
      // Atomics: many surfels may scatter into the same neighbor concurrently.
      atomicAdd(&surfels(kSurfelGradientX, neighbor_surfel_index), gradient_term_for_neighbor.x);
      atomicAdd(&surfels(kSurfelGradientY, neighbor_surfel_index), gradient_term_for_neighbor.y);
      atomicAdd(&surfels(kSurfelGradientZ, neighbor_surfel_index), gradient_term_for_neighbor.z);
      atomicAdd(&surfels(kSurfelGradientCount, neighbor_surfel_index), regularizer_weight / neighbor_count);
      
      // If the neighbor is too far away, remove it.
      // NOTE / TODO: it can still happen that there are far away but inactive
      //              neighbors, which will influence an active surfel, since
      //              this check only removes active neighbors.
      //              However, I think this should be relatively rare.
      float neighbor_distance_squared = this_to_neighbor.x * this_to_neighbor.x + this_to_neighbor.y * this_to_neighbor.y + this_to_neighbor.z * this_to_neighbor.z;
      if (neighbor_distance_squared > radius_factor_for_regularization_neighbors_squared * surfel_radius_squared) {
        *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index)) = Surfel::kInvalidIndex;
      }
    }
  }
}
__global__ void RegularizeSurfelsCUDAKernel(
u32 frame_index,
int regularization_frame_window_size,
float regularizer_weight,
u32 surfel_count,
CUDABuffer_<float> surfels) {
unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
if (surfel_index < surfel_count) {
if (static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index))) < static_cast<int>(frame_index - regularization_frame_window_size)) {
return;
}
float3 measured_position =
make_float3(surfels(kSurfelX, surfel_index),
surfels(kSurfelY, surfel_index),
surfels(kSurfelZ, surfel_index));
float3 smooth_position =
make_float3(surfels(kSurfelSmoothX, surfel_index),
surfels(kSurfelSmoothY, surfel_index),
surfels(kSurfelSmoothZ, surfel_index));
float3 normal =
make_float3(surfels(kSurfelNormalX, surfel_index),
surfels(kSurfelNormalY, surfel_index),
surfels(kSurfelNormalZ, surfel_index));
// Data term and neighbor-induced gradient terms
constexpr float data_term_factor = 2;
float3 gradient =
make_float3(data_term_factor * (smooth_position.x - measured_position.x) + surfels(kSurfelGradientX, surfel_index),
data_term_factor * (smooth_position.y - measured_position.y) + surfels(kSurfelGradientY, surfel_index),
data_term_factor * (smooth_position.z - measured_position.z) + surfels(kSurfelGradientZ, surfel_index));
// Regularization gradient terms
int neighbor_count = 0;
float3 regularization_gradient = make_float3(0, 0, 0);
for (int neighbor_index = 0; neighbor_index < kSurfelNeighborCount; ++ neighbor_index) {
u32 neighbor_surfel_index = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index));
if (neighbor_surfel_index == Surfel::kInvalidIndex) {
continue;
}
++ neighbor_count;
float3 neighbor_position =
make_float3(surfels(kSurfelSmoothX, neighbor_surfel_index),
surfels(kSurfelSmoothY, neighbor_surfel_index),
surfels(kSurfelSmoothZ, neighbor_surfel_index));
float3 this_to_neighbor =
make_float3(neighbor_position.x - smooth_position.x,
neighbor_position.y - smooth_position.y,
neighbor_position.z - smooth_position.z);
float normal_dot_difference = normal.x * this_to_neighbor.x + normal.y * this_to_neighbor.y + normal.z * this_to_neighbor.z;
regularization_gradient =
make_float3(regularization_gradient.x - normal_dot_difference * normal.x,
regularization_gradient.y - normal_dot_difference * normal.y,
regularization_gradient.z - normal_dot_difference * normal.z);
}
if (neighbor_count > 0) {
// Apply constant factor to regularization gradient term
float factor = 2 * regularizer_weight / neighbor_count;
gradient =
make_float3(gradient.x + factor * regularization_gradient.x,
gradient.y + factor * regularization_gradient.y,
gradient.z + factor * regularization_gradient.z);
}
const float residual_terms_weight_sum = 1 + regularizer_weight + surfels(kSurfelGradientCount, surfel_index);
const float kStepSizeFactor = 0.5f / residual_terms_weight_sum;
// Avoid divergence by limiting the step length to a multiple of the surfel
// radius (multiple with this factor here).
// TODO: It seems that this is not necessary anymore now that the step size
// is more intelligently chosen. Remove it (after some more extensive
// testing).
constexpr float kMaxStepLengthFactor = 1.0f;
float max_step_length = kMaxStepLengthFactor * sqrtf(surfels(kSurfelRadiusSquared, surfel_index));
float step_length = kStepSizeFactor * sqrtf(gradient.x * gradient.x + gradient.y * gradient.y + gradient.z * gradient.z);
float step_factor = kStepSizeFactor;
if (step_length > max_step_length) {
step_factor = max_step_length / step_length * kStepSizeFactor;
}
// NOTE: Writing the update into the gradient first to avoid race conditions
// (the smooth position may still be used by neighboring surfel updates).
// The next kernel call will move the result to the smooth position field.
surfels(kSurfelGradientX, surfel_index) = smooth_position.x - step_factor * gradient.x;
surfels(kSurfelGradientY, surfel_index) = smooth_position.y - step_factor * gradient.y;
surfels(kSurfelGradientZ, surfel_index) = smooth_position.z - step_factor * gradient.z;
}
}
// Publishes the regularization result: the preceding optimization kernel
// wrote the updated positions into the gradient fields (to avoid races with
// neighbors still reading the smooth positions); this kernel moves them into
// the smooth-position fields. One thread per surfel.
__global__ void RegularizeSurfelsCUDAUpdateKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  // Skip surfels whose last update lies outside the active time window.
  const int last_update_stamp = static_cast<int>(
      *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, index)));
  if (last_update_stamp < static_cast<int>(frame_index - regularization_frame_window_size)) {
    return;
  }
  surfels(kSurfelSmoothX, index) = surfels(kSurfelGradientX, index);
  surfels(kSurfelSmoothY, index) = surfels(kSurfelGradientY, index);
  surfels(kSurfelSmoothZ, index) = surfels(kSurfelGradientZ, index);
}
// Fallback path when denoising is disabled: copies the raw measured surfel
// positions straight into the smooth-position fields, so downstream code can
// always read the smooth fields. One thread per surfel.
__global__ void RegularizeSurfelsCUDACopyOnlyKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  // TODO: Only changed surfels need to be touched here.
  // Skip surfels whose last update lies outside the active time window.
  const int last_update_stamp = static_cast<int>(
      *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, index)));
  if (last_update_stamp < static_cast<int>(frame_index - regularization_frame_window_size)) {
    return;
  }
  surfels(kSurfelSmoothX, index) = surfels(kSurfelX, index);
  surfels(kSurfelSmoothY, index) = surfels(kSurfelY, index);
  surfels(kSurfelSmoothZ, index) = surfels(kSurfelZ, index);
}
// Host-side driver for one surfel-regularization iteration.
// If disable_denoising is set, only copies the raw positions into the smooth
// fields; otherwise runs the full pipeline: clear gradients -> accumulate
// neighbor gradients -> gradient-descent step -> publish result.
// All kernels are launched on the given stream; no host synchronization is
// performed here (except under CUDA_SEQUENTIAL_CHECKS).
void RegularizeSurfelsCUDA(
    hipStream_t stream,
    bool disable_denoising,
    u32 frame_index,
    float radius_factor_for_regularization_neighbors,
    float regularizer_weight,
    int regularization_frame_window_size,
    u32 surfel_count,
    CUDABuffer<float>* surfels) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  constexpr int kBlockWidth = 1024;
  const dim3 grid(GetBlockCount(surfel_count, kBlockWidth));
  const dim3 block(kBlockWidth);
  if (disable_denoising) {
    // Only copy the raw surfel positions to the smoothed position fields.
    hipLaunchKernelGGL(( RegularizeSurfelsCUDACopyOnlyKernel)
        , dim3(grid), dim3(block), 0, stream,
        frame_index,
        regularization_frame_window_size,
        surfel_count,
        surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
    hipDeviceSynchronize();
#endif
    CHECK_CUDA_NO_ERROR();
    return;
  }
  // Step 1: reset the per-surfel gradient accumulators.
  hipLaunchKernelGGL(( RegularizeSurfelsCUDAClearGradientsKernel)
      , dim3(grid), dim3(block), 0, stream,
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // Step 2: accumulate the neighbor-induced gradient contributions.
  hipLaunchKernelGGL(( RegularizeSurfelsCUDAAccumulateNeighborGradientsKernel)
      , dim3(grid), dim3(block), 0, stream,
      frame_index,
      regularization_frame_window_size,
      radius_factor_for_regularization_neighbors * radius_factor_for_regularization_neighbors,
      regularizer_weight,
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // Step 3: take one gradient-descent step per surfel (result is written to
  // the gradient fields to avoid races with concurrent neighbor reads).
  hipLaunchKernelGGL(( RegularizeSurfelsCUDAKernel)
      , dim3(grid), dim3(block), 0, stream,
      frame_index,
      regularization_frame_window_size,
      regularizer_weight,
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // Step 4: move the stepped positions into the smooth-position fields.
  hipLaunchKernelGGL(( RegularizeSurfelsCUDAUpdateKernel)
      , dim3(grid), dim3(block), 0, stream,
      frame_index,
      regularization_frame_window_size,
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
// Writes one exported vertex per surfel: xyz position (3 floats) and rgb
// color (3 bytes). Surfels that were merged away are marked with a negative
// squared radius and are exported with NaN coordinates so consumers can drop
// them. One thread per surfel.
__global__ void ExportVerticesCUDAKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    CUDABuffer_<float> position_buffer,
    CUDABuffer_<u8> color_buffer) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  const bool was_merged = surfels(kSurfelRadiusSquared, index) < 0;
  float* positions = position_buffer.address() + 3 * index;
  positions[0] = was_merged ? CUDART_NAN_F : surfels(kSurfelSmoothX, index);
  positions[1] = was_merged ? CUDART_NAN_F : surfels(kSurfelSmoothY, index);
  positions[2] = was_merged ? CUDART_NAN_F : surfels(kSurfelSmoothZ, index);
  // The color is stored as a packed uchar4 inside a float slot.
  const uchar4 rgba = *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, index)));
  u8* colors = color_buffer.address() + 3 * index;
  colors[0] = rgba.x;
  colors[1] = rgba.y;
  colors[2] = rgba.z;
}
// Host wrapper: exports surfel positions and colors into the given buffers
// (asynchronously on the given stream). No-op for an empty surfel set.
void ExportVerticesCUDA(
    hipStream_t stream,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    CUDABuffer<float>* position_buffer,
    CUDABuffer<u8>* color_buffer) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  constexpr int kBlockWidth = 1024;
  const dim3 grid(GetBlockCount(surfel_count, kBlockWidth));
  const dim3 block(kBlockWidth);
  hipLaunchKernelGGL(( ExportVerticesCUDAKernel)
      , dim3(grid), dim3(block), 0, stream,
      surfel_count,
      surfels.ToCUDA(),
      position_buffer->ToCUDA(),
      color_buffer->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
// Debug helper: prints the state of a single surfel from device code.
// Only the first thread of the first block does the printing.
__global__ void DebugPrintSurfelCUDAKernel(
    usize surfel_index,
    CUDABuffer_<float> surfels) {
  if (blockIdx.x != 0 || threadIdx.x != 0) {
    return;
  }
  printf("DEBUGGING surfel %i on GPU ...\n", static_cast<int>(surfel_index));
  printf("DEBUG surfel raw position x on GPU: %f\n", surfels(kSurfelX, surfel_index));
  printf("DEBUG surfel raw position y on GPU: %f\n", surfels(kSurfelY, surfel_index));
  printf("DEBUG surfel raw position z on GPU: %f\n", surfels(kSurfelZ, surfel_index));
  printf("DEBUG surfel smooth position x on GPU: %f\n", surfels(kSurfelSmoothX, surfel_index));
  printf("DEBUG surfel smooth position y on GPU: %f\n", surfels(kSurfelSmoothY, surfel_index));
  printf("DEBUG surfel smooth position z on GPU: %f\n", surfels(kSurfelSmoothZ, surfel_index));
  printf("DEBUG surfel creation stamp on GPU: %i\n", static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index))));
}
// Host wrapper for DebugPrintSurfelCUDAKernel: launches a single small block
// on the given stream to print one surfel's state. Debugging only.
void DebugPrintSurfelCUDA(
    hipStream_t stream,
    usize surfel_index,
    const CUDABuffer<float>& surfels) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  constexpr int kBlockWidth = 32;
  const dim3 grid(1);
  const dim3 block(kBlockWidth);
  hipLaunchKernelGGL(( DebugPrintSurfelCUDAKernel)
      , dim3(grid), dim3(block), 0, stream,
      surfel_index,
      surfels.ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  hipDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
}
| 05aec68e54248ff65477f9075fb695c7bdbe8390.cu | // Copyright 2018 ETH Zürich, Thomas Schöps
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// Avoid warnings in Qt includes with CUDA compiler
#pragma GCC diagnostic ignored "-Wattributes"
// Avoid warnings in Eigen includes with CUDA compiler
#pragma diag_suppress code_is_unreachable
#include "surfel_meshing/cuda_surfel_reconstruction.cuh"
#include <cub/block/block_reduce.cuh>
#include <cub/device/device_scan.cuh>
#include <libvis/point_cloud.h>
#include <math_constants.h>
#include "surfel_meshing/cuda_matrix.cuh"
#include "surfel_meshing/cuda_util.cuh"
#include "surfel_meshing/surfel.h"
// Uncomment this to run CUDA kernels sequentially for debugging.
// #define CUDA_SEQUENTIAL_CHECKS
namespace vis {
// Tuning constants for surfel integration.

// This threshold is not exposed as a program argument since I am not sure
// whether any other value than 0 would be useful.
constexpr float kSurfelNormalToViewingDirThreshold = 0;

// For a surfel with a given radius, the observation radius can be up to this
// factor worse (larger) while the observation is still integrated into the
// surfel. Observations with larger radii than that are discarded.
// TODO: Expose as a program argument?
constexpr float kMaxObservationRadiusFactorForIntegration = 1.5f;

// Not exposed as a program argument since it did not seem to work well.
constexpr bool kCheckScaleCompatibilityForIntegration = false;

// Not exposed as a program argument since disabling it might not make sense.
constexpr bool kCheckScaleCompatibilityForNeighborAssignment = true;

// If this is set to true, slightly occluded surfels will be protected better,
// but the surfel integration will be unable to merge duplicate surfaces after
// loop closures.
constexpr bool kProtectSlightlyOccludedSurfels = false;
// NOTE(review): presumably a relative depth tolerance used together with
// kProtectSlightlyOccludedSurfels to decide what counts as "slightly
// occluded"; the use site is outside this chunk -- confirm there.
constexpr float kOcclusionDepthFactor = 0.01f;
// Returns true if the surfel's last update happened within the last
// surfel_integration_active_window_size frames relative to frame_index,
// i.e. whether it still participates in measurement integration.
//
// Alternatives for the stamp compared here:
//   kSurfelCreationStamp --> surfels are always deactivated after a certain
//     time and never reactivated. Creates the least artifacts during
//     deformations, but leads to many surfels.
//   kSurfelLastUpdateStamp --> surfels stay active. Leads to problems during
//     deformation at observation boundaries (where the surfels are next to
//     each other, but kSurfelLastUpdateStamp differs strongly).
__forceinline__ __device__ bool IsSurfelActiveForIntegration(
    u32 surfel_index,
    const CUDABuffer_<float>& surfels,
    u32 frame_index,
    int surfel_integration_active_window_size) {
  const int last_update_stamp = static_cast<int>(
      *reinterpret_cast<const u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)));
  return last_update_stamp >
         static_cast<int>(frame_index) - surfel_integration_active_window_size;
}
// For every depth-map pixel, writes a 0/1 flag into a flat (row-major) vector
// indicating whether a new surfel shall be created for that pixel: valid
// depth, not already supporting an existing surfel, not conflicting with one,
// and not on the image border. The flag vector is consumed by an exclusive
// prefix sum to assign indices to the new surfels.
__global__ void CreateNewSurfelsCUDASerializingKernel(
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<u8> new_surfel_flag_vector) {
  const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= depth_buffer.width() || y >= depth_buffer.height()) {
    return;
  }
  // TODO: Is this border necessary here, or should it rather be integrated
  // into the depth map erosion?
  constexpr int kBorder = 1;
  const bool inside_border =
      x >= kBorder &&
      y >= kBorder &&
      x < depth_buffer.width() - kBorder &&
      y < depth_buffer.height() - kBorder;
  const bool creates_surfel =
      inside_border &&
      depth_buffer(y, x) > 0 &&
      supporting_surfels(y, x) == Surfel::kInvalidIndex &&
      conflicting_surfels(y, x) == Surfel::kInvalidIndex;
  const u32 seq_index = x + y * depth_buffer.width();
  new_surfel_flag_vector(0, seq_index) = creates_surfel ? 1 : 0;
}
// Creates the new surfels flagged by CreateNewSurfelsCUDASerializingKernel.
// Each flagged pixel unprojects its depth measurement into global space,
// initializes all surfel attributes, links up to 4 regularization neighbors
// (the 4-neighborhood pixels), and seeds the smoothed position with the
// average of its own and its existing neighbors' smooth positions.
// new_surfel_indices must hold the exclusive prefix sum of the flag vector;
// new surfels are appended starting at index surfel_count.
__global__ void CreateNewSurfelsCUDACreationKernel(
    u32 frame_index,
    float inv_depth_scaling,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    CUDAMatrix3x4 global_T_local,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<uchar3> color_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u8> new_surfel_flag_vector,
    CUDABuffer_<u32> new_surfel_indices,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float radius_factor_for_regularization_neighbors_squared) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < depth_buffer.width() && y < depth_buffer.height()) {
    u32 seq_index = x + y * depth_buffer.width();
    if (new_surfel_flag_vector(0, seq_index) != 1) {
      return;
    }
    // Destination slot: append after the pre-existing surfels, at the offset
    // given by the prefix sum over the flag vector.
    u32 surfel_index = surfel_count + new_surfel_indices(0, seq_index);
    // Unproject the depth measurement and transform it to global space.
    float depth = inv_depth_scaling * depth_buffer(y, x);
    float3 local_position;
    UnprojectPoint(x, y, depth, fx_inv, fy_inv, cx_inv, cy_inv, &local_position);
    float3 global_position = global_T_local * local_position;
    surfels(kSurfelX, surfel_index) = global_position.x;
    surfels(kSurfelY, surfel_index) = global_position.y;
    surfels(kSurfelZ, surfel_index) = global_position.z;
    surfels(kSurfelSmoothX, surfel_index) = global_position.x;
    surfels(kSurfelSmoothY, surfel_index) = global_position.y;
    surfels(kSurfelSmoothZ, surfel_index) = global_position.z;
    // Normals are stored as (x, y) only; z is reconstructed so the normal
    // points towards the camera (negative z in camera coordinates).
    float2 normal_xy = normals_buffer(y, x);
    const float normal_z = -sqrtf(::max(0.f, 1 - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y));
    float3 global_normal = global_T_local.Rotate(make_float3(normal_xy.x, normal_xy.y, normal_z));
    surfels(kSurfelNormalX, surfel_index) = global_normal.x;
    surfels(kSurfelNormalY, surfel_index) = global_normal.y;
    surfels(kSurfelNormalZ, surfel_index) = global_normal.z;
    // Color is packed as uchar4 into one float slot.
    uchar3 color = color_buffer(y, x);
    *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))) = make_uchar4(color.x, color.y, color.z, 0);
    surfels(kSurfelConfidence, surfel_index) = 1;
    *reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index)) = frame_index;
    *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = frame_index;
    const float radius_squared = radius_buffer(y, x);
    surfels(kSurfelRadiusSquared, surfel_index) = radius_squared;
    // Determine initial neighbors.
    float3 neighbor_position_sum = make_float3(0, 0, 0);
    int existing_neighbor_count_plus_1 = 1;
    // 4-neighborhood offsets: left, right, up, down.
    constexpr int kDirectionsX[4] = {-1, 1, 0, 0};
    constexpr int kDirectionsY[4] = {0, 0, -1, 1};
    for (int direction = 0; direction < 4; ++ direction) {
      u32 neighbor_index = supporting_surfels(y + kDirectionsY[direction], x + kDirectionsX[direction]);
      if (neighbor_index != Surfel::kInvalidIndex) {
        // Existing surfel at the neighbor pixel: accept it as a neighbor only
        // if it lies within the regularization radius of this surfel.
        float3 this_to_neighbor = make_float3(surfels(kSurfelX, neighbor_index) - global_position.x,
                                              surfels(kSurfelY, neighbor_index) - global_position.y,
                                              surfels(kSurfelZ, neighbor_index) - global_position.z);
        float distance_squared =
            this_to_neighbor.x * this_to_neighbor.x + this_to_neighbor.y * this_to_neighbor.y + this_to_neighbor.z * this_to_neighbor.z;
        if (distance_squared > radius_factor_for_regularization_neighbors_squared * radius_squared) {
          neighbor_index = Surfel::kInvalidIndex;
        } else {
          neighbor_position_sum = make_float3(
              neighbor_position_sum.x + surfels(kSurfelSmoothX, neighbor_index),
              neighbor_position_sum.y + surfels(kSurfelSmoothY, neighbor_index),
              neighbor_position_sum.z + surfels(kSurfelSmoothZ, neighbor_index));
          ++ existing_neighbor_count_plus_1;
        }
      } else {
        // No existing surfel there, but the neighbor pixel may spawn a new
        // surfel in this very kernel call; its index is derived from the same
        // prefix sum. Distance is approximated along the depth axis only.
        u32 seq_neighbor_index = (x + kDirectionsX[direction]) + (y + kDirectionsY[direction]) * depth_buffer.width();
        if (new_surfel_flag_vector(0, seq_neighbor_index) == 1) {
          float other_depth = inv_depth_scaling * depth_buffer(y + kDirectionsY[direction], x + kDirectionsX[direction]);
          float approximate_distance_squared = (depth - other_depth) * (depth - other_depth);
          if (approximate_distance_squared <= radius_factor_for_regularization_neighbors_squared * radius_squared) {
            neighbor_index = surfel_count + new_surfel_indices(0, seq_neighbor_index);
          }
        }
      }
      *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + direction, surfel_index)) = neighbor_index;
    }
    // Try to get a better initialization for the regularized surfel position.
    surfels(kSurfelSmoothX, surfel_index) = (surfels(kSurfelSmoothX, surfel_index) + neighbor_position_sum.x) / existing_neighbor_count_plus_1;
    surfels(kSurfelSmoothY, surfel_index) = (surfels(kSurfelSmoothY, surfel_index) + neighbor_position_sum.y) / existing_neighbor_count_plus_1;
    surfels(kSurfelSmoothZ, surfel_index) = (surfels(kSurfelSmoothZ, surfel_index) + neighbor_position_sum.z) / existing_neighbor_count_plus_1;
  }
}
// Creates new surfels for all depth-map pixels that are neither supported nor
// contradicted by an existing surfel.
//
// Pipeline: (1) a flag kernel marks creation candidates in a flat vector,
// (2) a CUB exclusive prefix sum over the flags assigns dense indices to the
// new surfels, (3) a creation kernel initializes them starting at
// surfel_count in 'surfels'.
//
// new_surfels_temp_storage / new_surfels_temp_storage_bytes cache the CUB
// temp buffer across calls (allocated lazily on the first call).
// The number of created surfels is downloaded asynchronously into
// *new_surfel_count (+ *new_surfel_count_2, the last flag value, since the
// exclusive sum excludes it); the caller must synchronize the stream before
// reading them.
void CreateNewSurfelsCUDA(
    cudaStream_t stream,
    u32 frame_index,
    const SE3f& global_T_local,
    float depth_scaling,
    float radius_factor_for_regularization_neighbors,
    const PinholeCamera4f& depth_camera,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    const CUDABuffer<Vec3u8>& color_buffer,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& conflicting_surfels,
    void** new_surfels_temp_storage,
    usize* new_surfels_temp_storage_bytes,
    CUDABuffer<u8>* new_surfel_flag_vector,
    CUDABuffer<u32>* new_surfel_indices,
    u32 surfel_count,
    CUDABuffer<float>* surfels,
    u32* new_surfel_count,
    u8* new_surfel_count_2) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  // Unprojection intrinsics for pixel center convention.
  const float fx_inv = 1.0f / fx;
  const float fy_inv = 1.0f / fy;
  const float cx_pixel_center = cx - 0.5f;
  const float cy_pixel_center = cy - 0.5f;
  const float cx_inv_pixel_center = -cx_pixel_center / fx;
  const float cy_inv_pixel_center = -cy_pixel_center / fy;
  // The first kernel marks in a sequential (non-pitched) vector whether a new
  // surfel is created for the corresponding pixel or not.
  constexpr int kBlockWidth = 32;
  constexpr int kBlockHeight = 32;
  dim3 grid_dim(GetBlockCount(depth_buffer.width(), kBlockWidth),
                GetBlockCount(depth_buffer.height(), kBlockHeight));
  dim3 block_dim(kBlockWidth, kBlockHeight);
  CreateNewSurfelsCUDASerializingKernel
      <<<grid_dim, block_dim, 0, stream>>>(
      depth_buffer.ToCUDA(),
      supporting_surfels.ToCUDA(),
      conflicting_surfels.ToCUDA(),
      new_surfel_flag_vector->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // Indices for the new surfels are computed with a parallel exclusive prefix
  // sum from CUB.
  if (*new_surfels_temp_storage_bytes == 0) {
    // First call: query the required temp storage size (null temp storage
    // makes ExclusiveSum perform a size query only), then allocate it.
    cub::DeviceScan::ExclusiveSum(
        *new_surfels_temp_storage,
        *new_surfels_temp_storage_bytes,
        new_surfel_flag_vector->ToCUDA().address(),
        new_surfel_indices->ToCUDA().address(),
        depth_buffer.width() * depth_buffer.height(),
        stream);
    cudaMalloc(new_surfels_temp_storage, *new_surfels_temp_storage_bytes);
    // Verify the allocation succeeded; otherwise the scan below would operate
    // on an invalid pointer and fail much less obviously.
    CHECK_CUDA_NO_ERROR();
  }
  cub::DeviceScan::ExclusiveSum(
      *new_surfels_temp_storage,
      *new_surfels_temp_storage_bytes,
      new_surfel_flag_vector->ToCUDA().address(),
      new_surfel_indices->ToCUDA().address(),
      depth_buffer.width() * depth_buffer.height(),
      stream);
  // Read back the number of new surfels to the CPU by reading the last element
  // in new_surfel_indices and new_surfel_flag_vector.
  // TODO: Do this concurrently with the next kernel call?
  new_surfel_indices->DownloadPartAsync(
      (depth_buffer.width() * depth_buffer.height() - 1) * sizeof(u32),
      1 * sizeof(u32),
      stream,
      new_surfel_count);
  new_surfel_flag_vector->DownloadPartAsync(
      (depth_buffer.width() * depth_buffer.height() - 1) * sizeof(u8),
      1 * sizeof(u8),
      stream,
      new_surfel_count_2);
  // Now that the indices are known, the actual surfel creation is done.
  CreateNewSurfelsCUDACreationKernel
      <<<grid_dim, block_dim, 0, stream>>>(
      frame_index,
      1.0f / depth_scaling,
      fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center,
      CUDAMatrix3x4(global_T_local.matrix3x4()),
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      *reinterpret_cast<const CUDABuffer_<uchar3>*>(&color_buffer.ToCUDA()),
      supporting_surfels.ToCUDA(),
      new_surfel_flag_vector->ToCUDA(),
      new_surfel_indices->ToCUDA(),
      surfel_count,
      surfels->ToCUDA(),
      radius_factor_for_regularization_neighbors * radius_factor_for_regularization_neighbors);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
// Fills the OpenGL vertex buffer (layout Point3fC3u8: x, y, z, packed rgba)
// with one vertex per surfel. The template flags select mutually exclusive
// color visualizations (age, creation time, radius, normal); with all flags
// false the surfel's own color is used. Surfels that were replaced after the
// latest triangulation get a NaN x coordinate so their triangles vanish.
template <bool visualize_last_update_timestamp,
          bool visualize_creation_timestamp,
          bool visualize_radii,
          bool visualize_normals>
__global__ void UpdateSurfelVertexBufferCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    u32 point_size_in_floats,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    u32 latest_triangulated_frame_index,
    u32 latest_mesh_surfel_count,
    float* vertex_buffer_ptr) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    const u32 surfel_creation_stamp = *reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index));
    // Only output if it is an old surfel that has not been replaced since the last mesh was created,
    // or if it is a new surfel which does not appear in the mesh yet.
    const bool output_vertex = surfel_creation_stamp <= latest_triangulated_frame_index ||
                               surfel_index >= latest_mesh_surfel_count;
    // Vertex layout (Point3fC3u8):
    // float x, float y, float z, u8 r, u8 g, u8 b, u8 unused;
    // Using NaN for one of the vertex coordinates to prevent it from being
    // drawn if the surfel was replaced recently and the triangulation not
    // adjusted yet. This makes the adjacent triangles disappear. Not sure
    // whether that is portable, but it works as intended on my system ...
    vertex_buffer_ptr[surfel_index * point_size_in_floats + 0] = output_vertex ? surfels(kSurfelSmoothX, surfel_index) : CUDART_NAN_F;
    vertex_buffer_ptr[surfel_index * point_size_in_floats + 1] = surfels(kSurfelSmoothY, surfel_index);
    vertex_buffer_ptr[surfel_index * point_size_in_floats + 2] = surfels(kSurfelSmoothZ, surfel_index);
    if (visualize_last_update_timestamp || visualize_creation_timestamp) {
      // Color by age (frames since last update, or since creation).
      const u32 last_update_timestamp = *reinterpret_cast<u32*>(&surfels(visualize_creation_timestamp ? kSurfelCreationStamp : kSurfelLastUpdateStamp, surfel_index));
      const int age = frame_index - last_update_timestamp;
      constexpr int kVisualizationMinAge = 1;
      const int kVisualizationMaxAge = visualize_creation_timestamp ? 3000 : surfel_integration_active_window_size;
      if (age < kVisualizationMinAge) {
        // Special color for surfels updated in the last frame: red.
        uchar4 color = make_uchar4(255, 80, 80, 0);
        vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
      } else if (age > kVisualizationMaxAge) {
        // Old surfels: blue
        uchar4 color = make_uchar4(40, 40, 255, 0);
        vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
      } else {
        // In between: fade from white (new) to black (old).
        float blend_factor = (age - kVisualizationMinAge) * 1.0f / (kVisualizationMaxAge - kVisualizationMinAge);
        blend_factor = ::min(1.0f, ::max(0.0f, blend_factor));
        u8 intensity = 255 - static_cast<u8>(255.99f * blend_factor);
        uchar4 color = make_uchar4(intensity, intensity, intensity, 0);
        vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
      }
    } else if (visualize_radii) {
      // Color by radius: green (small, 0.5 mm) to red (large, 1 cm).
      const float radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
      const float radius = sqrtf(radius_squared);
      constexpr float kVisualizationMinRadius = 0.0005f;  // 0.5 mm
      constexpr float kVisualizationMaxRadius = 0.01f;  // 1 cm
      float blend_factor = (radius - kVisualizationMinRadius) / (kVisualizationMaxRadius - kVisualizationMinRadius);
      blend_factor = ::min(1.0f, ::max(0.0f, blend_factor));
      u8 red = 255.99f * blend_factor;
      u8 green = 255 - red;
      u8 blue = 80;
      uchar4 color = make_uchar4(red, green, blue, 0);
      vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
    } else if (visualize_normals) {
      // Map each normal component from [-1, 1] to [0, 255] as RGB.
      float3 normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                  surfels(kSurfelNormalY, surfel_index),
                                  surfels(kSurfelNormalZ, surfel_index));
      uchar4 color = make_uchar4(255.99f / 2.0f * (normal.x + 1.0f),
                                 255.99f / 2.0f * (normal.y + 1.0f),
                                 255.99f / 2.0f * (normal.z + 1.0f),
                                 0);
      vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = *reinterpret_cast<float*>(&color);
    } else {
      // Default: the surfel's stored color (already packed as uchar4).
      vertex_buffer_ptr[surfel_index * point_size_in_floats + 3] = surfels(kSurfelColor, surfel_index);
    }
  }
}
// Maps the OpenGL vertex buffer into CUDA, fills it with one vertex per
// surfel via UpdateSurfelVertexBufferCUDAKernel (dispatching on the selected
// visualization mode at compile time), and unmaps it again.
// The visualize_* flags are treated as mutually exclusive, in the listed
// priority order; with none set, surfel colors are used.
void UpdateSurfelVertexBufferCUDA(
    cudaStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    u32 latest_triangulated_frame_index,
    u32 latest_mesh_surfel_count,
    cudaGraphicsResource_t vertex_buffer_resource,
    bool visualize_last_update_timestamp,
    bool visualize_creation_timestamp,
    bool visualize_radii,
    bool visualize_normals) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Map OpenGL buffer object for writing from CUDA.
  cudaGraphicsMapResources(1, &vertex_buffer_resource, stream);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  usize num_bytes;
  float* vertex_buffer_ptr;
  cudaGraphicsResourceGetMappedPointer((void**)&vertex_buffer_ptr, &num_bytes, vertex_buffer_resource);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  constexpr int kBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kBlockWidth));
  dim3 block_dim(kBlockWidth);
  // The vertex stride is expressed in floats for the kernel's indexing.
  CHECK(sizeof(Point3fC3u8) % sizeof(float) == 0);
  u32 point_size_in_floats = sizeof(Point3fC3u8) / sizeof(float);
  // Helper macro to instantiate the kernel template for one flag combination
  // (comments cannot go inside the macro body due to line continuations).
#define CALL_KERNEL(visualize_last_update_timestamp, \
                    visualize_creation_timestamp, \
                    visualize_radii, \
                    visualize_normals) \
    UpdateSurfelVertexBufferCUDAKernel \
    <visualize_last_update_timestamp, \
     visualize_creation_timestamp, \
     visualize_radii, \
     visualize_normals> \
    <<<grid_dim, block_dim, 0, stream>>>( \
        frame_index, \
        surfel_integration_active_window_size, \
        point_size_in_floats, \
        surfel_count, \
        surfels.ToCUDA(), \
        latest_triangulated_frame_index, \
        latest_mesh_surfel_count, \
        vertex_buffer_ptr)
  if (visualize_last_update_timestamp) {
    CALL_KERNEL(true, false, false, false);
  } else if (visualize_creation_timestamp) {
    CALL_KERNEL(false, true, false, false);
  } else if (visualize_radii) {
    CALL_KERNEL(false, false, true, false);
  } else if (visualize_normals) {
    CALL_KERNEL(false, false, false, true);
  }else {
    CALL_KERNEL(false, false, false, false);
  }
#undef CALL_KERNEL
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  cudaGraphicsUnmapResources(1, &vertex_buffer_resource, stream);
}
// Emits the line-segment index buffer for visualizing surfel neighbor links:
// kSurfelNeighborCount index pairs per surfel. An invalid neighbor produces a
// degenerate segment (both endpoints equal to the surfel itself), which is
// not visible when rendered. One thread per surfel.
__global__ void UpdateNeighborIndexBufferCUDAKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    unsigned int* neighbor_index_buffer_ptr) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  unsigned int* out = neighbor_index_buffer_ptr + index * 2 * kSurfelNeighborCount;
  #pragma unroll
  for (int n = 0; n < kSurfelNeighborCount; ++ n) {
    const u32 neighbor = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, index));
    out[2 * n + 0] = index;
    out[2 * n + 1] = (neighbor == Surfel::kInvalidIndex) ? index : neighbor;
  }
}
// Maps the OpenGL neighbor index buffer into CUDA, fills it with the surfel
// neighbor line segments, and unmaps it again. No-op for zero surfels.
void UpdateNeighborIndexBufferCUDA(
    cudaStream_t stream,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    cudaGraphicsResource_t neighbor_index_buffer_resource) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Map OpenGL buffer object for writing from CUDA.
  cudaGraphicsMapResources(1, &neighbor_index_buffer_resource, stream);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  usize mapped_size;
  unsigned int* mapped_indices;
  cudaGraphicsResourceGetMappedPointer((void**)&mapped_indices, &mapped_size, neighbor_index_buffer_resource);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  constexpr int kBlockWidth = 1024;
  const dim3 grid(GetBlockCount(surfel_count, kBlockWidth));
  const dim3 block(kBlockWidth);
  UpdateNeighborIndexBufferCUDAKernel
      <<<grid, block, 0, stream>>>(
      surfel_count,
      surfels.ToCUDA(),
      mapped_indices);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  cudaGraphicsUnmapResources(1, &neighbor_index_buffer_resource, stream);
}
// Emits one line segment per surfel for visualizing normals: from the smooth
// position to the smooth position offset by one radius along the normal
// (6 floats per surfel). One thread per surfel.
__global__ void UpdateNormalVertexBufferCUDAKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float* normal_vertex_buffer_ptr) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  const float px = surfels(kSurfelSmoothX, index);
  const float py = surfels(kSurfelSmoothY, index);
  const float pz = surfels(kSurfelSmoothZ, index);
  const float radius = sqrtf(surfels(kSurfelRadiusSquared, index));
  float* out = normal_vertex_buffer_ptr + 6 * index;
  out[0] = px;
  out[1] = py;
  out[2] = pz;
  out[3] = px + radius * surfels(kSurfelNormalX, index);
  out[4] = py + radius * surfels(kSurfelNormalY, index);
  out[5] = pz + radius * surfels(kSurfelNormalZ, index);
}
// Maps the OpenGL normal-visualization vertex buffer into CUDA, fills it with
// one line segment per surfel, and unmaps it again. No-op for zero surfels.
void UpdateNormalVertexBufferCUDA(
    cudaStream_t stream,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    cudaGraphicsResource_t normal_vertex_buffer_resource) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Map OpenGL buffer object for writing from CUDA.
  cudaGraphicsMapResources(1, &normal_vertex_buffer_resource, stream);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  usize mapped_size;
  float* mapped_vertices;
  cudaGraphicsResourceGetMappedPointer((void**)&mapped_vertices, &mapped_size, normal_vertex_buffer_resource);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  constexpr int kBlockWidth = 1024;
  const dim3 grid(GetBlockCount(surfel_count, kBlockWidth));
  const dim3 block(kBlockWidth);
  UpdateNormalVertexBufferCUDAKernel
      <<<grid, block, 0, stream>>>(
      surfel_count,
      surfels.ToCUDA(),
      mapped_vertices);
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  cudaGraphicsUnmapResources(1, &normal_vertex_buffer_resource, stream);
}
// Initializes the measurement-blending distance maps at the two kinds of
// boundary pixels (pixels that have both valid depth and supporting surfels):
// - next to a pixel without supporting surfels ("surfel border"): seed the
//   *new* distance map with the surfel-vs-measurement depth delta;
// - next to a pixel without valid depth ("measurement border"): seed the
//   current distance map and overwrite the measurement depth with the average
//   supporting-surfel depth.
// Other qualifying pixels get distance 255 (unknown); the iteration kernel
// then propagates the deltas inward.
__global__ void BlendMeasurementsCUDAStartKernel(
    float depth_scaling,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<float> supporting_surfel_depth_sums,
    CUDABuffer_<u8> distance_map,
    CUDABuffer_<float> surfel_depth_average_deltas,
    CUDABuffer_<u8> new_distance_map,
    CUDABuffer_<float> new_surfel_depth_average_deltas) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  constexpr int kBorder = 1;
  if (x >= kBorder && y >= kBorder && x < supporting_surfels.width() - kBorder && y < supporting_surfels.height() - kBorder) {
    // Only consider pixels with valid measurement depth and supporting surfels.
    if (depth_buffer(y, x) == 0 || supporting_surfels(y, x) == Surfel::kInvalidIndex) {
      return;
    }
    // Classify the pixel by inspecting its 3x3 neighborhood (the kBorder
    // guard above makes these accesses safe).
    bool measurement_border_pixel = false;
    bool surfel_border_pixel = false;
    for (int wy = y - 1, wy_end = y + 1; wy <= wy_end; ++ wy) {
      for (int wx = x - 1, wx_end = x + 1; wx <= wx_end; ++ wx) {
        if (depth_buffer(wy, wx) == 0) {
          measurement_border_pixel = true;
        } else if (supporting_surfels(wy, wx) == Surfel::kInvalidIndex) {
          surfel_border_pixel = true;
        }
      }
    }
    // NOTE: A pixel can be both kinds of border at once; both branches below
    // are then taken.
    if (surfel_border_pixel) {
      // TODO: Interpolation should start at the depth after this iteration's integration in this case
      new_distance_map(y, x) = 1;
      float surfel_depth_average = supporting_surfel_depth_sums(y, x) / supporting_surfel_counts(y, x);
      new_surfel_depth_average_deltas(y, x) = surfel_depth_average - depth_buffer(y, x) / depth_scaling;
    }
    if (measurement_border_pixel) {
      distance_map(y, x) = 1;
      float surfel_depth_average = supporting_surfel_depth_sums(y, x) / supporting_surfel_counts(y, x);
      surfel_depth_average_deltas(y, x) = surfel_depth_average - depth_buffer(y, x) / depth_scaling;
      depth_buffer(y, x) = depth_scaling * surfel_depth_average + 0.5f; // TODO: This assignment can happen while other threads read, does it matter?
    } else {
      distance_map(y, x) = 255; // unknown distance
    }
  }
}
}
// One propagation step of measurement blending. Pixels that are adjacent (3x3)
// to pixels resolved in the previous iteration average their neighbors' depth
// deltas, mark themselves with the current iteration index, and shift their
// measured depth by the delta, linearly faded out with distance. The first
// half grows the region seeded at the measurement border (distance_map), the
// second half grows the region seeded at the surfel border (new_distance_map).
//
// iteration: current distance value being assigned (>= 2).
// interpolation_factor_term: fade slope; the host passes
//     1 / (measurement_blending_radius - 1) so the correction reaches zero at
//     the blending radius.
// depth_scaling: factor converting metric depth to u16 depth buffer units.
// NOTE(review): the in-place distance_map / new_distance_map writes can race
// with neighboring threads reading them in the same launch (see the author
// TODOs below) — statement order here is deliberate, do not reorder.
__global__ void BlendMeasurementsCUDAIterationKernel(
    int iteration,
    float interpolation_factor_term,
    float depth_scaling,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> /*supporting_surfel_counts*/,
    CUDABuffer_<float> /*supporting_surfel_depth_sums*/,
    CUDABuffer_<u8> distance_map,
    CUDABuffer_<float> surfel_depth_average_deltas,
    CUDABuffer_<u8> new_distance_map,
    CUDABuffer_<float> new_surfel_depth_average_deltas) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  // Skip a 1-pixel border because a 3x3 neighborhood is inspected below.
  constexpr int kBorder = 1;
  if (x >= kBorder && y >= kBorder && x < supporting_surfels.width() - kBorder && y < supporting_surfels.height() - kBorder) {
    // First pass: grow the region seeded at the measurement border.
    if (distance_map(y, x) == 255) { // unknown distance
      // Average the deltas of all 3x3 neighbors resolved in the last iteration.
      float delta_sum = 0;
      int count = 0;
      for (int wy = y - 1, wy_end = y + 1; wy <= wy_end; ++ wy) {
        for (int wx = x - 1, wx_end = x + 1; wx <= wx_end; ++ wx) {
          if (distance_map(wy, wx) == iteration - 1) {
            delta_sum += surfel_depth_average_deltas(wy, wx);
            ++ count;
          }
        }
      }
      if (count > 0) {
        distance_map(y, x) = iteration; // TODO: This assignment can happen while other threads read, does it matter?
        float surfel_delta_average = delta_sum / count;
        surfel_depth_average_deltas(y, x) = surfel_delta_average;
        // Fade the correction out linearly with distance from the border.
        float interpolation_factor = (iteration - 1) * interpolation_factor_term;
        depth_buffer(y, x) += depth_scaling * (1 - interpolation_factor) * surfel_delta_average + 0.5f;
      }
    }
    // Second pass: grow the region seeded at the surfel border ("new" maps).
    // Only pixels with valid depth, no supporting surfels, and a still-unset
    // new_distance_map entry participate.
    if (depth_buffer(y, x) != 0 && supporting_surfels(y, x) == Surfel::kInvalidIndex && new_distance_map(y, x) == 0) {
      float delta_sum = 0;
      int count = 0;
      for (int wy = y - 1, wy_end = y + 1; wy <= wy_end; ++ wy) {
        for (int wx = x - 1, wx_end = x + 1; wx <= wx_end; ++ wx) {
          if (new_distance_map(wy, wx) == iteration - 1) {
            delta_sum += new_surfel_depth_average_deltas(wy, wx);
            ++ count;
          }
        }
      }
      if (count > 0) {
        new_distance_map(y, x) = iteration; // TODO: This assignment can happen while other threads read, does it matter?
        float surfel_delta_average = delta_sum / count;
        new_surfel_depth_average_deltas(y, x) = surfel_delta_average;
        float interpolation_factor = (iteration - 1) * interpolation_factor_term;
        depth_buffer(y, x) += depth_scaling * (1 - interpolation_factor) * surfel_delta_average + 0.5f;
      }
    }
  }
}
// Host wrapper for measurement blending. Clears both distance maps, seeds
// border pixels (distance 1) with BlendMeasurementsCUDAStartKernel, then runs
// BlendMeasurementsCUDAIterationKernel for iterations
// 2 .. measurement_blending_radius - 1 to propagate the depth corrections
// outwards from the borders.
//
// measurement_blending_radius: number of propagation steps (in pixels).
// depth_correction_factor: converts u16 depth values to metric depth; its
//     inverse is passed to the kernels as depth_scaling.
void BlendMeasurementsCUDA(
    cudaStream_t stream,
    int measurement_blending_radius,
    float depth_correction_factor,
    CUDABuffer<u16>* depth_buffer,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& supporting_surfel_counts,
    const CUDABuffer<float>& supporting_surfel_depth_sums,
    CUDABuffer<u8>* distance_map,
    CUDABuffer<float>* surfel_depth_average_deltas,
    CUDABuffer<u8>* new_distance_map,
    CUDABuffer<float>* new_surfel_depth_average_deltas) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  distance_map->Clear(0, stream);
  new_distance_map->Clear(0, stream);
  // 2D launch covering all pixels.
  constexpr int kTileWidth = 32;
  constexpr int kTileHeight = 32;
  dim3 launch_grid(GetBlockCount(supporting_surfels.width(), kTileWidth),
                   GetBlockCount(supporting_surfels.height(), kTileHeight));
  dim3 launch_block(kTileWidth, kTileHeight);
  const float depth_scaling = 1.0f / depth_correction_factor;
  // Seed: find pixels with distance == 1 (valid depth next to a border, with
  // supporting surfels).
  BlendMeasurementsCUDAStartKernel
      <<<launch_grid, launch_block, 0, stream>>>(
      depth_scaling,
      depth_buffer->ToCUDA(),
      supporting_surfels.ToCUDA(),
      supporting_surfel_counts.ToCUDA(),
      supporting_surfel_depth_sums.ToCUDA(),
      distance_map->ToCUDA(),
      surfel_depth_average_deltas->ToCUDA(),
      new_distance_map->ToCUDA(),
      new_surfel_depth_average_deltas->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  // Propagate: assign the remaining distances and blend the averaged surfel
  // depth deltas outwards, fading linearly with distance.
  const float interpolation_factor_term = 1.0f / (measurement_blending_radius - 1.0f);
  for (int iteration = 2; iteration < measurement_blending_radius; ++ iteration) {
    BlendMeasurementsCUDAIterationKernel
        <<<launch_grid, launch_block, 0, stream>>>(
        iteration,
        interpolation_factor_term,
        depth_scaling,
        depth_buffer->ToCUDA(),
        supporting_surfels.ToCUDA(),
        supporting_surfel_counts.ToCUDA(),
        supporting_surfel_depth_sums.ToCUDA(),
        distance_map->ToCUDA(),
        surfel_depth_average_deltas->ToCUDA(),
        new_distance_map->ToCUDA(),
        new_surfel_depth_average_deltas->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
    cudaDeviceSynchronize();
#endif
    CHECK_CUDA_NO_ERROR();
  }
}
// Attempts to integrate the depth measurement at pixel (x, y) into the surfel
// with index surfel_index, or to handle a conflict against it (decreasing its
// confidence, and replacing it with a new surfel once confidence reaches 0).
//
// integrate may be passed as false: such threads do no work of their own but
// must still call this function so that the warp votes (__any) and block
// barriers (__syncthreads) below see all threads. Callers must invoke this
// function uniformly across the block.
//
// Locking: a surfel is locked by CAS-ing NaN into its x coordinate; competing
// threads see the NaN and spin until the lock is released via atomicExch.
// NOTE(review): uses the legacy mask-less __any intrinsic, which relies on
// pre-Volta implicit warp synchrony — confirm the targeted compute
// capabilities before porting to *_sync intrinsics.
__device__ void IntegrateOrConflictSurfel(
    bool integrate, u32 frame_index, int x, int y,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    const float3& cam_space_surfel_pos,
    unsigned int surfel_index,
    CUDABuffer_<float>& surfels,
    const CUDAMatrix3x4& local_T_global,
    const CUDAMatrix3x4& global_T_local,
    float max_surfel_confidence,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float depth_correction_factor,
    const CUDABuffer_<u16>& depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<uchar3> color_buffer,
    CUDABuffer_<u32>& /*supporting_surfels*/,
    CUDABuffer_<u32>& supporting_surfel_counts,
    CUDABuffer_<u32>& conflicting_surfels,
    CUDABuffer_<float>& first_surfel_depth) {
  // Check whether the surfel falls on a depth pixel.
  float measurement_depth = depth_correction_factor * depth_buffer(y, x);
  if (measurement_depth <= 0) {
    integrate = false;
  }
  if (!__any(integrate)) {
    return;
  }
  // Check if this or another surfel is conflicting.
  // A conflict exists if the front-most surfel at this pixel is significantly
  // in front of the measurement (beyond the sensor noise band).
  bool conflicting = false;
  const float first_surfel_depth_value = first_surfel_depth(y, x);
  if (first_surfel_depth_value < (1 - sensor_noise_factor) * measurement_depth) {
    // This or another surfel is conflicting.
    if (first_surfel_depth_value == cam_space_surfel_pos.z) {
      // This surfel is conflicting with the measurement.
      if (conflicting_surfels(y, x) == surfel_index) {
        conflicting = integrate;
      }
    }
    integrate = false;
  }
  if (!__any(integrate || conflicting)) {
    return;
  }
  // Determine the depth from which on surfels are considered to be occluded.
  float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
  if (kProtectSlightlyOccludedSurfels && first_surfel_depth_value < occlusion_depth) {
    // TODO: Would it be better to use the front surfel's radius for that?
    occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
  }
  // Check whether this surfel is occluded.
  if (cam_space_surfel_pos.z > occlusion_depth) {
    // Surfel is occluded.
    integrate = false;
  }
  if (!__any(integrate || conflicting)) {
    return;
  }
  // Read data: unproject the measurement to a global-frame position, normal,
  // and color (used both for surfel replacement and for integration below).
  float depth = depth_correction_factor * depth_buffer(y, x);
  float3 local_position;
  UnprojectPoint(x, y, depth, fx_inv, fy_inv, cx_inv, cy_inv, &local_position);
  float3 global_position = global_T_local * local_position;
  float2 normal_xy = normals_buffer(y, x);
  // Reconstruct the (camera-facing, hence negative) z component of the unit
  // normal from its stored x/y components.
  const float normal_z = -sqrtf(::max(0.f, 1 - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y));
  float3 global_normal = global_T_local.Rotate(make_float3(normal_xy.x, normal_xy.y, normal_z));
  uchar3 color = color_buffer(y, x);
  // Handle conflicts.
  // Critical section. HACK: replace surfel x coordinate with NaN to signal locked state.
  __syncthreads(); // Not sure if necessary
  while (__any(conflicting)) {
    float assumed_x = surfels(kSurfelX, surfel_index);
    if (conflicting &&
        !::isnan(assumed_x) &&
        atomicCAS(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)),
                  __float_as_int(assumed_x),
                  __float_as_int(CUDART_NAN_F)) == __float_as_int(assumed_x)) {
      // Handle the conflict with surfel_index.
      float confidence = surfels(kSurfelConfidence, surfel_index);
      confidence -= 1;
      if (confidence <= 0) {
        // Delete the old surfel by replacing it with a new one.
        // The replacement's x coordinate is written on lock release below.
        assumed_x = global_position.x;
        surfels(kSurfelY, surfel_index) = global_position.y;
        surfels(kSurfelZ, surfel_index) = global_position.z;
        surfels(kSurfelSmoothX, surfel_index) = global_position.x;
        surfels(kSurfelSmoothY, surfel_index) = global_position.y;
        surfels(kSurfelSmoothZ, surfel_index) = global_position.z;
        surfels(kSurfelNormalX, surfel_index) = global_normal.x;
        surfels(kSurfelNormalY, surfel_index) = global_normal.y;
        surfels(kSurfelNormalZ, surfel_index) = global_normal.z;
        *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))) = make_uchar4(color.x, color.y, color.z, 1); // Sets the neighbor detach request flag.
        surfels(kSurfelRadiusSquared, surfel_index) = radius_buffer(y, x);
        #pragma unroll
        for (int i = 0; i < kSurfelNeighborCount; ++ i) {
          // TODO: (Sh/c)ould the neighbors be initialized to something here instead of being removed completely?
          *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + i, surfel_index)) = Surfel::kInvalidIndex;
        }
        surfels(kSurfelConfidence, surfel_index) = 1;
        *reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index)) = frame_index;
        *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = frame_index;
      } else {
        surfels(kSurfelConfidence, surfel_index) = confidence;
      }
      // Release lock by setting x coordinate.
      // Not sure whether the atomicExch is necessary here, an atomic assignment would suffice.
      atomicExch(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)), __float_as_int(assumed_x));
      conflicting = false;
    }
    // Force execution of the if case to avoid hang coming from the fact that
    // only the threads which don't go into the if case are executed otherwise.
    __syncthreads();
  }
  // Early exit if none of the threads in the warp needs to integrate data.
  if (!__any(integrate)) {
    return;
  }
  // The measurement supports the surfel. Determine whether they belong to the
  // same surface (then the measurement should be integrated into the surfel),
  // or to different surfaces (then the measurement must not be integrated).
  // Check whether the surfel normal looks towards the camera (instead of away from it).
  float surfel_distance = Norm(cam_space_surfel_pos);
  float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                            surfels(kSurfelNormalY, surfel_index),
                                            surfels(kSurfelNormalZ, surfel_index));
  float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
  // Cosine of the angle between the viewing direction and the surfel normal.
  float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                cam_space_surfel_pos.y * local_surfel_normal.y +
                                                cam_space_surfel_pos.z * local_surfel_normal.z);
  if (dot_angle > kSurfelNormalToViewingDirThreshold) {
    integrate = false;
  }
  if (!__any(integrate)) {
    return;
  }
  // Check whether the surfel normal is compatible with the measurement normal.
  // Only enforced when the measurement is in front of the surfel.
  if (measurement_depth < cam_space_surfel_pos.z) {
    float dot_angle = global_surfel_normal.x * global_normal.x +
                      global_surfel_normal.y * global_normal.y +
                      global_surfel_normal.z * global_normal.z;
    if (dot_angle < cos_normal_compatibility_threshold) {
      integrate = false;
    }
  }
  // Check whether the observation scale is compatible with the surfel scale.
  const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
  if (surfel_radius_squared < 0) {
    integrate = false;
  }
  if (kCheckScaleCompatibilityForIntegration) {
    const float observation_radius_squared = radius_buffer(y, x);
    if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
      integrate = false;
    }
    if (!__any(integrate)) {
      return;
    }
  }
  // Integrate: fold the measurement into the surfel as a confidence-weighted
  // running average of position, normal, and color.
  // Critical section. HACK: replace surfel x coordinate with NaN to signal locked state.
  __syncthreads(); // Not sure if necessary
  while (__any(integrate)) {
    const float assumed_x = surfels(kSurfelX, surfel_index);
    if (integrate &&
        !::isnan(assumed_x) &&
        atomicCAS(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)),
                  __float_as_int(assumed_x),
                  __float_as_int(CUDART_NAN_F)) == __float_as_int(assumed_x)) {
      // TODO: Check why this max(1, ...) is necessary
      // Pixels supported by several surfels contribute with reduced weight.
      const float weight = 1.0f / ::max(1, supporting_surfel_counts(y, x));
      float new_surfel_x = assumed_x;
      // If the surfel has been created (i.e., replaced) in this iteration, do not
      // integrate the data, since the association is probably not valid anymore.
      // Also, the neighbor detach request flag should be kept in that case.
      if (*reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index)) < frame_index) {
        const float confidence = surfels(kSurfelConfidence, surfel_index);
        surfels(kSurfelConfidence, surfel_index) =
            (confidence + weight < max_surfel_confidence) ?
            (confidence + weight) :
            max_surfel_confidence;
        float normalization_factor = 1.0f / (confidence + weight);
        new_surfel_x = (confidence * assumed_x + weight * global_position.x) * normalization_factor; // assumed_x is the old surfel x value.
        surfels(kSurfelY, surfel_index) = (confidence * surfels(kSurfelY, surfel_index) + weight * global_position.y) * normalization_factor;
        surfels(kSurfelZ, surfel_index) = (confidence * surfels(kSurfelZ, surfel_index) + weight * global_position.z) * normalization_factor;
        float3 new_normal = make_float3(confidence * surfels(kSurfelNormalX, surfel_index) + weight * global_normal.x,
                                        confidence * surfels(kSurfelNormalY, surfel_index) + weight * global_normal.y,
                                        confidence * surfels(kSurfelNormalZ, surfel_index) + weight * global_normal.z);
        // Re-normalize the averaged normal to unit length.
        float normal_normalization = 1.0f / sqrtf(new_normal.x * new_normal.x + new_normal.y * new_normal.y + new_normal.z * new_normal.z);
        surfels(kSurfelNormalX, surfel_index) = normal_normalization * new_normal.x;
        surfels(kSurfelNormalY, surfel_index) = normal_normalization * new_normal.y;
        surfels(kSurfelNormalZ, surfel_index) = normal_normalization * new_normal.z;
        surfels(kSurfelRadiusSquared, surfel_index) = ::min(surfels(kSurfelRadiusSquared, surfel_index), radius_buffer(y, x));
        const uchar4 old_color = *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index)));
        const uchar3 new_color = make_uchar3(
            (confidence * old_color.x + weight * color.x) * normalization_factor + 0.5f,
            (confidence * old_color.y + weight * color.y) * normalization_factor + 0.5f,
            (confidence * old_color.z + weight * color.z) * normalization_factor + 0.5f);
        *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))) = make_uchar4(new_color.x, new_color.y, new_color.z, 0); // NOTE: Unsets the neighbor detach request flag
        *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = frame_index;
      }
      // Release lock by setting x coordinate.
      // Not sure whether the atomicExch is necessary here, an atomic assignment would suffice.
      atomicExch(reinterpret_cast<int*>(&surfels(kSurfelX, surfel_index)), __float_as_int(new_surfel_x));
      integrate = false;
    }
    // Force execution of the if case to avoid hang coming from the fact that
    // only the threads which don't go into the if case are executed otherwise.
    __syncthreads();
  }
}
// One thread per surfel: projects each active surfel into the current frame
// and calls IntegrateOrConflictSurfel() for the pixel it projects into, and
// then once more for the adjacent pixel closest to the sub-pixel projection
// (picked per triangle-half of the pixel). Threads that fail any gating check
// stay alive with integrate == false, since the callee contains warp votes
// and __syncthreads() that need all threads of the block.
//
// inv_depth_scaling: converts u16 depth values to metric depth.
// local_T_global / global_T_local: camera-from-world and world-from-camera
//     transforms; fx..cy project, fx_inv..cy_inv unproject (pixel-center
//     convention, see the host wrapper).
__global__ void IntegrateMeasurementsCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float max_surfel_confidence,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float inv_depth_scaling,
    float fx, float fy, float cx, float cy,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    CUDAMatrix3x4 local_T_global,
    CUDAMatrix3x4 global_T_local,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<uchar3> color_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<float> first_surfel_depth,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  bool integrate = true;
  // Check whether the surfel projects onto the image. Keep all threads active
  // such that the __syncthreads() later will work.
  if (surfel_index >= surfel_count) {
    surfel_index = 0;
    integrate = false;
  }
  if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
    integrate = false;
  }
  if (!__any(integrate)) {
    return;
  }
  // Transform the surfel position into camera space.
  float3 global_position =
      make_float3(surfels(kSurfelX, surfel_index),
                  surfels(kSurfelY, surfel_index),
                  surfels(kSurfelZ, surfel_index));
  float3 local_position = local_T_global * global_position;
  if (local_position.z <= 0) {
    // TODO: Compute z before x and y such that this early exit decision can be done earlier?
    integrate = false;
  }
  // Early exit?
  if (!__any(integrate)) {
    return;
  }
  // Project to pixel coordinates and clamp out-of-image threads to (0, 0)
  // (they keep participating with integrate == false).
  float2 pixel_pos =
      make_float2(fx * (local_position.x / local_position.z) + cx,
                  fy * (local_position.y / local_position.z) + cy);
  int px = static_cast<int>(pixel_pos.x);
  int py = static_cast<int>(pixel_pos.y);
  if (pixel_pos.x < 0 || pixel_pos.y < 0 ||
      px < 0 || py < 0 ||
      px >= depth_buffer.width() || py >= depth_buffer.height()) {
    px = 0;
    py = 0;
    integrate = false;
  }
  // Negative squared radius marks an invalid surfel radius.
  if (surfels(kSurfelRadiusSquared, surfel_index) < 0) {
    integrate = false;
  }
  // Early exit?
  if (!__any(integrate)) {
    return;
  }
  // First association: the pixel the surfel projects into.
  IntegrateOrConflictSurfel(
      integrate, frame_index, px, py,
      fx_inv, fy_inv, cx_inv, cy_inv,
      local_position,
      surfel_index, surfels,
      local_T_global,
      global_T_local,
      max_surfel_confidence,
      sensor_noise_factor,
      cos_normal_compatibility_threshold,
      inv_depth_scaling, depth_buffer,
      normals_buffer,
      radius_buffer,
      color_buffer,
      supporting_surfels,
      supporting_surfel_counts,
      conflicting_surfels,
      first_surfel_depth);
  // Pick the neighbor pixel on the side of (px, py) that the sub-pixel
  // position falls into (splitting the pixel into four triangles).
  float x_frac = pixel_pos.x - px;
  float y_frac = pixel_pos.y - py;
  int offset_x = 0;
  int offset_y = 0;
  if (x_frac < y_frac) {
    // Surfel is within the bottom-left triangle half of the pixel.
    if (x_frac < 1 - y_frac) {
      // Surfel is on the left side of the pixel.
      // NOTE(review): this uses px > 1 while the analogous top-side check
      // below uses py > 0 — possibly an off-by-one excluding column 0; confirm
      // whether that is intentional.
      if (px > 1) {
        offset_x = px - 1;
        offset_y = py;
      } else {
        integrate = false;
      }
    } else {
      // Surfel is on the bottom side of the pixel.
      if (py < depth_buffer.height() - 1) {
        offset_x = px;
        offset_y = py + 1;
      } else {
        integrate = false;
      }
    }
  } else {
    // Surfel is within the top-right triangle half of the pixel.
    if (x_frac < 1 - y_frac) {
      // Surfel is on the top side of the pixel.
      if (py > 0) {
        offset_x = px;
        offset_y = py - 1;
      } else {
        integrate = false;
      }
    } else {
      // Surfel is on the right side of the pixel.
      if (px < depth_buffer.width() - 1) {
        offset_x = px + 1;
        offset_y = py;
      } else {
        integrate = false;
      }
    }
  }
  // Second association: the chosen neighbor pixel.
  IntegrateOrConflictSurfel(
      integrate, frame_index, offset_x, offset_y,
      fx_inv, fy_inv, cx_inv, cy_inv,
      local_position,
      surfel_index, surfels,
      local_T_global,
      global_T_local,
      max_surfel_confidence,
      sensor_noise_factor,
      cos_normal_compatibility_threshold,
      inv_depth_scaling, depth_buffer,
      normals_buffer,
      radius_buffer,
      color_buffer,
      supporting_surfels,
      supporting_surfel_counts,
      conflicting_surfels,
      first_surfel_depth);
  // TODO: use half integration weight if the surfel is associated to two pixels?
}
// Host wrapper for IntegrateMeasurementsCUDAKernel. Derives the unprojection
// intrinsics (pixel-center convention) from the pinhole depth camera, turns
// the normal compatibility threshold from degrees into its cosine, and
// launches one thread per surfel. No-op if surfel_count is zero.
void IntegrateMeasurementsCUDA(
    cudaStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float max_surfel_confidence,
    float sensor_noise_factor,
    float normal_compatibility_threshold_deg,
    const SE3f& global_T_local,
    float depth_scaling,
    const PinholeCamera4f& depth_camera,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    const CUDABuffer<Vec3u8>& color_buffer,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& supporting_surfel_counts,
    const CUDABuffer<u32>& conflicting_surfels,
    const CUDABuffer<float>& first_surfel_depth,
    u32 surfel_count,
    CUDABuffer<float>* surfels) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Projection intrinsics.
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  // Unprojection intrinsics for pixel center convention.
  const float fx_inv = 1.0f / fx;
  const float fy_inv = 1.0f / fy;
  const float cx_inv_pixel_center = -(cx - 0.5f) / fx;
  const float cy_inv_pixel_center = -(cy - 0.5f) / fy;
  // One thread per surfel.
  constexpr int kThreadsPerBlock = 1024;
  dim3 launch_grid(GetBlockCount(surfel_count, kThreadsPerBlock));
  dim3 launch_block(kThreadsPerBlock);
  IntegrateMeasurementsCUDAKernel
      <<<launch_grid, launch_block, 0, stream>>>(
      frame_index,
      surfel_integration_active_window_size,
      max_surfel_confidence,
      sensor_noise_factor,
      cosf(M_PI / 180.0f * normal_compatibility_threshold_deg),
      1.0f / depth_scaling,
      fx, fy, cx, cy,
      fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center,
      CUDAMatrix3x4(global_T_local.inverse().matrix3x4()),
      CUDAMatrix3x4(global_T_local.matrix3x4()),
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      *reinterpret_cast<const CUDABuffer_<uchar3>*>(&color_buffer.ToCUDA()),
      supporting_surfels.ToCUDA(),
      supporting_surfel_counts.ToCUDA(),
      conflicting_surfels.ToCUDA(),
      first_surfel_depth.ToCUDA(),
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
// One thread per surfel: re-evaluates the regularization neighbors of each
// active, visible surfel. The surfel is projected into the current frame and
// passes the same gating as integration (in front of camera, inside image,
// not occluded, normal towards camera, valid radius, optional scale check).
// Then the supporting surfels of the four direct pixel neighbors are
// considered as neighbor candidates; a candidate replaces the currently
// farthest stored neighbor if it is close enough relative to the surfel
// radius, has a compatible normal (positive dot product), and is nearer than
// that farthest stored neighbor.
__global__ void UpdateNeighborsCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float radius_factor_for_regularization_neighbors_squared,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> /*conflicting_surfels*/,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    float sensor_noise_factor,
    float depth_correction_factor,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> /*normals_buffer*/,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<float> first_surfel_depth,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
      return;
    }
    // Project the surfel into the image.
    float3 global_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 cam_space_surfel_pos = local_T_global * global_position;
    if (cam_space_surfel_pos.z <= 0) {
      // TODO: Compute z before x and y such that this early exit decision can be done earlier?
      return;
    }
    float2 pixel_pos =
        make_float2(fx * (cam_space_surfel_pos.x / cam_space_surfel_pos.z) + cx,
                    fy * (cam_space_surfel_pos.y / cam_space_surfel_pos.z) + cy);
    int x = static_cast<int>(pixel_pos.x);
    int y = static_cast<int>(pixel_pos.y);
    // Use 1 pixel border.
    constexpr int kBorder = 1;
    if (x < kBorder || y < kBorder ||
        x >= supporting_surfels.width() - kBorder || y >= supporting_surfels.height() - kBorder) {
      return;
    }
    // Is the surfel occluded?
    float measurement_depth = depth_correction_factor * depth_buffer(y, x);
    float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
    if (kProtectSlightlyOccludedSurfels) {
      const float first_surfel_depth_value = first_surfel_depth(y, x);
      if (first_surfel_depth_value < occlusion_depth) {
        // TODO: Would it be better to use the front surfel's radius for that?
        occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
      }
    }
    if (cam_space_surfel_pos.z > occlusion_depth) {
      return;
    }
    // Check whether the surfel normal looks towards the camera (instead of away from it).
    float surfel_distance = Norm(cam_space_surfel_pos);
    float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                              surfels(kSurfelNormalY, surfel_index),
                                              surfels(kSurfelNormalZ, surfel_index));
    float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
    // Cosine of the angle between the viewing direction and the surfel normal.
    float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                  cam_space_surfel_pos.y * local_surfel_normal.y +
                                                  cam_space_surfel_pos.z * local_surfel_normal.z);
    if (dot_angle > kSurfelNormalToViewingDirThreshold) {
      return;
    }
    // Check whether the surfel normal is compatible with the measurement normal (if enabled).
    /*if (measurement_depth < cam_space_surfel_pos.z) {
      float2 normal = normals_buffer(y, x);
      float3 local_normal = make_float3(normal.x, normal.y, -sqrtf(::max(0.f, 1 - normal.x * normal.x - normal.y * normal.y)));
      float dot_angle = local_surfel_normal.x * local_normal.x +
                        local_surfel_normal.y * local_normal.y +
                        local_surfel_normal.z * local_normal.z;
      if (dot_angle < kNormalCompatibilityThreshold) {
        return;
      }
    }*/
    // Negative squared radius marks an invalid surfel radius.
    const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
    if (surfel_radius_squared < 0) {
      return;
    }
    if (kCheckScaleCompatibilityForNeighborAssignment) {
      const float observation_radius_squared = radius_buffer(y, x);
      if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
        return;
      }
    }
    // We think that the surfel is visible, update its neighbors.
    float radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
    float3 global_normal =
        make_float3(surfels(kSurfelNormalX, surfel_index),
                    surfels(kSurfelNormalY, surfel_index),
                    surfels(kSurfelNormalZ, surfel_index));
    // Compute distances to existing neighbors.
    // Empty slots get infinite distance so candidates always replace them.
    float neighbor_distances_squared[kSurfelNeighborCount];
    u32 neighbor_surfel_indices[kSurfelNeighborCount];
    for (int n = 0; n < kSurfelNeighborCount; ++ n) {
      neighbor_surfel_indices[n] = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, surfel_index));
      if (neighbor_surfel_indices[n] == Surfel::kInvalidIndex) {
        neighbor_distances_squared[n] = CUDART_INF_F;
      } else {
        float3 neighbor_position =
            make_float3(surfels(kSurfelX, neighbor_surfel_indices[n]),
                        surfels(kSurfelY, neighbor_surfel_indices[n]),
                        surfels(kSurfelZ, neighbor_surfel_indices[n]));
        float3 surfel_to_neighbor = make_float3(
            global_position.x - neighbor_position.x,
            global_position.y - neighbor_position.y,
            global_position.z - neighbor_position.z);
        neighbor_distances_squared[n] = surfel_to_neighbor.x * surfel_to_neighbor.x + surfel_to_neighbor.y * surfel_to_neighbor.y + surfel_to_neighbor.z * surfel_to_neighbor.z;
      }
    }
    // Consider the supporting surfels of the 4-neighborhood pixels as
    // neighbor candidates.
    constexpr int kDirectionsX[4] = {-1, 1, 0, 0};
    constexpr int kDirectionsY[4] = {0, 0, -1, 1};
    for (int direction = 0; direction < 4; ++ direction) {
      u32 neighbor_index = supporting_surfels(y + kDirectionsY[direction], x + kDirectionsX[direction]);
      if (neighbor_index != Surfel::kInvalidIndex &&
          neighbor_index != surfel_index) {
        // Check for closeness.
        float3 this_to_neighbor = make_float3(surfels(kSurfelX, neighbor_index) - global_position.x,
                                              surfels(kSurfelY, neighbor_index) - global_position.y,
                                              surfels(kSurfelZ, neighbor_index) - global_position.z);
        float distance_squared =
            this_to_neighbor.x * this_to_neighbor.x + this_to_neighbor.y * this_to_neighbor.y + this_to_neighbor.z * this_to_neighbor.z;
        if (distance_squared > radius_factor_for_regularization_neighbors_squared * radius_squared) {
          neighbor_index = Surfel::kInvalidIndex;
        }
        if (neighbor_index != Surfel::kInvalidIndex) {
          // Check for compatible normal.
          float3 neighbor_normal =
              make_float3(surfels(kSurfelNormalX, neighbor_index),
                          surfels(kSurfelNormalY, neighbor_index),
                          surfels(kSurfelNormalZ, neighbor_index));
          float normal_dot = global_normal.x * neighbor_normal.x +
                             global_normal.y * neighbor_normal.y +
                             global_normal.z * neighbor_normal.z;
          if (normal_dot <= 0) {
            neighbor_index = Surfel::kInvalidIndex;
          }
          if (neighbor_index != Surfel::kInvalidIndex) {
            // Check whether it is already a neighbor, or find the best insertion slot.
            // best_n is the slot holding the farthest current neighbor.
            int best_n = -1;
            float best_distance_squared = -1;
            for (int n = 0; n < kSurfelNeighborCount; ++ n) {
              if (neighbor_index == neighbor_surfel_indices[n]) {
                best_n = -1;
                break;
              } else if (neighbor_distances_squared[n] > best_distance_squared) {
                best_n = n;
                best_distance_squared = neighbor_distances_squared[n];
              }
            }
            if (best_n >= 0 && distance_squared < best_distance_squared) {
              neighbor_surfel_indices[best_n] = neighbor_index;
              neighbor_distances_squared[best_n] = distance_squared;
            }
          }
        }
      }
    }
    // Write the neighbor indices back to global memory.
    for (int n = 0; n < kSurfelNeighborCount; ++ n) {
      *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, surfel_index)) = neighbor_surfel_indices[n];
    }
  }
}
// One thread per surfel: drops regularization neighbors whose "neighbor
// detach request" flag (the w byte of the neighbor's packed color field) is
// set, i.e. neighbors that were replaced by a new surfel.
__global__ void UpdateNeighborsCUDARemoveReplacedNeighborsKernel(
    u32 frame_index,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index >= surfel_count) {
    return;
  }
  for (int n = 0; n < kSurfelNeighborCount; ++ n) {
    u32* neighbor_slot = reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + n, surfel_index));
    const u32 neighbor = *neighbor_slot;
    if (neighbor == Surfel::kInvalidIndex) {
      continue;
    }
    // The detach request flag lives in the 4th byte of the neighbor's color.
    if (reinterpret_cast<uchar4*>(&surfels(kSurfelColor, neighbor))->w == 1) {
      // This neighbor has the neighbor detach request flag set. Remove it.
      *neighbor_slot = Surfel::kInvalidIndex;
    }
  }
}
// Host wrapper for neighbor maintenance: first launches
// UpdateNeighborsCUDAKernel to (re-)assign the regularization neighbors of
// visible surfels from the supporting-surfel image, then
// UpdateNeighborsCUDARemoveReplacedNeighborsKernel to drop neighbors whose
// detach request flag is set. No-op if surfel_count is zero.
void UpdateNeighborsCUDA(
    cudaStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float radius_factor_for_regularization_neighbors,
    const CUDABuffer<u32>& supporting_surfels,
    const CUDABuffer<u32>& conflicting_surfels,
    const PinholeCamera4f& depth_camera,
    const SE3f& local_T_global,
    float sensor_noise_factor,
    float depth_correction_factor,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    const CUDABuffer<float>& first_surfel_depth,
    usize surfel_count,
    CUDABuffer<float>* surfels) {
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  if (surfel_count == 0) {
    return;
  }
  // Projection intrinsics of the depth camera.
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  // One thread per surfel for both kernels.
  constexpr int kThreadsPerBlock = 1024;
  dim3 launch_grid(GetBlockCount(surfel_count, kThreadsPerBlock));
  dim3 launch_block(kThreadsPerBlock);
  const float radius_factor_squared =
      radius_factor_for_regularization_neighbors * radius_factor_for_regularization_neighbors;
  UpdateNeighborsCUDAKernel
      <<<launch_grid, launch_block, 0, stream>>>(
      frame_index,
      surfel_integration_active_window_size,
      radius_factor_squared,
      supporting_surfels.ToCUDA(),
      conflicting_surfels.ToCUDA(),
      fx, fy, cx, cy,
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      sensor_noise_factor,
      depth_correction_factor,
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      first_surfel_depth.ToCUDA(),
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
  UpdateNeighborsCUDARemoveReplacedNeighborsKernel
      <<<launch_grid, launch_block, 0, stream>>>(
      frame_index,
      surfel_count,
      surfels->ToCUDA());
#ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
#endif
  CHECK_CUDA_NO_ERROR();
}
// Atomically records the minimum camera-space surfel depth seen at pixel
// (x, y) into first_surfel_depth.
__forceinline__ __device__ void RenderMinDepthAtPixel(
    int x, int y,
    const float3& cam_space_surfel_pos,
    CUDABuffer_<float>& first_surfel_depth) {
  // Should behave properly as long as all the floats are positive.
  // (For non-negative IEEE-754 floats the integer bit patterns order the same
  // way as the float values, so an integer atomicMin on the reinterpreted
  // bits computes the float minimum; there is no native float atomicMin.)
  atomicMin(reinterpret_cast<int*>(&first_surfel_depth(y, x)), __float_as_int(cam_space_surfel_pos.z));
}
// Renders a per-pixel minimum surfel depth map: each active surfel is
// projected into the camera and atomically min-combined into
// first_surfel_depth at the pixel it falls on. To thicken the rendering, the
// depth is also written to the closest neighboring pixel, selected by which
// of the four diagonal triangle quarters of the pixel contains the surfel's
// subpixel position. One thread per surfel; expects a 1D launch covering
// surfel_count threads.
__global__ void RenderMinDepthCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    CUDABuffer_<float> first_surfel_depth) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
      return;
    }
    
    float3 global_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 local_position = local_T_global * global_position;
    if (local_position.z <= 0) {
      // TODO: Compute z before x and y such that this early exit can be done earlier?
      return;
    }
    
    // Pinhole projection into the image.
    float2 pixel_pos =
        make_float2(fx * (local_position.x / local_position.z) + cx,
                    fy * (local_position.y / local_position.z) + cy);
    int px = static_cast<int>(pixel_pos.x);
    int py = static_cast<int>(pixel_pos.y);
    if (pixel_pos.x < 0 || pixel_pos.y < 0 ||
        px < 0 || py < 0 ||
        px >= first_surfel_depth.width() || py >= first_surfel_depth.height()) {
      return;
    }
    
    RenderMinDepthAtPixel(
        px, py, local_position,
        first_surfel_depth);
    
    // Subpixel position within pixel (px, py), each in [0, 1).
    float x_frac = pixel_pos.x - px;
    float y_frac = pixel_pos.y - py;
    
    bool integrate = true;
    int offset_x;
    int offset_y;
    if (x_frac < y_frac) {
      // Surfel is within the bottom-left triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the left side of the pixel.
        // BUGFIX: this guard was (px > 1), which skipped the valid left
        // neighbor px - 1 == 0 when px == 1 (asymmetric with the py > 0
        // check used for the top side below).
        if (px > 0) {
          offset_x = px - 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the bottom side of the pixel.
        if (py < first_surfel_depth.height() - 1) {
          offset_x = px;
          offset_y = py + 1;
        } else {
          integrate = false;
        }
      }
    } else {
      // Surfel is within the top-right triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the top side of the pixel.
        if (py > 0) {
          offset_x = px;
          offset_y = py - 1;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the right side of the pixel.
        if (px < first_surfel_depth.width() - 1) {
          offset_x = px + 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      }
    }
    
    if (integrate) {
      RenderMinDepthAtPixel(
          offset_x, offset_y, local_position,
          first_surfel_depth);
    }
  }
}
// Host-side wrapper: projects all surfels into the depth camera and fills
// *first_surfel_depth with the per-pixel minimum surfel depth.
void RenderMinDepthCUDA(
    cudaStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    const SE3f& local_T_global,
    const PinholeCamera4f& depth_camera,
    CUDABuffer<float>* first_surfel_depth,
    u32 surfel_count,
    const CUDABuffer<float>& surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Nothing to render without surfels.
  if (surfel_count == 0) {
    return;
  }
  
  // One thread per surfel; pinhole intrinsics (fx, fy, cx, cy) are passed
  // directly from the camera's parameter array.
  constexpr int kThreadsPerBlock = 1024;
  const dim3 kernel_grid(GetBlockCount(surfel_count, kThreadsPerBlock));
  const dim3 kernel_block(kThreadsPerBlock);
  RenderMinDepthCUDAKernel
  <<<kernel_grid, kernel_block, 0, stream>>>(
      frame_index,
      surfel_integration_active_window_size,
      depth_camera.parameters()[0],
      depth_camera.parameters()[1],
      depth_camera.parameters()[2],
      depth_camera.parameters()[3],
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      surfel_count,
      surfels.ToCUDA(),
      first_surfel_depth->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// Decides whether the surfel (at camera-space position cam_space_surfel_pos,
// projected onto pixel (x, y)) is supported by, occluded by, or in conflict
// with the depth measurement at that pixel. Supporting surfels are recorded
// in supporting_surfels / supporting_surfel_counts / supporting_surfel_depth_sums;
// a surfel lying in front of the measurement (free-space violation) is
// recorded in conflicting_surfels.
__device__ void ConsiderSurfelAssociationToPixel(
    int x, int y,
    const float3& cam_space_surfel_pos,
    unsigned int surfel_index,
    const CUDABuffer_<float>& surfels,
    const CUDAMatrix3x4& local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float depth_correction_factor,
    const CUDABuffer_<u16>& depth_buffer,
    const CUDABuffer_<float2>& normals_buffer,
    const CUDABuffer_<float>& radius_buffer,
    CUDABuffer_<u32>& supporting_surfels,
    CUDABuffer_<u32>& supporting_surfel_counts,
    CUDABuffer_<float>& supporting_surfel_depth_sums,
    CUDABuffer_<u32>& conflicting_surfels,
    CUDABuffer_<float>& first_surfel_depth) {
  // Check whether the surfel falls on a depth pixel.
  float measurement_depth = depth_correction_factor * depth_buffer(y, x);
  if (measurement_depth <= 0) {
    return;
  }
  
  // Check if this or another surfel is conflicting.
  // first_surfel_depth holds the minimum surfel depth rendered at this pixel.
  const float first_surfel_depth_value = first_surfel_depth(y, x);
  if (first_surfel_depth_value < (1 - sensor_noise_factor) * measurement_depth) {
    // This or another surfel is conflicting.
    if (first_surfel_depth_value == cam_space_surfel_pos.z) {
      // This surfel is conflicting.
      conflicting_surfels(y, x) = surfel_index;
    }
    return;
  }
  
  // Determine the depth from which on surfels are considered to be occluded.
  float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
  if (kProtectSlightlyOccludedSurfels) {
    if (first_surfel_depth_value < occlusion_depth) {
      // Tighten the occlusion depth to just behind the front-most surfel.
      // TODO: Would it be better to use the front surfel's radius for that?
      occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
    }
  }
  
  // Check if this surfel is occluded.
  if (cam_space_surfel_pos.z > occlusion_depth) {
    // Surfel is occluded.
    return;
  }
  
  // The measurement supports the surfel. Determine whether they belong to the
  // same surface (then the measurement should be integrated into the surfel),
  // or to different surfaces (then the measurement must not be integrated).
  
  // Check whether the surfel normal looks towards the camera (instead of away from it).
  float surfel_distance = Norm(cam_space_surfel_pos);
  float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                            surfels(kSurfelNormalY, surfel_index),
                                            surfels(kSurfelNormalZ, surfel_index));
  float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
  // Cosine of the angle between the viewing direction and the surfel normal.
  float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                cam_space_surfel_pos.y * local_surfel_normal.y +
                                                cam_space_surfel_pos.z * local_surfel_normal.z);
  if (dot_angle > kSurfelNormalToViewingDirThreshold) {
    return;
  }
  
  // Check whether the surfel normal is compatible with the measurement normal.
  // Only checked when the measurement lies in front of the surfel.
  if (measurement_depth < cam_space_surfel_pos.z) {
    float2 normal = normals_buffer(y, x);
    // Reconstruct the z component of the unit measurement normal (camera-facing, so negative).
    float3 local_normal = make_float3(normal.x, normal.y, -sqrtf(::max(0.f, 1 - normal.x * normal.x - normal.y * normal.y)));
    float dot_angle = local_surfel_normal.x * local_normal.x +
                      local_surfel_normal.y * local_normal.y +
                      local_surfel_normal.z * local_normal.z;
    if (dot_angle < cos_normal_compatibility_threshold) {
      // HACK: Avoid creation of a new surfel here in case there is no other conflicting or supporting surfel
      //       by setting conflicting_surfels(y, x) to an invalid index unequal to Surfel::kInvalidIndex.
      // TODO: This can be harmful since it can prevent the creation of valid surfaces. Delete it?
      // atomicCAS(&conflicting_surfels(y, x), Surfel::kInvalidIndex, Surfel::kInvalidIndex - 1);
      return;
    }
  }
  
  // The measurement seems to belong to the same surface as the surfel.
  // Check whether the observation scale is compatible with the surfel scale.
  // A non-positive radius marks a surfel that was invalidated (e.g. merged).
  const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
  if (surfel_radius_squared <= 0) {
    return;
  }
  if (kCheckScaleCompatibilityForIntegration) {
    const float observation_radius_squared = radius_buffer(y, x);
    if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
      // HACK: Avoid creation of a new surfel here in case there is no other conflicting or supporting surfel
      //       by setting conflicting_surfels(y, x) to an invalid index unequal to Surfel::kInvalidIndex.
      atomicCAS(&conflicting_surfels(y, x), Surfel::kInvalidIndex, Surfel::kInvalidIndex - 1);
      return;
    }
  }
  
  // Replace the supporting surfel entry only if it was previously empty
  atomicCAS(&supporting_surfels(y, x), Surfel::kInvalidIndex, surfel_index);
  
  // Add to supporting surfel count for the pixel
  atomicAdd(&supporting_surfel_counts(y, x), 1);
  
  // Add to the supporting surfel depth sum for the pixel
  atomicAdd(&supporting_surfel_depth_sums(y, x), cam_space_surfel_pos.z);
}
// Associates each active surfel with the depth measurement at the pixel it
// projects onto, and additionally with the closest neighboring pixel (chosen
// by which diagonal triangle quarter of the pixel contains the subpixel
// position). The actual support/conflict decision is delegated to
// ConsiderSurfelAssociationToPixel. One thread per surfel; expects a 1D
// launch covering surfel_count threads.
__global__ void AssociateSurfelsCUDAKernel(
    u32 frame_index,
    int surfel_integration_active_window_size,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float depth_correction_factor,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<float> supporting_surfel_depth_sums,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<float> first_surfel_depth) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    if (!IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
      return;
    }
    
    float3 global_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 local_position = local_T_global * global_position;
    if (local_position.z <= 0) {
      // TODO: Compute z before x and y such that this early exit can be done earlier?
      return;
    }
    
    // Pinhole projection into the image.
    float2 pixel_pos =
        make_float2(fx * (local_position.x / local_position.z) + cx,
                    fy * (local_position.y / local_position.z) + cy);
    int px = static_cast<int>(pixel_pos.x);
    int py = static_cast<int>(pixel_pos.y);
    if (pixel_pos.x < 0 || pixel_pos.y < 0 ||
        px < 0 || py < 0 ||
        px >= depth_buffer.width() || py >= depth_buffer.height()) {
      return;
    }
    
    ConsiderSurfelAssociationToPixel(
        px, py, local_position,
        surfel_index, surfels,
        local_T_global,
        sensor_noise_factor,
        cos_normal_compatibility_threshold,
        depth_correction_factor, depth_buffer, normals_buffer, radius_buffer, supporting_surfels,
        supporting_surfel_counts, supporting_surfel_depth_sums, conflicting_surfels, first_surfel_depth);
    
    // Subpixel position within pixel (px, py), each in [0, 1).
    float x_frac = pixel_pos.x - px;
    float y_frac = pixel_pos.y - py;
    
    bool integrate = true;
    int offset_x;
    int offset_y;
    if (x_frac < y_frac) {
      // Surfel is within the bottom-left triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the left side of the pixel.
        // BUGFIX: this guard was (px > 1), which skipped the valid left
        // neighbor px - 1 == 0 when px == 1 (asymmetric with the py > 0
        // check used for the top side below).
        if (px > 0) {
          offset_x = px - 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the bottom side of the pixel.
        if (py < depth_buffer.height() - 1) {
          offset_x = px;
          offset_y = py + 1;
        } else {
          integrate = false;
        }
      }
    } else {
      // Surfel is within the top-right triangle half of the pixel.
      if (x_frac < 1 - y_frac) {
        // Surfel is on the top side of the pixel.
        if (py > 0) {
          offset_x = px;
          offset_y = py - 1;
        } else {
          integrate = false;
        }
      } else {
        // Surfel is on the right side of the pixel.
        if (px < depth_buffer.width() - 1) {
          offset_x = px + 1;
          offset_y = py;
        } else {
          integrate = false;
        }
      }
    }
    
    if (integrate) {
      ConsiderSurfelAssociationToPixel(
          offset_x, offset_y, local_position,
          surfel_index, surfels,
          local_T_global,
          sensor_noise_factor,
          cos_normal_compatibility_threshold,
          depth_correction_factor, depth_buffer, normals_buffer, radius_buffer, supporting_surfels,
          supporting_surfel_counts, supporting_surfel_depth_sums, conflicting_surfels, first_surfel_depth);
    }
  }
}
// Host-side wrapper that associates every surfel with the depth measurement
// it projects onto, filling the supporting / conflicting surfel buffers used
// by the subsequent integration step.
void AssociateSurfelsCUDA(
    cudaStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float sensor_noise_factor,
    float normal_compatibility_threshold_deg,
    const SE3f& local_T_global,
    const PinholeCamera4f& depth_camera,
    float depth_correction_factor,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    CUDABuffer<u32>* supporting_surfels,
    CUDABuffer<u32>* supporting_surfel_counts,
    CUDABuffer<float>* supporting_surfel_depth_sums,
    CUDABuffer<u32>* conflicting_surfels,
    CUDABuffer<float>* first_surfel_depth,
    u32 surfel_count,
    const CUDABuffer<float>& surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Nothing to associate without surfels.
  if (surfel_count == 0) {
    return;
  }
  
  // The kernel compares normals via dot products, so convert the angular
  // threshold (degrees) to its cosine once here.
  const float cos_normal_compatibility_threshold =
      cosf(M_PI / 180.0f * normal_compatibility_threshold_deg);
  
  // One thread per surfel; pinhole intrinsics (fx, fy, cx, cy) are passed
  // directly from the camera's parameter array.
  constexpr int kBlockWidth = 1024;
  const dim3 kernel_grid(GetBlockCount(surfel_count, kBlockWidth));
  const dim3 kernel_block(kBlockWidth);
  AssociateSurfelsCUDAKernel
  <<<kernel_grid, kernel_block, 0, stream>>>(
      frame_index,
      surfel_integration_active_window_size,
      depth_camera.parameters()[0],
      depth_camera.parameters()[1],
      depth_camera.parameters()[2],
      depth_camera.parameters()[3],
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      sensor_noise_factor,
      cos_normal_compatibility_threshold,
      surfel_count,
      surfels.ToCUDA(),
      depth_correction_factor,
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      supporting_surfels->ToCUDA(),
      supporting_surfel_counts->ToCUDA(),
      supporting_surfel_depth_sums->ToCUDA(),
      conflicting_surfels->ToCUDA(),
      first_surfel_depth->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// Block width for the surfel merge kernel. Must match the cub::BlockReduce
// instantiation inside MergeSurfelsCUDAKernel, hence defined at file scope.
constexpr int kMergeBlockWidth = 1024;
// Tests whether the surfel projected onto pixel (x, y) duplicates the pixel's
// supported surfel closely enough (in radius, position, and normal) to be
// merged away. On a merge, this surfel is invalidated in place (radius set
// negative, update stamp reset, neighbor-detach flag raised). Returns true
// iff the surfel was merged. The early-out cascade mirrors
// ConsiderSurfelAssociationToPixel.
__device__ bool ConsiderSurfelMergeAtPixel(
    int x, int y,
    const float3& cam_space_surfel_pos,
    const float3& global_surfel_pos,
    unsigned int surfel_index,
    CUDABuffer_<float>& surfels,
    const CUDAMatrix3x4& local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    float depth_correction_factor,
    const CUDABuffer_<u16>& depth_buffer,
    const CUDABuffer_<float2>& normals_buffer,
    const CUDABuffer_<float>& radius_buffer,
    CUDABuffer_<u32>& supporting_surfels,
    CUDABuffer_<u32>& supporting_surfel_counts,
    CUDABuffer_<float>& supporting_surfel_depth_sums,
    CUDABuffer_<u32>& conflicting_surfels,
    CUDABuffer_<float>& first_surfel_depth) {
  // Check whether the surfel falls on a depth pixel.
  float measurement_depth = depth_correction_factor * depth_buffer(y, x);
  if (measurement_depth <= 0) {
    return false;
  }
  
  // Check if this or another surfel is conflicting.
  const float first_surfel_depth_value = first_surfel_depth(y, x);
  if (first_surfel_depth_value < (1 - sensor_noise_factor) * measurement_depth) {
    // This or another surfel is conflicting.
    if (first_surfel_depth_value == cam_space_surfel_pos.z) {
      // This surfel is conflicting.
      conflicting_surfels(y, x) = surfel_index;
    }
    return false;
  }
  
  // Determine the depth from which on surfels are considered to be occluded.
  float occlusion_depth = (1 + sensor_noise_factor) * measurement_depth;
  if (kProtectSlightlyOccludedSurfels) {
    if (first_surfel_depth_value < occlusion_depth) {
      // TODO: Would it be better to use the front surfel's radius for that?
      occlusion_depth = (1 + kOcclusionDepthFactor) * first_surfel_depth_value;
    }
  }
  
  // Check if this surfel is occluded.
  if (cam_space_surfel_pos.z > occlusion_depth) {
    // Surfel is occluded.
    return false;
  }
  
  // The measurement supports the surfel. Determine whether they belong to the
  // same surface (then the measurement should be integrated into the surfel),
  // or to different surfaces (then the measurement must not be integrated).
  
  // Check whether the surfel normal looks towards the camera (instead of away from it).
  float surfel_distance = Norm(cam_space_surfel_pos);
  float3 global_surfel_normal = make_float3(surfels(kSurfelNormalX, surfel_index),
                                            surfels(kSurfelNormalY, surfel_index),
                                            surfels(kSurfelNormalZ, surfel_index));
  float3 local_surfel_normal = local_T_global.Rotate(global_surfel_normal);
  // Cosine of the angle between the viewing direction and the surfel normal.
  float dot_angle = (1.0f / surfel_distance) * (cam_space_surfel_pos.x * local_surfel_normal.x +
                                                cam_space_surfel_pos.y * local_surfel_normal.y +
                                                cam_space_surfel_pos.z * local_surfel_normal.z);
  if (dot_angle > kSurfelNormalToViewingDirThreshold) {
    return false;
  }
  
  // Check whether the surfel normal is compatible with the measurement normal.
  // Only checked when the measurement lies in front of the surfel.
  if (measurement_depth < cam_space_surfel_pos.z) {
    float2 normal = normals_buffer(y, x);
    // Reconstruct the z component of the unit measurement normal (camera-facing, so negative).
    float3 local_normal = make_float3(normal.x, normal.y, -sqrtf(::max(0.f, 1 - normal.x * normal.x - normal.y * normal.y)));
    float dot_angle = local_surfel_normal.x * local_normal.x +
                      local_surfel_normal.y * local_normal.y +
                      local_surfel_normal.z * local_normal.z;
    if (dot_angle < cos_normal_compatibility_threshold) {
      return false;
    }
  }
  
  // The measurement seems to belong to the same surface as the surfel.
  // Check whether the observation scale is compatible with the surfel scale.
  const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
  if (kCheckScaleCompatibilityForIntegration) {
    const float observation_radius_squared = radius_buffer(y, x);
    if (observation_radius_squared / surfel_radius_squared > kMaxObservationRadiusFactorForIntegration * kMaxObservationRadiusFactorForIntegration) {
      return false;
    }
  }
  
  // Never merge the supported surfel.
  u32 supported_surfel = supporting_surfels(y, x);
  if (supported_surfel == surfel_index || supported_surfel == Surfel::kInvalidIndex) {
    return false;
  }
  
  // Compare the surfel to the supported surfel. Merge only if very similar.
  // Radius:
  const float other_radius_squared = surfels(kSurfelRadiusSquared, supported_surfel);
  float radius_diff = surfel_radius_squared / other_radius_squared;
  constexpr float kRadiusDiffThreshold = 1.2f;
  constexpr float kRadiusDiffThresholdSq = kRadiusDiffThreshold * kRadiusDiffThreshold;
  if (radius_diff > kRadiusDiffThresholdSq || radius_diff < 1 / kRadiusDiffThresholdSq) {
    return false;
  }
  
  // Distance:
  float3 other_global_position =
      make_float3(surfels(kSurfelX, supported_surfel),
                  surfels(kSurfelY, supported_surfel),
                  surfels(kSurfelZ, supported_surfel));
  float distance_squared = SquaredDistance(global_surfel_pos, other_global_position);
  constexpr float kDistanceThresholdFactor = 0.5f * (0.25f * 0.25f);
  if (distance_squared > kDistanceThresholdFactor * (surfel_radius_squared + other_radius_squared)) {
    return false;
  }
  
  // Normal:
  float3 other_surfel_normal = make_float3(surfels(kSurfelNormalX, supported_surfel),
                                           surfels(kSurfelNormalY, supported_surfel),
                                           surfels(kSurfelNormalZ, supported_surfel));
  dot_angle = Dot(global_surfel_normal, other_surfel_normal);
  constexpr float kCosNormalMergeThreshold = 0.93969f;  // 20 degrees
  if (dot_angle < kCosNormalMergeThreshold) {
    return false;
  }
  
  // Merge the surfel.
  // Invalidate this surfel in place: reset its update stamp, mark it deleted
  // via a negative squared radius, and raise the flag that makes
  // UpdateNeighborsCUDARemoveReplacedNeighborsKernel drop links to it.
  *reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index)) = 0;
  surfels(kSurfelRadiusSquared, surfel_index) = -1;
  *reinterpret_cast<u8*>(&reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index))->w) = 1;  // Set neighbor detach request flag
  return true;
}
// Attempts to merge each valid surfel into the supported surfel of the pixel
// it projects onto (see ConsiderSurfelMergeAtPixel), then block-reduces the
// per-thread merge flags and atomically accumulates the total merge count
// into num_merges_buffer(0, 0). Block width must equal kMergeBlockWidth
// (required by the BlockReduce instantiation below). One thread per surfel.
__global__ void MergeSurfelsCUDAKernel(
    u32 /*frame_index*/,
    int /*surfel_integration_active_window_size*/,
    float fx, float fy, float cx, float cy,
    CUDAMatrix3x4 local_T_global,
    float sensor_noise_factor,
    float cos_normal_compatibility_threshold,
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    float depth_correction_factor,
    CUDABuffer_<u16> depth_buffer,
    CUDABuffer_<float2> normals_buffer,
    CUDABuffer_<float> radius_buffer,
    CUDABuffer_<u32> supporting_surfels,
    CUDABuffer_<u32> supporting_surfel_counts,
    CUDABuffer_<float> supporting_surfel_depth_sums,
    CUDABuffer_<u32> conflicting_surfels,
    CUDABuffer_<float> first_surfel_depth,
    CUDABuffer_<u32> num_merges_buffer) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE: No early return before the BlockReduce below — all threads of the
  // block must reach it, so out-of-range threads just keep merged == false.
  bool merged = false;
  if (surfel_index < surfel_count) {
    // if (IsSurfelActiveForIntegration(surfel_index, surfels, frame_index, surfel_integration_active_window_size)) {
    // Only consider surfels that have not already been invalidated / merged
    // (negative squared radius marks deleted surfels).
    if (surfels(kSurfelRadiusSquared, surfel_index) >= 0) {
      float3 global_position =
          make_float3(surfels(kSurfelX, surfel_index),
                      surfels(kSurfelY, surfel_index),
                      surfels(kSurfelZ, surfel_index));
      float3 local_position = local_T_global * global_position;
      if (local_position.z > 0) {  // TODO: Compute z before x and y such that this early exit can be done earlier?
        // Pinhole projection into the image.
        float2 pixel_pos =
            make_float2(fx * (local_position.x / local_position.z) + cx,
                        fy * (local_position.y / local_position.z) + cy);
        int px = static_cast<int>(pixel_pos.x);
        int py = static_cast<int>(pixel_pos.y);
        if (!(pixel_pos.x < 0 || pixel_pos.y < 0 ||
              px < 0 || py < 0 ||
              px >= depth_buffer.width() || py >= depth_buffer.height())) {
          merged = ConsiderSurfelMergeAtPixel(
              px, py, local_position, global_position,
              surfel_index, surfels,
              local_T_global,
              sensor_noise_factor,
              cos_normal_compatibility_threshold,
              depth_correction_factor, depth_buffer, normals_buffer, radius_buffer, supporting_surfels,
              supporting_surfel_counts, supporting_surfel_depth_sums, conflicting_surfels, first_surfel_depth);
        }
      }
    }
  }
  
  // Sum the merge flags within the block; only thread 0 receives the total
  // and performs a single atomic add per block.
  typedef typename cub::BlockReduce<int, kMergeBlockWidth, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduceInt;
  __shared__ typename BlockReduceInt::TempStorage temp_storage;
  int num_merges = BlockReduceInt(temp_storage).Sum(merged ? 1 : 0);
  if (threadIdx.x == 0 && num_merges > 0) {
    atomicAdd(&num_merges_buffer(0, 0), static_cast<u32>(num_merges));
  }
}
// Host-side wrapper that merges duplicate surfels into the supported surfel
// of the pixel they project onto, and adds the number of performed merges to
// *merge_count. NOTE: This call is blocking — it downloads the merge count
// and synchronizes on the given stream before returning.
void MergeSurfelsCUDA(
    cudaStream_t stream,
    u32 frame_index,
    int surfel_integration_active_window_size,
    float sensor_noise_factor,
    float normal_compatibility_threshold_deg,
    const SE3f& local_T_global,
    const PinholeCamera4f& depth_camera,
    float depth_correction_factor,
    const CUDABuffer<u16>& depth_buffer,
    const CUDABuffer<float2>& normals_buffer,
    const CUDABuffer<float>& radius_buffer,
    CUDABuffer<u32>* supporting_surfels,
    CUDABuffer<u32>* supporting_surfel_counts,
    CUDABuffer<float>* supporting_surfel_depth_sums,
    CUDABuffer<u32>* conflicting_surfels,
    CUDABuffer<float>* first_surfel_depth,
    u32 surfel_count,
    u32* merge_count,
    CUDABuffer<float>* surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  if (surfel_count == 0) {
    return;
  }
  
  // Pinhole intrinsics of the depth camera.
  const float fx = depth_camera.parameters()[0];
  const float fy = depth_camera.parameters()[1];
  const float cx = depth_camera.parameters()[2];
  const float cy = depth_camera.parameters()[3];
  
  dim3 grid_dim(GetBlockCount(surfel_count, kMergeBlockWidth));
  dim3 block_dim(kMergeBlockWidth);
  
  // Single-element device counter for the number of merges, reset per call.
  // NOTE(review): the static lifetime ties this buffer to the CUDA context
  // active at first use and makes the function non-reentrant — see TODO.
  static CUDABuffer<u32> num_merges_buffer(1, 1);  // TODO: do not use static
  num_merges_buffer.Clear(0, stream);
  
  MergeSurfelsCUDAKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      frame_index,
      surfel_integration_active_window_size,
      fx, fy, cx, cy,
      CUDAMatrix3x4(local_T_global.matrix3x4()),
      sensor_noise_factor,
      cosf(M_PI / 180.0f * normal_compatibility_threshold_deg),
      surfel_count,
      surfels->ToCUDA(),
      depth_correction_factor,
      depth_buffer.ToCUDA(),
      normals_buffer.ToCUDA(),
      radius_buffer.ToCUDA(),
      supporting_surfels->ToCUDA(),
      supporting_surfel_counts->ToCUDA(),
      supporting_surfel_depth_sums->ToCUDA(),
      conflicting_surfels->ToCUDA(),
      first_surfel_depth->ToCUDA(),
      num_merges_buffer.ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  
  // Download the merge count; the stream sync makes the async copy safe to
  // read on the host afterwards.
  u32 num_merges = 0;
  num_merges_buffer.DownloadAsync(stream, &num_merges);
  cudaStreamSynchronize(stream);
  *merge_count += num_merges;
}
// Resets every surfel's gradient accumulator fields to zero in preparation
// for the subsequent regularization passes. One thread per surfel.
__global__ void RegularizeSurfelsCUDAClearGradientsKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  // TODO: Put this in the last kernel of the denoising (and in the
  //       initialization) and expect that it remains zero in-between the
  //       calls in order to save one kernel call? Is this used anywhere else?
  surfels(kSurfelGradientX, index) = 0;
  surfels(kSurfelGradientY, index) = 0;
  surfels(kSurfelGradientZ, index) = 0;
  surfels(kSurfelGradientCount, index) = 0;
}
// For each recently-updated surfel, distributes its regularization gradient
// contributions to its neighbors' gradient accumulators (via atomics), and
// prunes neighbor links that have grown too far away relative to the surfel
// radius. Must run after RegularizeSurfelsCUDAClearGradientsKernel and before
// RegularizeSurfelsCUDAKernel. One thread per surfel.
__global__ void RegularizeSurfelsCUDAAccumulateNeighborGradientsKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    float radius_factor_for_regularization_neighbors_squared,
    float regularizer_weight,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    // Count neighbors.
    // Only neighbors updated within the regularization frame window count,
    // so the per-neighbor weight (factor below) matches the active set.
    int neighbor_count = 0;
    for (int neighbor_index = 0; neighbor_index < kSurfelNeighborCount; ++ neighbor_index) {
      u32 neighbor_surfel_index = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index));
      if (neighbor_surfel_index == Surfel::kInvalidIndex) {
        continue;
      }
      if (static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, neighbor_surfel_index))) < static_cast<int>(frame_index - regularization_frame_window_size)) {
        continue;
      }
      ++ neighbor_count;
    }
    if (neighbor_count == 0) {
      return;
    }
    
    float3 smooth_position =
        make_float3(surfels(kSurfelSmoothX, surfel_index),
                    surfels(kSurfelSmoothY, surfel_index),
                    surfels(kSurfelSmoothZ, surfel_index));
    float3 normal =
        make_float3(surfels(kSurfelNormalX, surfel_index),
                    surfels(kSurfelNormalY, surfel_index),
                    surfels(kSurfelNormalZ, surfel_index));
    const float surfel_radius_squared = surfels(kSurfelRadiusSquared, surfel_index);
    
    // Accumulate gradient terms for neighbors.
    float factor = 2 * regularizer_weight / neighbor_count;
    for (int neighbor_index = 0; neighbor_index < kSurfelNeighborCount; ++ neighbor_index) {
      u32 neighbor_surfel_index = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index));
      if (neighbor_surfel_index == Surfel::kInvalidIndex) {
        continue;
      }
      if (static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, neighbor_surfel_index))) < static_cast<int>(frame_index - regularization_frame_window_size)) {
        continue;
      }
      
      float3 neighbor_position =
          make_float3(surfels(kSurfelSmoothX, neighbor_surfel_index),
                      surfels(kSurfelSmoothY, neighbor_surfel_index),
                      surfels(kSurfelSmoothZ, neighbor_surfel_index));
      float3 this_to_neighbor =
          make_float3(neighbor_position.x - smooth_position.x,
                      neighbor_position.y - smooth_position.y,
                      neighbor_position.z - smooth_position.z);
      // Gradient of the point-to-plane regularizer: the neighbor is pulled
      // along this surfel's normal by the projected offset.
      float factor_times_normal_dot_difference = factor * (normal.x * this_to_neighbor.x + normal.y * this_to_neighbor.y + normal.z * this_to_neighbor.z);
      float3 gradient_term_for_neighbor =
          make_float3(factor_times_normal_dot_difference * normal.x,
                      factor_times_normal_dot_difference * normal.y,
                      factor_times_normal_dot_difference * normal.z);
      // Atomics: multiple surfels may accumulate into the same neighbor.
      atomicAdd(&surfels(kSurfelGradientX, neighbor_surfel_index), gradient_term_for_neighbor.x);
      atomicAdd(&surfels(kSurfelGradientY, neighbor_surfel_index), gradient_term_for_neighbor.y);
      atomicAdd(&surfels(kSurfelGradientZ, neighbor_surfel_index), gradient_term_for_neighbor.z);
      atomicAdd(&surfels(kSurfelGradientCount, neighbor_surfel_index), regularizer_weight / neighbor_count);
      
      // If the neighbor is too far away, remove it.
      // NOTE / TODO: it can still happen that there are far away but inactive
      //              neighbors, which will influence an active surfel, since
      //              this check only removes active neighbors.
      //              However, I think this should be relatively rare.
      float neighbor_distance_squared = this_to_neighbor.x * this_to_neighbor.x + this_to_neighbor.y * this_to_neighbor.y + this_to_neighbor.z * this_to_neighbor.z;
      if (neighbor_distance_squared > radius_factor_for_regularization_neighbors_squared * surfel_radius_squared) {
        *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index)) = Surfel::kInvalidIndex;
      }
    }
  }
}
// Performs one gradient-descent step of surfel position denoising: combines
// the data term (pull toward the measured position), the neighbor-induced
// gradient terms accumulated by the previous kernel, and this surfel's own
// regularization terms, then writes the stepped position into the GRADIENT
// fields (not the smooth position, to avoid races with concurrently-running
// neighbor updates). RegularizeSurfelsCUDAUpdateKernel commits the result.
// One thread per surfel.
__global__ void RegularizeSurfelsCUDAKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    float regularizer_weight,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    // Skip surfels not updated within the regularization frame window.
    if (static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, surfel_index))) < static_cast<int>(frame_index - regularization_frame_window_size)) {
      return;
    }
    
    float3 measured_position =
        make_float3(surfels(kSurfelX, surfel_index),
                    surfels(kSurfelY, surfel_index),
                    surfels(kSurfelZ, surfel_index));
    float3 smooth_position =
        make_float3(surfels(kSurfelSmoothX, surfel_index),
                    surfels(kSurfelSmoothY, surfel_index),
                    surfels(kSurfelSmoothZ, surfel_index));
    float3 normal =
        make_float3(surfels(kSurfelNormalX, surfel_index),
                    surfels(kSurfelNormalY, surfel_index),
                    surfels(kSurfelNormalZ, surfel_index));
    
    // Data term and neighbor-induced gradient terms
    constexpr float data_term_factor = 2;
    float3 gradient =
        make_float3(data_term_factor * (smooth_position.x - measured_position.x) + surfels(kSurfelGradientX, surfel_index),
                    data_term_factor * (smooth_position.y - measured_position.y) + surfels(kSurfelGradientY, surfel_index),
                    data_term_factor * (smooth_position.z - measured_position.z) + surfels(kSurfelGradientZ, surfel_index));
    
    // Regularization gradient terms
    // (point-to-plane residuals to all valid neighbors, projected onto the
    // surfel normal).
    int neighbor_count = 0;
    float3 regularization_gradient = make_float3(0, 0, 0);
    for (int neighbor_index = 0; neighbor_index < kSurfelNeighborCount; ++ neighbor_index) {
      u32 neighbor_surfel_index = *reinterpret_cast<u32*>(&surfels(kSurfelNeighbor0 + neighbor_index, surfel_index));
      if (neighbor_surfel_index == Surfel::kInvalidIndex) {
        continue;
      }
      ++ neighbor_count;
      
      float3 neighbor_position =
          make_float3(surfels(kSurfelSmoothX, neighbor_surfel_index),
                      surfels(kSurfelSmoothY, neighbor_surfel_index),
                      surfels(kSurfelSmoothZ, neighbor_surfel_index));
      float3 this_to_neighbor =
          make_float3(neighbor_position.x - smooth_position.x,
                      neighbor_position.y - smooth_position.y,
                      neighbor_position.z - smooth_position.z);
      float normal_dot_difference = normal.x * this_to_neighbor.x + normal.y * this_to_neighbor.y + normal.z * this_to_neighbor.z;
      regularization_gradient =
          make_float3(regularization_gradient.x - normal_dot_difference * normal.x,
                      regularization_gradient.y - normal_dot_difference * normal.y,
                      regularization_gradient.z - normal_dot_difference * normal.z);
    }
    
    if (neighbor_count > 0) {
      // Apply constant factor to regularization gradient term
      float factor = 2 * regularizer_weight / neighbor_count;
      gradient =
          make_float3(gradient.x + factor * regularization_gradient.x,
                      gradient.y + factor * regularization_gradient.y,
                      gradient.z + factor * regularization_gradient.z);
    }
    
    // Step size is scaled inversely by the total weight of all residual
    // terms that contributed to the gradient.
    const float residual_terms_weight_sum = 1 + regularizer_weight + surfels(kSurfelGradientCount, surfel_index);
    const float kStepSizeFactor = 0.5f / residual_terms_weight_sum;
    
    // Avoid divergence by limiting the step length to a multiple of the surfel
    // radius (multiple with this factor here).
    // TODO: It seems that this is not necessary anymore now that the step size
    //       is more intelligently chosen. Remove it (after some more extensive
    //       testing).
    constexpr float kMaxStepLengthFactor = 1.0f;
    float max_step_length = kMaxStepLengthFactor * sqrtf(surfels(kSurfelRadiusSquared, surfel_index));
    float step_length = kStepSizeFactor * sqrtf(gradient.x * gradient.x + gradient.y * gradient.y + gradient.z * gradient.z);
    float step_factor = kStepSizeFactor;
    if (step_length > max_step_length) {
      step_factor = max_step_length / step_length * kStepSizeFactor;
    }
    
    // NOTE: Writing the update into the gradient first to avoid race conditions
    //       (the smooth position may still be used by neighboring surfel updates).
    //       The next kernel call will move the result to the smooth position field.
    surfels(kSurfelGradientX, surfel_index) = smooth_position.x - step_factor * gradient.x;
    surfels(kSurfelGradientY, surfel_index) = smooth_position.y - step_factor * gradient.y;
    surfels(kSurfelGradientZ, surfel_index) = smooth_position.z - step_factor * gradient.z;
  }
}
// Commits the denoising step computed by RegularizeSurfelsCUDAKernel: copies
// the stepped positions from the gradient fields into the smooth position
// fields of all recently-updated surfels. One thread per surfel.
__global__ void RegularizeSurfelsCUDAUpdateKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  // Skip surfels not updated within the regularization frame window.
  const int last_update_stamp = static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, index)));
  if (last_update_stamp < static_cast<int>(frame_index - regularization_frame_window_size)) {
    return;
  }
  surfels(kSurfelSmoothX, index) = surfels(kSurfelGradientX, index);
  surfels(kSurfelSmoothY, index) = surfels(kSurfelGradientY, index);
  surfels(kSurfelSmoothZ, index) = surfels(kSurfelGradientZ, index);
}
// Used when denoising is disabled: copies the raw measured positions into the
// smooth position fields of all recently-updated surfels so that downstream
// code can always read the smooth fields. One thread per surfel.
__global__ void RegularizeSurfelsCUDACopyOnlyKernel(
    u32 frame_index,
    int regularization_frame_window_size,
    u32 surfel_count,
    CUDABuffer_<float> surfels) {
  const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= surfel_count) {
    return;
  }
  // TODO: Only changed surfels need to be touched here.
  // Skip surfels not updated within the regularization frame window.
  const int last_update_stamp = static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelLastUpdateStamp, index)));
  if (last_update_stamp < static_cast<int>(frame_index - regularization_frame_window_size)) {
    return;
  }
  surfels(kSurfelSmoothX, index) = surfels(kSurfelX, index);
  surfels(kSurfelSmoothY, index) = surfels(kSurfelY, index);
  surfels(kSurfelSmoothZ, index) = surfels(kSurfelZ, index);
}
// Host-side driver for the surfel regularization pass.  Launches, on the
// given stream, either a single copy-only kernel (when disable_denoising is
// set) or the four-stage regularization pipeline:
//   1. clear per-surfel gradients,
//   2. accumulate neighbor gradients,
//   3. compute the regularized positions (written into the gradient fields),
//   4. move the results into the smooth-position fields.
// With CUDA_SEQUENTIAL_CHECKS defined, every stage is followed by a device
// sync so that asynchronous kernel errors are caught at the failing launch.
void RegularizeSurfelsCUDA(
    cudaStream_t stream,
    bool disable_denoising,
    u32 frame_index,
    float radius_factor_for_regularization_neighbors,
    float regularizer_weight,
    int regularization_frame_window_size,
    u32 surfel_count,
    CUDABuffer<float>* surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  // Nothing to do (and a zero-sized grid would be an invalid launch).
  if (surfel_count == 0) {
    return;
  }
  constexpr int kBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kBlockWidth));
  dim3 block_dim(kBlockWidth);
  if (disable_denoising) {
    // Only copy the raw surfel positions to the smoothed position fields.
    RegularizeSurfelsCUDACopyOnlyKernel
    <<<grid_dim, block_dim, 0, stream>>>(
        frame_index,
        regularization_frame_window_size,
        surfel_count,
        surfels->ToCUDA());
    #ifdef CUDA_SEQUENTIAL_CHECKS
    cudaDeviceSynchronize();
    #endif
    CHECK_CUDA_NO_ERROR();
    return;
  }
  // Stage 1: reset the gradient accumulators.
  RegularizeSurfelsCUDAClearGradientsKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  // Stage 2: accumulate regularization gradients from neighboring surfels.
  RegularizeSurfelsCUDAAccumulateNeighborGradientsKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      frame_index,
      regularization_frame_window_size,
      radius_factor_for_regularization_neighbors * radius_factor_for_regularization_neighbors,
      regularizer_weight,
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  // Stage 3: take the regularization step (result goes to the gradient fields).
  RegularizeSurfelsCUDAKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      frame_index,
      regularization_frame_window_size,
      regularizer_weight,
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  // Stage 4: publish the results to the smooth-position fields.
  RegularizeSurfelsCUDAUpdateKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      frame_index,
      regularization_frame_window_size,
      surfel_count,
      surfels->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// Exports each surfel as a vertex: writes xyz (smoothed position) into
// position_buffer and RGB into color_buffer, one entry per surfel index.
__global__ void ExportVerticesCUDAKernel(
    u32 surfel_count,
    CUDABuffer_<float> surfels,
    CUDABuffer_<float> position_buffer,
    CUDABuffer_<u8> color_buffer) {
  unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (surfel_index < surfel_count) {
    // A negative squared radius marks the surfel as merged (invalid); such
    // surfels are exported with NaN positions so consumers can filter them.
    bool merged = surfels(kSurfelRadiusSquared, surfel_index) < 0;
    float* position_ptr = position_buffer.address();
    position_ptr[3 * surfel_index + 0] = merged ? CUDART_NAN_F : surfels(kSurfelSmoothX, surfel_index);
    position_ptr[3 * surfel_index + 1] = merged ? CUDART_NAN_F : surfels(kSurfelSmoothY, surfel_index);
    position_ptr[3 * surfel_index + 2] = merged ? CUDART_NAN_F : surfels(kSurfelSmoothZ, surfel_index);
    // The color is stored bit-packed as a uchar4 inside a float slot; only
    // the first three channels (x, y, z) are exported.
    const uchar4 color = *(reinterpret_cast<uchar4*>(&surfels(kSurfelColor, surfel_index)));
    u8* color_ptr = color_buffer.address();
    color_ptr[3 * surfel_index + 0] = color.x;
    color_ptr[3 * surfel_index + 1] = color.y;
    color_ptr[3 * surfel_index + 2] = color.z;
  }
}
// Host wrapper: launches ExportVerticesCUDAKernel on the given stream to fill
// position_buffer (3 floats per surfel) and color_buffer (3 bytes per surfel).
// The buffers must be sized by the caller for at least surfel_count entries.
void ExportVerticesCUDA(
    cudaStream_t stream,
    u32 surfel_count,
    const CUDABuffer<float>& surfels,
    CUDABuffer<float>* position_buffer,
    CUDABuffer<u8>* color_buffer) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  // Avoid launching an empty (invalid) grid.
  if (surfel_count == 0) {
    return;
  }
  constexpr int kBlockWidth = 1024;
  dim3 grid_dim(GetBlockCount(surfel_count, kBlockWidth));
  dim3 block_dim(kBlockWidth);
  ExportVerticesCUDAKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      surfel_count,
      surfels.ToCUDA(),
      position_buffer->ToCUDA(),
      color_buffer->ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
// Debug helper: a single thread prints the raw and smoothed positions and the
// creation stamp of one surfel via device-side printf (output appears on the
// next synchronizing call).
__global__ void DebugPrintSurfelCUDAKernel(
    usize surfel_index,
    CUDABuffer_<float> surfels) {
  // Only one thread prints, regardless of the launch configuration.
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    printf("DEBUGGING surfel %i on GPU ...\n", static_cast<int>(surfel_index));
    printf("DEBUG surfel raw position x on GPU: %f\n", surfels(kSurfelX, surfel_index));
    printf("DEBUG surfel raw position y on GPU: %f\n", surfels(kSurfelY, surfel_index));
    printf("DEBUG surfel raw position z on GPU: %f\n", surfels(kSurfelZ, surfel_index));
    printf("DEBUG surfel smooth position x on GPU: %f\n", surfels(kSurfelSmoothX, surfel_index));
    printf("DEBUG surfel smooth position y on GPU: %f\n", surfels(kSurfelSmoothY, surfel_index));
    printf("DEBUG surfel smooth position z on GPU: %f\n", surfels(kSurfelSmoothZ, surfel_index));
    // The creation stamp is an integer bit-stored in a float slot.
    printf("DEBUG surfel creation stamp on GPU: %i\n", static_cast<int>(*reinterpret_cast<u32*>(&surfels(kSurfelCreationStamp, surfel_index))));
  }
}
// Host wrapper for DebugPrintSurfelCUDAKernel.  Launches a single 32-thread
// block (only thread 0 prints); debugging only — device printf is slow.
void DebugPrintSurfelCUDA(
    cudaStream_t stream,
    usize surfel_index,
    const CUDABuffer<float>& surfels) {
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
  constexpr int kBlockWidth = 32;
  dim3 grid_dim(1);
  dim3 block_dim(kBlockWidth);
  DebugPrintSurfelCUDAKernel
  <<<grid_dim, block_dim, 0, stream>>>(
      surfel_index,
      surfels.ToCUDA());
  #ifdef CUDA_SEQUENTIAL_CHECKS
  cudaDeviceSynchronize();
  #endif
  CHECK_CUDA_NO_ERROR();
}
}
|
10c1f81c5c8436ce4046b59d17f38c9b038cb35e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Auto-generated launch-geometry and indexing macros for the transpose
 * kernel below.  The kernel is hard-wired to 32x4 thread blocks. */
#define COALESCED_NUM 32
#define blockDimX 32
#define blockDimY 4
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
/* Global thread coordinates. */
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
/* Shorthand for block and thread indices. */
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
/* Each thread handles merger_y rows; coalesced_idy is the y-origin of the
 * 32-wide coalesced segment this block reads. */
#define merger_y 4
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
/* Row-major 2D accessors; WIDTH_C and WIDTH_A are assumed to be defined at
 * compile time (not visible in this file). */
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
/* Auto-generated tiled matrix transpose: C = A^T, staged through a 32x32
 * shared-memory tile.  Each 32x4 thread block loads a 32x32 tile of A
 * (8 rows per thread via the it_2 loop), then each thread writes 4
 * transposed elements to C (row offsets 0, 4, 8, 12 from bidy*16).
 * NOTE(review): the `width` parameter is unused — the matrix extents come
 * from the WIDTH_A/WIDTH_C macros; confirm against the caller. */
__global__ void transpose(float * A, float * C, int width)
{
	/* Tile padded to 33 columns — presumably to avoid shared-memory bank
	 * conflicts on the transposed (column-wise) reads; verify if modified. */
	__shared__ float shared_0[32][33];
	float sum_0 = 0;
	float sum_1 = 0;
	float sum_2 = 0;
	float sum_3 = 0;
	int it_2;
	/* Cooperative load of the tile: 4 thread-rows step through 32 tile rows. */
	#pragma unroll
	for (it_2=0; it_2<32; it_2=(it_2+4))
	{
		shared_0[(it_2+(tidy*1))][tidx]=A(((idx+(( - 1)*tidx))+(it_2+(tidy*1))), (coalesced_idy+tidx));
	}
	/* All tile writes must complete before the transposed reads below. */
	__syncthreads();
	sum_0=shared_0[tidx][((((bidy*16)+tidy)+0)+(( - 1)*coalesced_idy))];
	sum_1=shared_0[tidx][((((bidy*16)+tidy)+4)+(( - 1)*coalesced_idy))];
	sum_2=shared_0[tidx][((((bidy*16)+tidy)+8)+(( - 1)*coalesced_idy))];
	sum_3=shared_0[tidx][((((bidy*16)+tidy)+12)+(( - 1)*coalesced_idy))];
	__syncthreads();
	/* NOTE(review): this second consecutive barrier appears redundant
	 * (left by the code generator); harmless but does no work. */
	__syncthreads();
	{
		C((((bidy*16)+tidy)+0), idx)=sum_0;
	}
	{
		C((((bidy*16)+tidy)+4), idx)=sum_1;
	}
	{
		C((((bidy*16)+tidy)+8), idx)=sum_2;
	}
	{
		C((((bidy*16)+tidy)+12), idx)=sum_3;
	}
}
| 10c1f81c5c8436ce4046b59d17f38c9b038cb35e.cu | #define COALESCED_NUM 32
/* Auto-generated launch-geometry and indexing macros for the transpose
 * kernel below (CUDA twin of the hipified version above; COALESCED_NUM is
 * defined just before this block).  Blocks are hard-wired to 32x4 threads. */
#define blockDimX 32
#define blockDimY 4
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
/* Global thread coordinates. */
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
/* Shorthand for block and thread indices. */
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
/* Each thread handles merger_y rows; coalesced_idy is the y-origin of the
 * 32-wide coalesced segment this block reads. */
#define merger_y 4
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
/* Row-major 2D accessors; WIDTH_C and WIDTH_A are assumed to be defined at
 * compile time (not visible in this file). */
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
/* Auto-generated tiled matrix transpose: C = A^T, staged through a 32x32
 * shared-memory tile.  Each 32x4 thread block loads a 32x32 tile of A
 * (8 rows per thread via the it_2 loop), then each thread writes 4
 * transposed elements to C (row offsets 0, 4, 8, 12 from bidy*16).
 * NOTE(review): the `width` parameter is unused — the matrix extents come
 * from the WIDTH_A/WIDTH_C macros; confirm against the caller. */
__global__ void transpose(float * A, float * C, int width)
{
	/* Tile padded to 33 columns — presumably to avoid shared-memory bank
	 * conflicts on the transposed (column-wise) reads; verify if modified. */
	__shared__ float shared_0[32][33];
	float sum_0 = 0;
	float sum_1 = 0;
	float sum_2 = 0;
	float sum_3 = 0;
	int it_2;
	/* Cooperative load of the tile: 4 thread-rows step through 32 tile rows. */
	#pragma unroll
	for (it_2=0; it_2<32; it_2=(it_2+4))
	{
		shared_0[(it_2+(tidy*1))][tidx]=A(((idx+(( - 1)*tidx))+(it_2+(tidy*1))), (coalesced_idy+tidx));
	}
	/* All tile writes must complete before the transposed reads below. */
	__syncthreads();
	sum_0=shared_0[tidx][((((bidy*16)+tidy)+0)+(( - 1)*coalesced_idy))];
	sum_1=shared_0[tidx][((((bidy*16)+tidy)+4)+(( - 1)*coalesced_idy))];
	sum_2=shared_0[tidx][((((bidy*16)+tidy)+8)+(( - 1)*coalesced_idy))];
	sum_3=shared_0[tidx][((((bidy*16)+tidy)+12)+(( - 1)*coalesced_idy))];
	__syncthreads();
	/* NOTE(review): this second consecutive barrier appears redundant
	 * (left by the code generator); harmless but does no work. */
	__syncthreads();
	{
		C((((bidy*16)+tidy)+0), idx)=sum_0;
	}
	{
		C((((bidy*16)+tidy)+4), idx)=sum_1;
	}
	{
		C((((bidy*16)+tidy)+8), idx)=sum_2;
	}
	{
		C((((bidy*16)+tidy)+12), idx)=sum_3;
	}
}
|
819adb462ab6bb7f215791bd2988fedfd8c5f75f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************************
realize_mod.c
Takes a struct mod_t model and "realizes" its components as polyhedral solids made up of
triangular facets.
Modified 2016 July 9 by Matthias Engels:
Adapted for use with shape-cuda.
------------------------------------------------------------------------------------------
Modified 2014 April 26 by CM:
Increase the minimum permitted value of the highest-order coefficient in the cubic
equation that locates an ovoid vertex: if the coefficient is smaller than this
minimum, treat it as if it's zero and solve a quadratic equation instead
Modified 2014 March 22 by CM:
Relax the tolerance for finding a valid ovoid vertex position
Modified 2014 March 10 by CM:
Guard against roundoff problems when computing vertex positions for ovoid components
with very small |k|
Modified 2014 February 10 by CM:
Implement multiple radar and optical scattering laws
Modified 2013 August 28 by CM:
Set the bad diameter flag for harmonic components with tiny or negative vertex
displacements, and for harmonic and vertex components with tiny or negative
"scale factor" values
Modified 2013 June 2 by CM:
In the cubic_realroot routine, initialize nrealroots to avoid compilation warning
Fix a comment
Modified 2013 May 20 by CM:
Implement ovoid shape components
Modified 2012 July 4 by CM:
Add test in "realize_coordinates" routine to avoid compilation warning
Modified 2011 September 2 by CM:
Bug fix: the "check_surface" routine makes use of facet normals when identifying
active vs. inactive vertices and facets, but facet normals weren't being computed
until *after* check_surface was called
Make the code more modular (and address the above bug) by introducing the
"realize_coordinates" and "compute_moments" routines, as per the version of
realize_mod in the SHERMAN package
Store the area and the centroid coordinates of each facet
Add "harmlambert" optical scattering law (compute facet angular coordinates)
Modified 2010 September 1 by CM:
Add "facetnorm" argument to the rayfacint routine
Modified 2010 June 1 by CM:
Change "scalefactor" parameter from a scalar to a 3-component vector
Modified 2010 March 19 by CM:
Implement '=' state for vertex deviations
Modified 2009 November 15 by CM:
In the "check_surface" routine, eliminate an unused variable and fix
a couple of ambiguous nested if-then-else statements
Modified 2009 August 3 by CM:
For the "harmlommel" "harmhapke" "harmkaas" and "harmcosine_diff"
inhomogeneous scattering laws, compute the spherical coordinates
(theta and phi) of each facet after each component's rotational and
translational offsets have been applied rather than before, so that
these laws can be used for multiple-component models
For multiple-component models, use a more careful method (already used
for facets) to determine which vertices are on the model's surface;
also, for both vertices and facets, allow for a bit of roundoff
error in this determination by adding a tolerance argument to the
"rayfacint" routine
For multiple-component models, determine the new "act" (active) flag
for each model side
For multiple-component models, fix a bug in computing the center of mass
for individual components
Modified 2009 July 5 by CM:
Turn each component's rotational offsets into a rotation matrix here
rather than in the "read_mod" routine, in case the offsets are
being allowed to float
Modified 2009 July 1 by CM:
Add "check_surface" routine that determines which facets of a
multiple-component model lie on the model's surface rather than
interior to the model
For multiple-component models, when computing the area and the moments
of the overall model, ignore facets that lie interior to the model
Modified 2009 April 3 by CM:
Fix slight bug in defining function a[i] = 1/radius^2 when a/b or b/c
is tiny or negative for ellipsoid components
Initialize the "baddiam_logfactor" parameter and set its value when
2a, a/b, or b/c is tiny or negative for ellipsoid components
Modified 2007 August 10 by CM:
Eliminate unused variable
Modified 2007 January 8 by CM:
Define "scalefactor" state for vertex realizations of ellipsoid and
harmonic components, not just its value
Modified 2006 October 1 by CM:
Add "scalefactor" to harmonic and vertex shape structures
Replace ellipsoid diameters D with two_a, a_over_b, b_over_c
Modified 2005 September 6 by CM:
Add computation of facet angular coordinates for use with harmonic
scattering laws
Modified 2005 August 17 by CM:
Move computation of spherical harmonic functions afactor and bfactor
from here to read_mod.c, so that it can be done just once per fit
Modified 2005 February 28 by CM:
Initialize the "baddiam" parameter (flag indicating tiny or negative
ellipsoid diameters) to 0 here rather than in bestfit.c so that
it can be used for actions other than "fit"
Modified 2004 August 23 by CM:
Eliminated newtheta and oldcostheta variables and THETATOL constant,
since they weren't actually being used (i.e., the test in which
they were included was always true)
Modified 2003 April 17 by CM:
Added computation of component and model moments; this used to
be done in function penalties (but wasn't always being done)
Added code to cope with tiny or negative ellipsoid diameters;
as a result, must now pass the model's parameter structure
as an argument to realize_mod
Added surface area computation for components and for the full model
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
}
/* Numerical guard constants used when clamping degenerate shape parameters. */
#define HAIRWIDTH 1.0e-7
#define SMALLRATIO 0.01
#define SMALLOVOIDK1 0.01
#define SMALLOVOIDK2 1.0e-6
#define OVOIDTOL 1.0e-6
#define MAXEDGE 100
#define EDGETOL 1.0e-14
#define RTOL 1000*EDGETOL
#define SMALLCOEFF3 1.0e-5
/* These 2 device variables are to get nf and nv from the GPU-located dmod file */
/* dnv/dnf/dns cache the vertex/facet/side counts on the device (set by
 * set_diam_krnl) so later kernels need not dereference dmod for them. */
__device__ int dnv, dnf, dns;
/* d_a holds 1/radius^2 per axis for ellipsoid components (ellipse_diameter_krnl). */
__device__ double d_a[3];
/* Ovoid shape parameters shared between set_ovoid_parameters_krnl and
 * ovoid_distance_krnl. */
__device__ double a_radius, a_over_b, b_over_c, k_asym, x0term, numer, denom, x0;
/* Nonzero when any scattering law needs facet angular coordinates. */
__device__ int harmonic_scatlaw;
/* Host-side copies of the counts and cached launch dimensions. */
static int nv, nf, ns;
static dim3 nvBLK,nvTHD,nfBLK,nfTHD,nsBLK;
/* Forward declarations for the host routines defined later in this file. */
__host__ void realize_coordinates_gpu(struct par_t *dpar, struct mod_t *dmod, unsigned char type, int gpuid);
__host__ void compute_moments_gpu(struct mod_t *dmod, int nf, hipStream_t *cm_streams);
__global__ void set_diam_krnl(struct par_t *dpar, struct mod_t *dmod, int gpuid, int gpu0){
	/* Single-thread kernel: clears the bad-diameter flag and its log factor
	 * (on the primary GPU only) and caches the model's vertex, facet, and
	 * side counts in the device globals dnv/dnf/dns. */
	if (threadIdx.x != 0)
		return;
	if (gpuid == gpu0) {
		dpar->baddiam = 0;
		dpar->baddiam_logfactor = 0;
	}
	dnv = dmod->shape.comp[0].real.nv;
	dnf = dmod->shape.comp[0].real.nf;
	dns = dmod->shape.comp[0].real.ns;
}
/* Single-thread kernel: converts the ellipsoid parameters (2a, a/b, b/c)
 * into per-axis values 1/radius^2 stored in the device global d_a[].
 * Tiny or negative diameters/ratios are clamped to small positive values,
 * and the bad-diameter flag plus its log penalty factor are set so the
 * fitting code can penalize the degenerate shape. */
__global__ void ellipse_diameter_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* This is a single-thread kernel */
	double diam, diamratio;
	if (threadIdx.x == 0) {
		/* Long axis: d_a[0] starts out as 2/(2a) = 1/a. */
		diam = dmod->shape.comp[0].desc.ell.two_a.val;
		if (diam > HAIRWIDTH) {
			d_a[0] = 2.0/diam; /* 1/radii */
		} else {
			/* Degenerate 2a: clamp and record the penalty. */
			d_a[0] = (2.0/HAIRWIDTH) * (1 + HAIRWIDTH - diam);
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - diam);
		}
		/* Intermediate axis via the (possibly clamped) a and ratio a/b. */
		diam = (2.0/d_a[0]);
		diamratio = dmod->shape.comp[0].desc.ell.a_over_b.val;
		if (diamratio > SMALLRATIO) {
			d_a[1] = 2.0/(diam/diamratio);
		} else {
			d_a[1] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio);
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio);
		}
		/* Short axis via the (possibly clamped) b and ratio b/c. */
		diam = (2.0/d_a[1]);
		diamratio = dmod->shape.comp[0].desc.ell.b_over_c.val;
		if (diamratio > SMALLRATIO) {
			d_a[2] = 2.0/(diam/diamratio);
		} else {
			d_a[2] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio);
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio);
		}
		/* Square each 1/radius so d_a[j] = 1/radius^2, as needed by
		 * ellipse_distance_krnl. */
		d_a[0] *= d_a[0];
		d_a[1] *= d_a[1];
		d_a[2] *= d_a[2];
	}
}
/* nv-threaded kernel: computes each vertex's radial deviation r for an
 * ellipsoid component from the precomputed d_a[] = 1/radius^2 values
 * (see ellipse_diameter_krnl) and the vertex's direction cosines u[]. */
__global__ void ellipse_distance_krnl(struct par_t *dpar, struct mod_t *dmod) {
	int offset = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	double den;
	if (offset < dmod->shape.comp[0].real.nv) {
		/* Routine setuprealver (called by setupreal, which was called by
		 * read_mod) already created as many ellipsoid vertices as were needed
		 * for specified value of theta_steps, and initialized direction
		 * cosines u[j] for each vertex to be
		 * sin(theta)cos(phi), sin(theta)sin(phi), and cos(theta) for
		 * j=0, 1, and 2, respectively.
		 *
		 * These values are x/r, y/r, and z/r, where r is distance from origin
		 * to ellipsoid surface along direction (theta, phi) for given vertex.
		 * Since an ellipsoid has (x/a)^2 + (y/b)^2 + (z/c)^2 = 1, quantity
		 * "den" in code below is equal to 1/(r^2) for vertex i.
		 *
		 * Note that setuprealver initialized all vertex "base points" a[j] to
		 * be zero for ellipsoid components; hence "deviation" r is in fact the
		 * entire thing. */
		den = 0.0;
		for (j=0; j<=2; j++)
			den += d_a[j]*( dmod->shape.comp[0].real.v[offset].u[j]
			              * dmod->shape.comp[0].real.v[offset].u[j] );
		/* r = 1/sqrt(sum_j u[j]^2 / radius[j]^2) */
		dmod->shape.comp[0].real.v[offset].r.val = 1/sqrt(den);
	}
}
__global__ void ellipse_scalefactor_krnl(struct mod_t *dmod) {
	/* Single-threaded kernel: copies the ellipsoid parameter states onto the
	 * realization's three scale factors and initializes every scale value
	 * to 1.0. */
	if (threadIdx.x != 0)
		return;
	dmod->shape.comp[0].real.scalefactor[0].state = dmod->shape.comp[0].desc.ell.two_a.state;
	dmod->shape.comp[0].real.scalefactor[1].state = dmod->shape.comp[0].desc.ell.a_over_b.state;
	dmod->shape.comp[0].real.scalefactor[2].state = dmod->shape.comp[0].desc.ell.b_over_c.state;
	for (int axis=0; axis<=2; axis++)
		dmod->shape.comp[0].real.scalefactor[axis].val = 1.0;
}
/* Single-thread kernel: validates and clamps the four ovoid shape parameters
 * (a, a/b, b/c, asymmetry k) into the device globals used by
 * ovoid_distance_krnl, setting the bad-diameter flag and log penalty
 * whenever a parameter had to be clamped.  Also computes x0, the x-offset
 * that centers the ovoid's center of mass at the origin. */
__global__ void set_ovoid_parameters_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Single-threaded kernel */
	if (threadIdx.x == 0) {
		/* Determine all shape parameters, making sure that none are out of bounds */
		a_radius = dmod->shape.comp[0].desc.ovoid.two_a.val / 2;
		if (a_radius <= HAIRWIDTH/2) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*a_radius);
			a_radius = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*a_radius);
		}
		a_over_b = dmod->shape.comp[0].desc.ovoid.a_over_b.val;
		if (a_over_b <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - a_over_b);
			a_over_b = SMALLRATIO / (1 + SMALLRATIO - a_over_b);
		}
		b_over_c = dmod->shape.comp[0].desc.ovoid.b_over_c.val;
		if (b_over_c <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - b_over_c);
			b_over_c = SMALLRATIO / (1 + SMALLRATIO - b_over_c);
		}
		/* The asymmetry parameter k must stay strictly inside (-1, 1). */
		k_asym = dmod->shape.comp[0].desc.ovoid.k.val;
		if (fabs(k_asym) > 1 - SMALLVAL) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(fabs(k_asym) + SMALLVAL);
			if (k_asym > 0.0)
				k_asym = 1 - SMALLVAL*(1 - SMALLVAL)/k_asym;
			else
				k_asym = -1 - SMALLVAL*(1 - SMALLVAL)/k_asym;
		}
		/* Compute x0, the x-offset that places the ovoid's center of mass at the
		 * origin; for small |k|, use an analytical approximation to avoid
		 * roundoff problems */
		if (fabs(k_asym) > SMALLOVOIDK1) {
			x0term = 3*(1 - k_asym*k_asym)*log((1 + k_asym)/(1 - k_asym));
			numer = 2.0*k_asym*(3 - 2*k_asym*k_asym) - x0term;
			denom = 2.0*k_asym*(3 - k_asym*k_asym) - x0term;
			x0 = (a_radius/k_asym)*(numer/denom);
		} else {
			x0 = 0.4*k_asym*a_radius;
		}
	}
}
/* nv-threaded kernel: computes each vertex's radial deviation r for an
 * ovoid component.  Uses the device globals set by set_ovoid_parameters_krnl
 * (a_radius, a_over_b, b_over_c, k_asym, x0).  The scaled displacement
 * r/a is a root of a cubic in the vertex's look direction; for very small
 * |k| the ellipsoid solution plus a first-order Newton correction is used
 * instead to avoid roundoff problems. */
__global__ void ovoid_distance_krnl(struct par_t *dpar, struct mod_t *dmod) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j, k, nrealroots;
	double a_over_c, h, alpha0, u_x, coeff[4], goodroot, realroot[3], x_over_a;
	if (i < dmod->shape.comp[0].real.nv) {
		/* h collects the y/z direction-cosine terms scaled by the axis ratios. */
		a_over_c = a_over_b*b_over_c;
		h = a_over_b*a_over_b*dmod->shape.comp[0].real.v[i].u[1]
		   *dmod->shape.comp[0].real.v[i].u[1] + a_over_c*a_over_c
		   *dmod->shape.comp[0].real.v[i].u[2]*dmod->shape.comp[0].real.v[i].u[2];
		alpha0 = x0/a_radius;
		u_x = dmod->shape.comp[0].real.v[i].u[0];
		/* Cubic coefficients for the scaled radial distance r/a. */
		coeff[3] = (h - u_x*u_x)*k_asym*u_x;
		coeff[2] = (1 + 3*k_asym*alpha0)*u_x*u_x + h*(1 - k_asym*alpha0);
		coeff[1] = (k_asym - (2 + 3*k_asym*alpha0)*alpha0)*u_x;
		coeff[0] = -(1 - alpha0*alpha0)*(1 + k_asym*alpha0);
		if (fabs(k_asym) <= SMALLOVOIDK2) {
			/* |k| is very small, so guard against roundoff error by
			 * computing the vertex position for an ellipsoid (k = 0) and then
			 * applying a first-order correction for nonzero k */
			goodroot = 1/sqrt(u_x*u_x + h);
			goodroot -= (coeff[3]*goodroot*goodroot*goodroot + coeff[1]*goodroot)
					/ (3*coeff[3]*goodroot*goodroot + 2*coeff[2]*goodroot + coeff[1]);
		} else {
			/* |k| isn't very small, so solve the cubic equation */
			nrealroots = cubic_realroots_cuda( coeff, realroot);
			/* Keep the largest nonnegative root whose x/a lies within the
			 * ovoid's extent (|x/a - alpha0| <= 1, up to OVOIDTOL). */
			goodroot = -HUGENUMBER;
			for (k=0; k<nrealroots; k++)
				if (realroot[k] >= 0.0) {
					x_over_a = realroot[k]*u_x;
					if (fabs(x_over_a - alpha0) - 1 < OVOIDTOL)
						goodroot = MAX( goodroot, realroot[k]);
				}
		}
		if (goodroot < 0.0)
			printf("Can't compute vertex displacement for ovoid vertex %d\n", i);
		dmod->shape.comp[0].real.v[i].r.val = goodroot*a_radius;
		/* Assign scalefactor values */
		/* NOTE(review): these assignments are redundantly repeated by every
		 * thread; harmless since all threads write identical values. */
		dmod->shape.comp[0].real.scalefactor[0].state = dmod->shape.comp[0].desc.ovoid.two_a.state;
		dmod->shape.comp[0].real.scalefactor[1].state = dmod->shape.comp[0].desc.ovoid.a_over_b.state;
		dmod->shape.comp[0].real.scalefactor[2].state = dmod->shape.comp[0].desc.ovoid.b_over_c.state;
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.scalefactor[j].val = 1.0;
	}
}
/* nv-threaded kernel: computes each vertex's radial deviation r for a
 * spherical-harmonic component by summing a[l][m]*afactor + b[l][m]*bfactor
 * over all degrees l <= nhar and orders m <= l (afactor/bfactor were
 * precomputed per vertex in read_mod).  Tiny or negative displacements are
 * clamped and flagged via the bad-diameter penalty. */
__global__ void harmonic_krnl(struct par_t *dpar, struct mod_t *dmod){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int L, l, m;
	double r;
	if (i < dnv) {
		L = dmod->shape.comp[0].desc.har.nhar;
		r = 0.0;
		for (l=0; l<=L; l++) {
			/* m = 0 term has no b coefficient. */
			r += dmod->shape.comp[0].desc.har.a[l][0].val
			   * dmod->shape.comp[0].real.v[i].afactor[l][0];
			for (m=1; m<=l; m++)
				r += dmod->shape.comp[0].desc.har.a[l][m].val
				   * dmod->shape.comp[0].real.v[i].afactor[l][m]
				   + dmod->shape.comp[0].desc.har.b[l][m].val
				   * dmod->shape.comp[0].real.v[i].bfactor[l][m];
		}
		if (r > HAIRWIDTH/2) {
			dmod->shape.comp[0].real.v[i].r.val = r;
		} else {
			/* Degenerate displacement: clamp it, flag the model, and spread
			 * the log penalty over the (L+1)^2 harmonic coefficients. */
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*r) / ((L+1)*(L+1));
			dmod->shape.comp[0].real.v[i].r.val = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*r);
		}
	}
}
__global__ void harmonic_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Single-thread kernel: copies the harmonic component's three scale
	 * factors into the realization, resolving '=' (linked) states and
	 * clamping tiny or negative values (setting the bad-diameter flag and
	 * log penalty when a clamp occurs).
	 *
	 * Bug fix: this used to run with one thread per component (j = 0..2),
	 * but the '=' state makes scalefactor[j] depend on the ALREADY-UPDATED
	 * scalefactor[j-1]; with parallel threads, lane j could read the stale
	 * value before lane j-1 wrote the new one (a read/write race with no
	 * ordering guarantee).  The three components are now processed
	 * sequentially by thread 0, the same pattern already used by
	 * vertex_scalefactor_krnl.  Extra launched threads simply exit, so the
	 * existing 3-thread launch configuration still works. */
	if (threadIdx.x == 0) {
		for (int j=0; j<=2; j++) {
			/* '=' links this scale factor to the (just-updated) previous one. */
			if (j > 0 && dmod->shape.comp[0].desc.har.scalefactor[j].state == '=')
				dmod->shape.comp[0].desc.har.scalefactor[j].val
						= dmod->shape.comp[0].desc.har.scalefactor[j-1].val;
			dmod->shape.comp[0].real.scalefactor[j].state = dmod->shape.comp[0].desc.har.scalefactor[j].state;
			dmod->shape.comp[0].real.scalefactor[j].val = dmod->shape.comp[0].desc.har.scalefactor[j].val;
			/* Clamp a vanishing/negative scale factor and penalize the model. */
			if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) {
				dpar->baddiam = 1;
				dpar->baddiam_logfactor += log(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
				dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO
						/ (1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
			}
		}
	}
}
__global__ void vertex_update_dev_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: for every vertex whose deviation carries the '='
	 * (linked) state, copy the deviation value from its mirror vertex. */
	int v = blockIdx.x * blockDim.x + threadIdx.x;
	if (v >= dmod->shape.comp[0].real.nv)
		return;
	if (dmod->shape.comp[0].real.v[v].r.state != '=')
		return;
	int mirror = dmod->shape.comp[0].real.v[v].v_mirror;
	dmod->shape.comp[0].real.v[v].r.val =
			dmod->shape.comp[0].real.v[mirror].r.val;
}
/* Single-thread kernel: copies the vertex component's three scale factors
 * into the realization, resolving '=' (linked) states sequentially (each
 * component may depend on the previously updated one) and clamping tiny or
 * negative values with the bad-diameter flag/penalty. */
__global__ void vertex_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod) {
	// This is a 3-thread single thread kernel
	//int j = threadIdx.x;
	if (threadIdx.x == 0) {
		for (int j=0; j<=2; j++) {
			/* '=' links this scale factor to the (just-updated) previous one. */
			if (j > 0 && dmod->shape.comp[0].desc.ver.scalefactor[j].state == '=')
				dmod->shape.comp[0].desc.ver.scalefactor[j].val
						= dmod->shape.comp[0].desc.ver.scalefactor[j-1].val;
			dmod->shape.comp[0].real.scalefactor[j].val = dmod->shape.comp[0].desc.ver.scalefactor[j].val;
			/* Clamp a vanishing/negative scale factor and penalize the model. */
			if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) {
				dpar->baddiam = 1;
				dpar->baddiam_logfactor += log(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
				dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO
						/ (1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
			}
		}
	}
	/* NOTE(review): this trailing barrier is reached by all threads (the
	 * branch above has rejoined) but appears to serve no purpose at the
	 * end of the kernel. */
	__syncthreads();
}
__global__ void calc_vertex_co_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: converts each vertex's parametric description into
	 * Cartesian coordinates,
	 *     x[j] = scalefactor[j] * (u[j]*r + a[j]),
	 * i.e. base point a plus deviation r along the direction cosines u,
	 * scaled independently per axis. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= dnv)
		return;
	for (int j=0; j<=2; j++) {
		double scale = dmod->shape.comp[0].real.scalefactor[j].val;
		double displacement = dmod->shape.comp[0].real.v[i].u[j]
				* dmod->shape.comp[0].real.v[i].r.val
				+ dmod->shape.comp[0].real.v[i].a[j];
		dmod->shape.comp[0].real.v[i].x[j] = scale * displacement;
	}
}
__global__ void perform_rotation_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: applies the component's rotation matrix m to every
	 * vertex position, unless all three rotational offsets are zero (in
	 * which case the rotation is an identity and is skipped).
	 *
	 * Bug fix: the skip decision used to be stored in a per-thread local
	 * `test` that only global thread 0 initialized; registers are private,
	 * so __syncthreads() could not publish the value and every other thread
	 * (and every other block) branched on an uninitialized variable.  Each
	 * thread now evaluates the (cheap, uniform) condition itself. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	double x[3];
	int j, k;
	if (i < dnv) {
		if (!(dmod->shape.comp[0].rot[0].val == 0 &&
		      dmod->shape.comp[0].rot[1].val == 0 &&
		      dmod->shape.comp[0].rot[2].val == 0 )) {
			/* x = m * v[i].x, computed into a temporary first. */
			for (j=0; j<=2; j++) {
				x[j] = 0.0;
				for (k=0; k<=2; k++)
					x[j] += dmod->shape.comp[0].m[j][k] * dmod->shape.comp[0].real.v[i].x[k];
			}
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].real.v[i].x[j] = x[j];
		}
	}
}
__global__ void perform_translation_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: adds the component's translational offsets to every
	 * vertex position, unless all three offsets are zero (no-op).
	 *
	 * Bug fix: like perform_rotation_krnl, the skip decision used to live in
	 * a per-thread local `test` that only global thread 0 initialized —
	 * registers are not shared across threads or blocks, so all other
	 * threads branched on an uninitialized value.  Each thread now evaluates
	 * the (cheap, uniform) condition itself. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	if (i < dnv) {
		if (!(dmod->shape.comp[0].off[0].val == 0.0 &&
		      dmod->shape.comp[0].off[1].val == 0.0 &&
		      dmod->shape.comp[0].off[2].val == 0.0 )) {
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].real.v[i].x[j] += dmod->shape.comp[0].off[j].val;
		}
	}
}
__global__ void set_optical_params_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Single-thread kernel: sets the device flag harmonic_scatlaw to 1 if
	 * any optical or radar scattering law requires facet angular
	 * coordinates (the harmonic laws), and to 0 otherwise.
	 *
	 * Bug fix: the flag was previously zeroed by EVERY launched thread,
	 * outside the threadIdx guard — if the kernel were ever launched with
	 * more than one thread, a late-running thread could overwrite the value
	 * after thread 0 had set it to 1.  The initialization now happens
	 * inside the single-thread guard. */
	int ilaw;
	if (threadIdx.x == 0) {
		harmonic_scatlaw = 0;
		for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++)
			if (dmod->photo.opttype[ilaw] == HARMLAMBERT || dmod->photo.opttype[ilaw] == HARMLOMMEL
					|| dmod->photo.opttype[ilaw] == HARMHAPKE
					|| dmod->photo.opttype[ilaw] == HARMKAAS)
				harmonic_scatlaw = 1;
		for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++)
			if (dmod->photo.radtype[ilaw] == HARMCOSINE_DIFF)
				harmonic_scatlaw = 1;
	}
}
/* nv-threaded kernel: computes each vertex normal as the normalized sum of
 * the normals of all facets attached to that vertex (facet normals must
 * already have been computed, e.g. by facet_krnl / dev_facnrm). */
__global__ void calc_vertex_nrmls_krnl(struct mod_t *dmod) {
	/* nv-threaded kernel */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	double n[3];
	int j, k, naf, f;
	if (i < dmod->shape.comp[0].real.nv){
		n[0] = n[1] = n[2] = 0.0;
		/* Sum the normals of the naf facets attached to this vertex. */
		naf = dmod->shape.comp[0].real.v[i].naf;
		for (j=0; j<naf; j++) {
			f = dmod->shape.comp[0].real.v[i].af[j];
			n[0] += dmod->shape.comp[0].real.f[f].n[0];
			n[1] += dmod->shape.comp[0].real.f[f].n[1];
			n[2] += dmod->shape.comp[0].real.f[f].n[2];
		}
		dev_normalize( n);
		for (k=0; k<=2; k++)
			dmod->shape.comp[0].real.v[i].n[k] = n[k];
	}
}
__global__ void facet_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* For each facet of this component, compute the outward unit normal,
	 * the area, the mean coordinates of the three corner vertices, and
	 * the corresponding angular coordinates (for some scattering laws) */
	/* nf-threaded kernel */
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	if (f < dnf) {
		/* dev_facnrm stores the unit normal in f[f].n and returns the area. */
		dmod->shape.comp[0].real.f[f].area = dev_facnrm(dmod->shape.comp[0].real, f);
		/* Facet centroid = mean of the three corner-vertex positions. */
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.f[f].x[j] = (dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x[j] +
					dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x[j] +
					dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x[j] )/3;
		/* Spherical angular coordinates of the centroid, needed only when a
		 * harmonic scattering law is in use (see set_optical_params_krnl). */
		if (harmonic_scatlaw) {
			dmod->shape.comp[0].real.f[f].theta = atan2( sqrt(dmod->shape.comp[0].real.f[f].x[0]*dmod->shape.comp[0].real.f[f].x[0] +
					dmod->shape.comp[0].real.f[f].x[1]*dmod->shape.comp[0].real.f[f].x[1] ),
					dmod->shape.comp[0].real.f[f].x[2]);
			dmod->shape.comp[0].real.f[f].phi = atan2( dmod->shape.comp[0].real.f[f].x[1], dmod->shape.comp[0].real.f[f].x[0]);
		}
	}
}
__global__ void set_real_active_vert_krnl(struct mod_t *dmod) {
	/* nv-threaded kernel: marks every vertex of the realization as active. */
	int v = blockIdx.x * blockDim.x + threadIdx.x;
	if (v >= dmod->shape.comp[0].real.nv)
		return;
	dmod->shape.comp[0].real.v[v].act = 1;
}
__global__ void set_real_active_facet_krnl(struct mod_t *dmod) {
	/* nf-threaded kernel: marks every facet of the realization as active. */
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	if (f >= dmod->shape.comp[0].real.nf)
		return;
	dmod->shape.comp[0].real.f[f].act = 1;
}
__global__ void set_real_active_side_krnl(struct mod_t *dmod) {
	/* ns-threaded kernel: marks every side of the realization as active. */
	int k = blockIdx.x * blockDim.x + threadIdx.x;
	if (k >= dmod->shape.comp[0].real.ns)
		return;
	dmod->shape.comp[0].real.s[k].act = 1;
}
/* Top-level GPU realization entry point.  `type` selects the shape
 * description (ellipsoid/ovoid/harmonic/vertex) and `rm_streams` supplies
 * the CUDA streams used by the surface check and moment computations. */
__host__ void realize_mod_gpu( struct par_t *dpar, struct mod_t *dmod,
		unsigned char type, int nf, hipStream_t *rm_streams) {
	/* We need to realize each model component as a polyhedral solid with
	  triangular facets. The first step is to call realize_coordinates,
	  which computes the displacement of each vertex in this realization,
	  represented as a base displacement plus a vertex deviation (either
	  positive or negative) along a specified set of direction cosines.
	  Additionally, for each facet it computes the outward unit normal,
	  the area, the mean coordinates of the corner vertices, and (for
	  some scattering laws) the corresponding angular coordinates. */
	realize_coordinates_gpu(dpar, dmod, type, GPU0);
	/* For multiple-component models, figure out which facets lie on
	  the model's surface and which fall within some other component;
	  such facets will have their "act" (active) flag reset to zero. */
	check_surface_gpu(dmod, rm_streams);
	/* Compute the area and moments (volume, center of mass, and
	  inertia tensor) of each component and of the overall model */
	compute_moments_gpu(dmod, nf, rm_streams);
}
/* Compute the vertex coordinates and (if necessary) facet angular coordinates
for each component of the model's vertex realization */
__host__ void realize_coordinates_gpu( struct par_t *dpar, struct mod_t *dmod, unsigned char type, int gpuid){
	/* Realize the component as a polyhedral solid with triangular facets:
	 * compute each vertex displacement (base point plus deviation along
	 * fixed direction cosines), apply the component's rotational and
	 * translational offsets, then compute per-facet normals/areas/centroids
	 * and per-vertex normals.
	 *   dpar  - run parameters (bad-diameter flags are set on device)
	 *   dmod  - model to realize (single-component models only for now)
	 *   type  - component type (ELLIPSE, OVOID, HARMONIC, VERTEX)
	 *   gpuid - id of the GPU executing this call (compared against GPU0)
	 * Fix: the dnf and dns device-to-host copies below previously used
	 * sizeof(nv); harmless while nv/nf/ns are all int, but wrong if the
	 * types ever diverge. Each copy now sizes its own destination. */
	dim3 THD;
	THD.x = maxThreadsPerBlock;
	/* Call kernel to initialize flag for tiny/negative ellipsoid diameters
	 * and to cache the vertex/facet/side counts in device globals dnv/dnf/dns */
	hipLaunchKernelGGL(( set_diam_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod, gpuid, GPU0);
	checkErrorAfterKernelLaunch("set_diam_krnl");
	/* Note: The CUDA-code assumes a single-component model for now. */
	/* Copy nv, nf, ns back from device copies dnv, dnf, dns; used as launch
	 * parameters below */
	gpuErrchk(hipMemcpyFromSymbol(&nv, dnv, sizeof(nv), 0, hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpyFromSymbol(&nf, dnf, sizeof(nf), 0, hipMemcpyDeviceToHost));
	gpuErrchk(hipMemcpyFromSymbol(&ns, dns, sizeof(ns), 0, hipMemcpyDeviceToHost));
	/* Calculate launch parameters for kernels going over vertices (nvBLK) and
	 * facets (nfBLK); the grid sizes are ceil-divisions of the counts */
	nvTHD.x = nfTHD.x = THD.x = maxThreadsPerBlock;
	nvBLK.x = floor((THD.x - 1 + nv) / THD.x);
	nfBLK.x = floor((THD.x - 1 + nf) / THD.x);
	/* Check component type & create corresponding vertex realization. */
	switch (type) {
	case ELLIPSE:
		/* To avoid negative diameters/very small positive diameters,
		 * adjust the function a[i] = 1/radius[i]^2 so it monotonically
		 * increases as diameter[i] decreases through zero and beyond,
		 * rather than being symmetric about zero diameter. Also set flag
		 * "baddiam" when any diameter is very small or negative, so that
		 * extra penalties can later be applied to this model. */
		hipLaunchKernelGGL(( ellipse_diameter_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("ellipse_diameter_krnl");
		/* Kernel finds distance of each vertex to ellipsoid's center */
		hipLaunchKernelGGL(( ellipse_distance_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("ellipse_distance_krnl");
		/* Launch kernel to set real->scalefactor */
		hipLaunchKernelGGL(( ellipse_scalefactor_krnl), dim3(1),dim3(1), 0, 0, dmod);
		checkErrorAfterKernelLaunch("ellipse_scalefactor_krnl");
		break;
	case OVOID:
		/* Determine all shape parameters, making sure that none are out of bounds */
		hipLaunchKernelGGL(( set_ovoid_parameters_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("set_ovoid_parameters_krnl");
		/* Kernel finds distance of each vertex to ovoid's center */
		hipLaunchKernelGGL(( ovoid_distance_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("ovoid_distance_krnl");
		break;
	case HARMONIC:
		/* Kernel sets parameters associated with harmonic model */
		hipLaunchKernelGGL(( harmonic_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("harmonic_krnl");
		THD.x = 3;
		hipLaunchKernelGGL(( harmonic_scalefactor_krnl), dim3(1),dim3(THD), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("harmonic_scalefactor_krnl");
		break;
	case VERTEX:
		/* The vertex type is its own realization, but we still need to update
		 * the values of the "scale factor" parameters and update any vertex
		 * deviations that have the '=' state */
		hipLaunchKernelGGL(( vertex_update_dev_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("vertex_update_dev_kernel");
		THD.x = 3;
		hipLaunchKernelGGL(( vertex_scalefactor_krnl), dim3(1),dim3(THD), 0, 0, dpar, dmod);
		checkErrorAfterKernelLaunch("vertex_scalefactor_krnl");
		break;
	default:
		printf("realize_mod.c: don't know that component type\n");
	} /* end of switch statement for component type */
	/* Calculate vertex coordinates for this component */
	hipLaunchKernelGGL(( calc_vertex_co_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
	checkErrorAfterKernelLaunch("calc_vertex_co_krnl");
	/* Use this component's rotational offset angles to create comp[c].m, the
	 * rotation matrix that will be applied to the vertex coordinates */
	hipLaunchKernelGGL(( euler2mat_realize_mod_krnl), dim3(1),dim3(1), 0, 0, dmod);
	checkErrorAfterKernelLaunch("dev_euler2mat");
	/* If needed, perform rotation on this component */
	hipLaunchKernelGGL(( perform_rotation_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
	checkErrorAfterKernelLaunch("perform_rotation_krnl");
	/* If needed, perform translation on this component */
	hipLaunchKernelGGL(( perform_translation_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod);
	checkErrorAfterKernelLaunch("perform_translation_krnl, line 651");
	/* Figure out if optical/radar harmonic scattering laws are in use
	 * and set the flag harmonic_scatlaw accordingly */
	hipLaunchKernelGGL(( set_optical_params_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod);
	checkErrorAfterKernelLaunch("set_optical_params_krnl, line 656");
	/* For each facet of this component, compute outward unit normal, area,
	 * mean coordinates of the three corner vertices, and corresponding angular
	 * coordinates (for some scattering laws) */
	hipLaunchKernelGGL(( facet_krnl), dim3(nfBLK),dim3(nfTHD), 0, 0, dpar, dmod);
	checkErrorAfterKernelLaunch("facet_krnl, line 662");
	/* Calculate vertex normals for this component as normalized sums of the
	 * facet normals for all facets attached to each vertex */
	hipLaunchKernelGGL(( calc_vertex_nrmls_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dmod);
	checkErrorAfterKernelLaunch("calc_vertex_nrmls, line 667");
}
/* Determine which vertices, facets, and sides of a multiple-component
model lie interior to the model rather than on the model's surface,
and reset their "act" (active) flags to zero */
__host__ void check_surface_gpu(struct mod_t *dmod, hipStream_t *rm_streams) {
	/* Single-component case of the surface check: every vertex, facet, and
	 * side is on the surface, so just set all "act" flags to 1, using three
	 * streams (rm_streams[0..2]) so the three flag kernels can overlap.
	 * NOTE(review): relies on the file-scope statics nv/nf/ns having been
	 * filled in by a prior realize_coordinates_gpu call -- confirm callers
	 * always go through realize_mod_gpu. */
	dim3 THD; THD.x = maxThreadsPerBlock;
	/* Calculate launch parameters (ceil-division grid sizes) */
	nvBLK.x = floor((THD.x - 1 + nv) / THD.x);
	nfBLK.x = floor((THD.x - 1 + nf) / THD.x);
	nsBLK.x = floor((THD.x - 1 + ns) / THD.x);
	/* 1-component model: flag all vertices and facets as active, then return */
	hipLaunchKernelGGL(( set_real_active_vert_krnl), dim3(nvBLK),dim3(THD),0,rm_streams[0], dmod);
	hipLaunchKernelGGL(( set_real_active_facet_krnl), dim3(nfBLK),dim3(THD),0,rm_streams[1], dmod);
	hipLaunchKernelGGL(( set_real_active_side_krnl), dim3(nsBLK),dim3(THD),0,rm_streams[2], dmod);
	checkErrorAfterKernelLaunch("set_real_active_side_krnl");
	/* Synchronize streams to default stream */
	for (int f=0; f<3; f++)
		hipStreamSynchronize(rm_streams[f]);
}
__global__ void comp_moments_1stinit_krnl(struct mod_t *dmod) {
	/* Single-thread kernel: zero the whole-model surface area, volume,
	 * center-of-mass vector, and 3x3 inertia tensor before the per-facet
	 * moment contributions are accumulated. */
	if (threadIdx.x != 0)
		return;
	dmod->shape.area = 0.0;
	dmod->shape.volume = 0.0;
	for (int row=0; row<3; row++) {
		dmod->shape.com[row] = 0.0;
		for (int col=0; col<3; col++)
			dmod->shape.inertia[row][col] = 0.0;
	}
}
__global__ void comp_moments_2ndinit_krnl(struct mod_t *dmod, double area1,
		double area2, int c) {
	/* Single-threaded kernel - stores the precomputed surface areas and
	 * zeroes the per-component volume, COM, and inertia accumulators ahead
	 * of the facet-moment reduction.
	 *   area1 - surface area of component c (from compute_model_area)
	 *   area2 - active-facet area of the overall model (equals area1 for
	 *           the single-component case currently supported)
	 *   c     - component index
	 * NOTE(review): the body mixes comp[c] with hard-coded comp[0], and the
	 * final line zeroes comp[0].area immediately after comp[c].area was set
	 * to area1. Per the original trailing comment the zeroing is deliberate
	 * (the field is re-accumulated later), but the whole kernel is only
	 * self-consistent while c == 0 -- confirm before adding multi-component
	 * support. */
	if (threadIdx.x == 0) {
		int j, k;
		dmod->shape.comp[c].area = area1;
		dmod->shape.area = area2;
		dmod->shape.comp[0].volume = 0.0;
		for (k=0; k<=2; k++) {
			dmod->shape.comp[0].com[k] = 0.0;
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].inertia[k][j] = 0.0;
		}
		dmod->shape.comp[0].area = 0.0; // actually 1st step in calculating surface area
	}
}
__global__ void comp_moments_facet_krnl(struct mod_t *dmod, int c, double *dvarr,
		double *dcom0, double *dcom1, double *dcom2, double *dI00, double *dI01,
		double *dI02, double *dI10, double *dI11, double *dI12, double *dI20,
		double *dI21, double *dI22) {
	/* nf-threaded kernel (one thread per facet). dev_facmom computes facet
	 * f's contribution to the component's volume (dv), first moment
	 * (dcom[3]), and inertia tensor (dI[3][3]); each thread scatters its
	 * values into the thirteen flat length-nf arrays so a subsequent
	 * parallel reduction (dvdI_reduce_gpu) can sum them.
	 * NOTE(review): vertex positions and normals are read through comp[c]
	 * but the facet/vertex index tables and the bound check use hard-coded
	 * comp[0]; consistent only while c == 0 -- confirm before extending to
	 * multi-component models. */
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	double dI[3][3], dcom[3], dv;
	if (f < dmod->shape.comp[0].real.nf)
	{
		dev_facmom(dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x,
				dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x,
				dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x,
				dmod->shape.comp[c].real.f[f].n, &dv, dcom, dI);
		/* Assign calculated dv, dcom, dI to each facet for later parallel reduction */
		dvarr[f] = dv;
		dcom0[f] = dcom[0]; dcom1[f]= dcom[1]; dcom2[f]= dcom[2];
		dI00[f] = dI[0][0]; dI01[f] = dI[0][1]; dI02[f] = dI[0][2];
		dI10[f] = dI[1][0]; dI11[f] = dI[1][1]; dI12[f] = dI[1][2];
		dI20[f] = dI[2][0]; dI21[f] = dI[2][1]; dI22[f] = dI[2][2];
	}
}
__global__ void comp_moments_com_krnl(struct mod_t *dmod) {
	/* Single-thread kernel: finish the center-of-mass computation by
	 * dividing the accumulated first moments by the corresponding volumes,
	 * for component 0 and for the overall model.
	 * (Removed a leftover dead store "j = 2;" that followed the loop.) */
	if (threadIdx.x == 0) {
		int j;
		for (j=0; j<=2; j++) {
			dmod->shape.comp[0].com[j] /= dmod->shape.comp[0].volume;
			dmod->shape.com[j] /= dmod->shape.volume;
		}
	}
}
// TO DO: all functions in this file need to be adapated to handle multi-component
// models, which they currently do not support.
__host__ void compute_moments_gpu(struct mod_t *dmod, int nf, hipStream_t *cm_streams)
{
	/* Compute the model's surface area and moments (volume, center of mass,
	 * inertia tensor), assuming uniform density.
	 *   dmod       - model (single-component only; see c=0 below)
	 *   nf         - number of facets; sizes the 13 scratch arrays
	 *   cm_streams - streams handed to the reduction stage
	 * Flow: zero accumulators -> per-facet dv/com/inertia contributions into
	 * flat arrays -> parallel reduction -> divide moments by volume. */
	double area1=0.0, area2=0.0, *dv, *dcom0, *dcom1, *dcom2, *dI00, *dI01, *dI02,
			*dI10, *dI11, *dI12, *dI20, *dI21, *dI22;
	size_t arrsz = sizeof(double)*nf;
	int c=0; // FIX THIS (hard-coded single component)
	/* Initialize the model's surface area, volume, center-of-mass (COM)
	 * displacement, and inertia tensor */
	hipLaunchKernelGGL(( comp_moments_1stinit_krnl), dim3(1),dim3(1), 0, 0, dmod);
	checkErrorAfterKernelLaunch("comp_moments_init_krnl, line 945");
	/* CUDA note: Only single-component models for now.
	 * Loop over all model components, computing areas and moments (volume,
	 * center of mass, and inertia tensor); COM and inertia tensor are computed
	 * assuming uniform density. For multiple-component models, when computing
	 * the area and the moments for overall model, ignore facets interior to
	 * the model (i.e., that are inside some other component). */
	/* Note that area2 (area of active facets summed up) is not currently
	 * implemented. A single-component model is assumed, in which case every
	 * facet is active and area1=area2 */
	// for (c=0; c<dmod->shape.ncomp; c++) {
	area1 = compute_model_area(dmod, c, nf);
	area2 = area1;
	/* Allocate temporary dv, dcom, dI pointers (one double per facet each) */
	gpuErrchk(hipMalloc((void**)&dv, arrsz));
	gpuErrchk(hipMalloc((void**)&dcom0,arrsz));
	gpuErrchk(hipMalloc((void**)&dcom1,arrsz));
	gpuErrchk(hipMalloc((void**)&dcom2,arrsz));
	gpuErrchk(hipMalloc((void**)&dI00, arrsz));
	gpuErrchk(hipMalloc((void**)&dI01, arrsz));
	gpuErrchk(hipMalloc((void**)&dI02, arrsz));
	gpuErrchk(hipMalloc((void**)&dI10, arrsz));
	gpuErrchk(hipMalloc((void**)&dI11, arrsz));
	gpuErrchk(hipMalloc((void**)&dI12, arrsz));
	gpuErrchk(hipMalloc((void**)&dI20, arrsz));
	gpuErrchk(hipMalloc((void**)&dI21, arrsz));
	gpuErrchk(hipMalloc((void**)&dI22, arrsz));
	/* Set area and initialize per-component COM and Inertia arrays */
	hipLaunchKernelGGL(( comp_moments_2ndinit_krnl), dim3(1),dim3(1), 0, 0, dmod, area1, area2, c);
	checkErrorAfterKernelLaunch("comp_moments_2ndinit_krnl");
	/* Load the temporary arrays with per-facet moment contributions */
	hipLaunchKernelGGL(( comp_moments_facet_krnl), dim3(nfBLK),dim3(nfTHD), 0, 0, dmod, c, dv, dcom0, dcom1, dcom2,
			dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22);
	checkErrorAfterKernelLaunch("comp_moments_facets_krnl64");
	/* Reduce the per-facet arrays into the model's volume/COM/inertia;
	 * for active facets this also adds contributions to the overall model */
	dvdI_reduce_gpu(dmod, dv, dcom0, dcom1, dcom2, dI00, dI01, dI02,
			dI10, dI11, dI12, dI20, dI21, dI22, nf, c, cm_streams);
	/* This kernel computes the overall COM vector (moment / volume) */
	hipLaunchKernelGGL(( comp_moments_com_krnl), dim3(1),dim3(1), 0, 0, dmod);
	checkErrorAfterKernelLaunch("comp_moments_com_krnl");
	/* Free up the temporary arrays */
	hipFree(dv);
	hipFree(dcom0); hipFree(dcom1); hipFree(dcom2);
	hipFree(dI00); hipFree(dI01); hipFree(dI02);
	hipFree(dI10); hipFree(dI11); hipFree(dI12);
	hipFree(dI20); hipFree(dI21); hipFree(dI22);
}
#undef HAIRWIDTH
#undef SMALLRATIO
#undef SMALLOVOIDK1
#undef SMALLOVOIDK2
#undef OVOIDTOL
#undef MAXEDGE
#undef EDGETOL
#undef RTOL
#undef SMALLCOEFF3
| 819adb462ab6bb7f215791bd2988fedfd8c5f75f.cu | /*****************************************************************************************
realize_mod.c
Takes a struct mod_t model and "realizes" its components as polyhedral solids made up of
triangular facets.
Modified 2016 July 9 by Matthias Engels:
Adapted for use with shape-cuda.
------------------------------------------------------------------------------------------
Modified 2014 April 26 by CM:
Increase the minimum permitted value of the highest-order coefficient in the cubic
equation that locates an ovoid vertex: if the coefficient is smaller than this
minimum, treat it as if it's zero and solve a quadratic equation instead
Modified 2014 March 22 by CM:
Relax the tolerance for finding a valid ovoid vertex position
Modified 2014 March 10 by CM:
Guard against roundoff problems when computing vertex positions for ovoid components
with very small |k|
Modified 2014 February 10 by CM:
Implement multiple radar and optical scattering laws
Modified 2013 August 28 by CM:
Set the bad diameter flag for harmonic components with tiny or negative vertex
displacements, and for harmonic and vertex components with tiny or negative
"scale factor" values
Modified 2013 June 2 by CM:
In the cubic_realroot routine, initialize nrealroots to avoid compilation warning
Fix a comment
Modified 2013 May 20 by CM:
Implement ovoid shape components
Modified 2012 July 4 by CM:
Add test in "realize_coordinates" routine to avoid compilation warning
Modified 2011 September 2 by CM:
Bug fix: the "check_surface" routine makes use of facet normals when identifying
active vs. inactive vertices and facets, but facet normals weren't being computed
until *after* check_surface was called
Make the code more modular (and address the above bug) by introducing the
"realize_coordinates" and "compute_moments" routines, as per the version of
realize_mod in the SHERMAN package
Store the area and the centroid coordinates of each facet
Add "harmlambert" optical scattering law (compute facet angular coordinates)
Modified 2010 September 1 by CM:
Add "facetnorm" argument to the rayfacint routine
Modified 2010 June 1 by CM:
Change "scalefactor" parameter from a scalar to a 3-component vector
Modified 2010 March 19 by CM:
Implement '=' state for vertex deviations
Modified 2009 November 15 by CM:
In the "check_surface" routine, eliminate an unused variable and fix
a couple of ambiguous nested if-then-else statements
Modified 2009 August 3 by CM:
For the "harmlommel" "harmhapke" "harmkaas" and "harmcosine_diff"
inhomogeneous scattering laws, compute the spherical coordinates
(theta and phi) of each facet after each component's rotational and
translational offsets have been applied rather than before, so that
these laws can be used for multiple-component models
For multiple-component models, use a more careful method (already used
for facets) to determine which vertices are on the model's surface;
also, for both vertices and facets, allow for a bit of roundoff
error in this determination by adding a tolerance argument to the
"rayfacint" routine
For multiple-component models, determine the new "act" (active) flag
for each model side
For multiple-component models, fix a bug in computing the center of mass
for individual components
Modified 2009 July 5 by CM:
Turn each component's rotational offsets into a rotation matrix here
rather than in the "read_mod" routine, in case the offsets are
being allowed to float
Modified 2009 July 1 by CM:
Add "check_surface" routine that determines which facets of a
multiple-component model lie on the model's surface rather than
interior to the model
For multiple-component models, when computing the area and the moments
of the overall model, ignore facets that lie interior to the model
Modified 2009 April 3 by CM:
Fix slight bug in defining function a[i] = 1/radius^2 when a/b or b/c
is tiny or negative for ellipsoid components
Initialize the "baddiam_logfactor" parameter and set its value when
2a, a/b, or b/c is tiny or negative for ellipsoid components
Modified 2007 August 10 by CM:
Eliminate unused variable
Modified 2007 January 8 by CM:
Define "scalefactor" state for vertex realizations of ellipsoid and
harmonic components, not just its value
Modified 2006 October 1 by CM:
Add "scalefactor" to harmonic and vertex shape structures
Replace ellipsoid diameters D with two_a, a_over_b, b_over_c
Modified 2005 September 6 by CM:
Add computation of facet angular coordinates for use with harmonic
scattering laws
Modified 2005 August 17 by CM:
Move computation of spherical harmonic functions afactor and bfactor
from here to read_mod.c, so that it can be done just once per fit
Modified 2005 February 28 by CM:
Initialize the "baddiam" parameter (flag indicating tiny or negative
ellipsoid diameters) to 0 here rather than in bestfit.c so that
it can be used for actions other than "fit"
Modified 2004 August 23 by CM:
Eliminated newtheta and oldcostheta variables and THETATOL constant,
since they weren't actually being used (i.e., the test in which
they were included was always true)
Modified 2003 April 17 by CM:
Added computation of component and model moments; this used to
be done in function penalties (but wasn't always being done)
Added code to cope with tiny or negative ellipsoid diameters;
as a result, must now pass the model's parameter structure
as an argument to realize_mod
Added surface area computation for components and for the full model
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
}
#define HAIRWIDTH 1.0e-7
#define SMALLRATIO 0.01
#define SMALLOVOIDK1 0.01
#define SMALLOVOIDK2 1.0e-6
#define OVOIDTOL 1.0e-6
#define MAXEDGE 100
#define EDGETOL 1.0e-14
#define RTOL 1000*EDGETOL
#define SMALLCOEFF3 1.0e-5
/* These 2 device variables are to get nf and nv from the GPU-located dmod file */
__device__ int dnv, dnf, dns;
__device__ double d_a[3];
__device__ double a_radius, a_over_b, b_over_c, k_asym, x0term, numer, denom, x0;
__device__ int harmonic_scatlaw;
static int nv, nf, ns;
static dim3 nvBLK,nvTHD,nfBLK,nfTHD,nsBLK;
__host__ void realize_coordinates_gpu(struct par_t *dpar, struct mod_t *dmod, unsigned char type, int gpuid);
__host__ void compute_moments_gpu(struct mod_t *dmod, int nf, cudaStream_t *cm_streams);
__global__ void set_diam_krnl(struct par_t *dpar, struct mod_t *dmod, int gpuid, int gpu0){
	/* Single-thread kernel: clear the bad-diameter flag and its log factor
	 * (on the primary GPU only), and cache component 0's vertex, facet, and
	 * side counts in the device globals dnv/dnf/dns for later kernels. */
	if (threadIdx.x != 0)
		return;
	if (gpuid == gpu0) {
		dpar->baddiam = 0;
		dpar->baddiam_logfactor = 0;
	}
	dnv = dmod->shape.comp[0].real.nv;
	dnf = dmod->shape.comp[0].real.nf;
	dns = dmod->shape.comp[0].real.ns;
}
__global__ void ellipse_diameter_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Single-thread kernel: convert the ellipsoid shape parameters
	 * (2a, a/b, b/c) into the device-global coefficients d_a[i] = 1/radius_i^2
	 * used by ellipse_distance_krnl. Tiny or negative diameters/ratios are
	 * replaced by a monotonic extension of the 1/radius function (so the
	 * function keeps increasing as the diameter shrinks through zero rather
	 * than being symmetric about it), and the baddiam flag/log factor are
	 * set so penalties can be applied later. */
	double diam, diamratio;
	if (threadIdx.x == 0) {
		/* 2a: the long-axis diameter */
		diam = dmod->shape.comp[0].desc.ell.two_a.val;
		if (diam > HAIRWIDTH) {
			d_a[0] = 2.0/diam; /* 1/radii */
		} else {
			/* tiny/negative diameter: monotonic extension + penalty flag */
			d_a[0] = (2.0/HAIRWIDTH) * (1 + HAIRWIDTH - diam);
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - diam);
		}
		/* b-axis diameter from (possibly clamped) a-diameter and a/b ratio */
		diam = (2.0/d_a[0]);
		diamratio = dmod->shape.comp[0].desc.ell.a_over_b.val;
		if (diamratio > SMALLRATIO) {
			d_a[1] = 2.0/(diam/diamratio);
		} else {
			d_a[1] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio);
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio);
		}
		/* c-axis diameter from b-diameter and b/c ratio */
		diam = (2.0/d_a[1]);
		diamratio = dmod->shape.comp[0].desc.ell.b_over_c.val;
		if (diamratio > SMALLRATIO) {
			d_a[2] = 2.0/(diam/diamratio);
		} else {
			d_a[2] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio);
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio);
		}
		/* square the 1/radius values: d_a[i] becomes 1/radius_i^2 */
		d_a[0] *= d_a[0];
		d_a[1] *= d_a[1];
		d_a[2] *= d_a[2];
	}
}
__global__ void ellipse_distance_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: set each vertex's radial deviation r to the
	 * distance from the origin to the ellipsoid surface along the vertex's
	 * direction cosines, using d_a[i] = 1/radius_i^2 from
	 * ellipse_diameter_krnl. dpar is unused; kept for signature uniformity. */
	int offset = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	double den;
	if (offset < dmod->shape.comp[0].real.nv) {
		/* Routine setuprealver (called by setupreal, which was called by
		 * read_mod) already created as many ellipsoid vertices as were needed
		 * for specified value of theta_steps, and initialized direction
		 * cosines u[j] for each vertex to be
		 * sin(theta)cos(phi), sin(theta)sin(phi), and cos(theta) for
		 * j=0, 1, and 2, respectively.
		 *
		 * These values are x/r, y/r, and z/r, where r is distance from origin
		 * to ellipsoid surface along direction (theta, phi) for given vertex.
		 * Since an ellipsoid has (x/a)^2 + (y/b)^2 + (z/c)^2 = 1, quantity
		 * "den" in code below is equal to 1/(r^2) for vertex i.
		 *
		 * Note that setuprealver initialized all vertex "base points" a[j] to
		 * be zero for ellipsoid components; hence "deviation" r is in fact the
		 * entire thing. */
		den = 0.0;
		for (j=0; j<=2; j++)
			den += d_a[j]*( dmod->shape.comp[0].real.v[offset].u[j]
					* dmod->shape.comp[0].real.v[offset].u[j] );
		dmod->shape.comp[0].real.v[offset].r.val = 1/sqrt(den);
	}
}
__global__ void ellipse_scalefactor_krnl(struct mod_t *dmod) {
	/* Single-thread kernel: copy the float/fixed states of the three
	 * ellipsoid parameters (2a, a/b, b/c) onto the realization's
	 * scalefactor entries and set every scale-factor value to unity. */
	if (threadIdx.x != 0)
		return;
	dmod->shape.comp[0].real.scalefactor[0].state = dmod->shape.comp[0].desc.ell.two_a.state;
	dmod->shape.comp[0].real.scalefactor[1].state = dmod->shape.comp[0].desc.ell.a_over_b.state;
	dmod->shape.comp[0].real.scalefactor[2].state = dmod->shape.comp[0].desc.ell.b_over_c.state;
	for (int axis=0; axis<3; axis++)
		dmod->shape.comp[0].real.scalefactor[axis].val = 1.0;
}
__global__ void set_ovoid_parameters_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Single-thread kernel: copy the ovoid shape parameters into the
	 * device globals a_radius, a_over_b, b_over_c, k_asym, and compute x0
	 * (the x-offset that centers the ovoid's COM at the origin). Each
	 * parameter is clamped into its valid range with a smooth monotonic
	 * extension, setting baddiam and accumulating baddiam_logfactor so
	 * penalties can be applied later. */
	if (threadIdx.x == 0) {
		/* Determine all shape parameters, making sure that none are out of bounds */
		a_radius = dmod->shape.comp[0].desc.ovoid.two_a.val / 2;
		if (a_radius <= HAIRWIDTH/2) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*a_radius);
			a_radius = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*a_radius);
		}
		a_over_b = dmod->shape.comp[0].desc.ovoid.a_over_b.val;
		if (a_over_b <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - a_over_b);
			a_over_b = SMALLRATIO / (1 + SMALLRATIO - a_over_b);
		}
		b_over_c = dmod->shape.comp[0].desc.ovoid.b_over_c.val;
		if (b_over_c <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - b_over_c);
			b_over_c = SMALLRATIO / (1 + SMALLRATIO - b_over_c);
		}
		/* asymmetry parameter k must stay strictly inside (-1, 1) */
		k_asym = dmod->shape.comp[0].desc.ovoid.k.val;
		if (fabs(k_asym) > 1 - SMALLVAL) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(fabs(k_asym) + SMALLVAL);
			if (k_asym > 0.0)
				k_asym = 1 - SMALLVAL*(1 - SMALLVAL)/k_asym;
			else
				k_asym = -1 - SMALLVAL*(1 - SMALLVAL)/k_asym;
		}
		/* Compute x0, the x-offset that places the ovoid's center of mass at the
		 * origin; for small |k|, use an analytical approximation to avoid
		 * roundoff problems */
		if (fabs(k_asym) > SMALLOVOIDK1) {
			x0term = 3*(1 - k_asym*k_asym)*log((1 + k_asym)/(1 - k_asym));
			numer = 2.0*k_asym*(3 - 2*k_asym*k_asym) - x0term;
			denom = 2.0*k_asym*(3 - k_asym*k_asym) - x0term;
			x0 = (a_radius/k_asym)*(numer/denom);
		} else {
			x0 = 0.4*k_asym*a_radius;
		}
	}
}
__global__ void ovoid_distance_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: for each vertex, find the radial distance from
	 * the (COM-centered) origin to the ovoid surface along the vertex's
	 * direction cosines by solving the cubic whose coefficients are built
	 * below. Reads the device globals a_radius, a_over_b, b_over_c, k_asym,
	 * x0 set by set_ovoid_parameters_krnl. dpar is unused here.
	 * For very small |k_asym| the cubic degenerates; roundoff is avoided by
	 * starting from the exact ellipsoid (k = 0) distance and applying one
	 * Newton-style correction for nonzero k. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j, k, nrealroots;
	double a_over_c, h, alpha0, u_x, coeff[4], goodroot, realroot[3], x_over_a;
	if (i < dmod->shape.comp[0].real.nv) {
		a_over_c = a_over_b*b_over_c;
		/* h collects the scaled y/z direction-cosine terms of the quadric */
		h = a_over_b*a_over_b*dmod->shape.comp[0].real.v[i].u[1]
				*dmod->shape.comp[0].real.v[i].u[1] + a_over_c*a_over_c
				*dmod->shape.comp[0].real.v[i].u[2]*dmod->shape.comp[0].real.v[i].u[2];
		alpha0 = x0/a_radius;
		u_x = dmod->shape.comp[0].real.v[i].u[0];
		/* cubic in s = r/a_radius: coeff[3]*s^3 + ... + coeff[0] = 0 */
		coeff[3] = (h - u_x*u_x)*k_asym*u_x;
		coeff[2] = (1 + 3*k_asym*alpha0)*u_x*u_x + h*(1 - k_asym*alpha0);
		coeff[1] = (k_asym - (2 + 3*k_asym*alpha0)*alpha0)*u_x;
		coeff[0] = -(1 - alpha0*alpha0)*(1 + k_asym*alpha0);
		if (fabs(k_asym) <= SMALLOVOIDK2) {
			/* |k| is very small, so guard against roundoff error by
			 * computing the vertex position for an ellipsoid (k = 0) and then
			 * applying a first-order correction for nonzero k */
			goodroot = 1/sqrt(u_x*u_x + h);
			goodroot -= (coeff[3]*goodroot*goodroot*goodroot + coeff[1]*goodroot)
					/ (3*coeff[3]*goodroot*goodroot + 2*coeff[2]*goodroot + coeff[1]);
		} else {
			/* |k| isn't very small, so solve the cubic equation; keep the
			 * largest nonnegative root whose x/a lies within the ovoid's
			 * extent (|x/a - alpha0| <= 1, up to OVOIDTOL) */
			nrealroots = cubic_realroots_cuda( coeff, realroot);
			goodroot = -HUGENUMBER;
			for (k=0; k<nrealroots; k++)
				if (realroot[k] >= 0.0) {
					x_over_a = realroot[k]*u_x;
					if (fabs(x_over_a - alpha0) - 1 < OVOIDTOL)
						goodroot = MAX( goodroot, realroot[k]);
				}
		}
		if (goodroot < 0.0)
			printf("Can't compute vertex displacement for ovoid vertex %d\n", i);
		dmod->shape.comp[0].real.v[i].r.val = goodroot*a_radius;
		/* Assign scalefactor values (unity; states follow the ovoid params).
		 * NOTE(review): every thread redundantly writes the same three
		 * scalefactor entries -- benign but wasteful. */
		dmod->shape.comp[0].real.scalefactor[0].state = dmod->shape.comp[0].desc.ovoid.two_a.state;
		dmod->shape.comp[0].real.scalefactor[1].state = dmod->shape.comp[0].desc.ovoid.a_over_b.state;
		dmod->shape.comp[0].real.scalefactor[2].state = dmod->shape.comp[0].desc.ovoid.b_over_c.state;
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.scalefactor[j].val = 1.0;
	}
}
__global__ void harmonic_krnl(struct par_t *dpar, struct mod_t *dmod){
	/* nv-threaded kernel: evaluate the spherical-harmonic expansion of the
	 * radial displacement at each vertex, using the precomputed basis
	 * values afactor/bfactor (filled in by read_mod) and the model's
	 * a/b coefficients. Tiny or negative displacements are clamped with a
	 * monotonic extension and flagged via baddiam, with the log factor
	 * scaled by 1/(L+1)^2 (one share per harmonic coefficient). */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int L, l, m;
	double r;
	if (i < dnv) {
		L = dmod->shape.comp[0].desc.har.nhar;	/* max harmonic degree */
		r = 0.0;
		for (l=0; l<=L; l++) {
			/* m = 0 term has no b coefficient */
			r += dmod->shape.comp[0].desc.har.a[l][0].val
					* dmod->shape.comp[0].real.v[i].afactor[l][0];
			for (m=1; m<=l; m++)
				r += dmod->shape.comp[0].desc.har.a[l][m].val
						* dmod->shape.comp[0].real.v[i].afactor[l][m]
						+ dmod->shape.comp[0].desc.har.b[l][m].val
						* dmod->shape.comp[0].real.v[i].bfactor[l][m];
		}
		if (r > HAIRWIDTH/2) {
			dmod->shape.comp[0].real.v[i].r.val = r;
		} else {
			/* tiny/negative displacement: clamp + penalty flag */
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*r) / ((L+1)*(L+1));
			dmod->shape.comp[0].real.v[i].r.val = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*r);
		}
	}
}
__global__ void harmonic_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Launched with 3 threads, but the work is serialized on thread 0
	 * (matching vertex_scalefactor_krnl): copy the harmonic model's three
	 * scale factors onto the realization, propagating the '=' state
	 * (scalefactor[j] copies scalefactor[j-1]) and clamping tiny/negative
	 * values with the baddiam penalty machinery.
	 *
	 * Bug fix: the previous per-thread version let thread j read
	 * scalefactor[j-1].val while thread j-1 might still be writing it --
	 * chained '=' states therefore depended on unspecified thread ordering.
	 * The sequential loop makes the propagation deterministic. */
	if (threadIdx.x == 0) {
		for (int j=0; j<=2; j++) {
			if (j > 0 && dmod->shape.comp[0].desc.har.scalefactor[j].state == '=')
				dmod->shape.comp[0].desc.har.scalefactor[j].val
						= dmod->shape.comp[0].desc.har.scalefactor[j-1].val;
			dmod->shape.comp[0].real.scalefactor[j].state = dmod->shape.comp[0].desc.har.scalefactor[j].state;
			dmod->shape.comp[0].real.scalefactor[j].val = dmod->shape.comp[0].desc.har.scalefactor[j].val;
			if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) {
				/* tiny/negative scale factor: clamp + penalty flag */
				dpar->baddiam = 1;
				dpar->baddiam_logfactor += log(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
				dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO
						/ (1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
			}
		}
	}
}
__global__ void vertex_update_dev_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: for every vertex whose deviation carries the '='
	 * (mirrored) state, copy the deviation value from its mirror vertex.
	 * dpar is unused; kept for launch-signature uniformity. */
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= dmod->shape.comp[0].real.nv)
		return;
	if (dmod->shape.comp[0].real.v[idx].r.state != '=')
		return;
	int mirror = dmod->shape.comp[0].real.v[idx].v_mirror;
	dmod->shape.comp[0].real.v[idx].r.val =
			dmod->shape.comp[0].real.v[mirror].r.val;
}
__global__ void vertex_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Launched with 3 threads, but all work is serialized on thread 0 so
	 * that the '=' state (scalefactor[j] copies scalefactor[j-1]) is
	 * propagated in order. Copies the vertex model's three scale factors
	 * onto the realization, clamping tiny/negative values via the baddiam
	 * penalty machinery. */
	//int j = threadIdx.x;
	if (threadIdx.x == 0) {
		for (int j=0; j<=2; j++) {
			if (j > 0 && dmod->shape.comp[0].desc.ver.scalefactor[j].state == '=')
				dmod->shape.comp[0].desc.ver.scalefactor[j].val
						= dmod->shape.comp[0].desc.ver.scalefactor[j-1].val;
			dmod->shape.comp[0].real.scalefactor[j].val = dmod->shape.comp[0].desc.ver.scalefactor[j].val;
			if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) {
				/* tiny/negative scale factor: clamp + penalty flag */
				dpar->baddiam = 1;
				dpar->baddiam_logfactor += log(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
				dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO
						/ (1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
			}
		}
	}
	/* Barrier is outside the divergent branch, so all 3 threads reach it;
	 * with only thread 0 writing, it is harmless but not strictly needed. */
	__syncthreads();
}
__global__ void calc_vertex_co_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: convert each vertex's base point a[j], direction
	 * cosines u[j], and radial deviation r into Cartesian coordinates,
	 *     x[j] = scalefactor[j] * (u[j]*r + a[j]).
	 * dpar is unused; kept for launch-signature uniformity. */
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= dnv)
		return;
	for (int j=0; j<3; j++) {
		double unscaled = dmod->shape.comp[0].real.v[idx].u[j]
				* dmod->shape.comp[0].real.v[idx].r.val
				+ dmod->shape.comp[0].real.v[idx].a[j];
		dmod->shape.comp[0].real.v[idx].x[j] =
				dmod->shape.comp[0].real.scalefactor[j].val * unscaled;
	}
}
__global__ void perform_rotation_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: rotate every vertex of component 0 by the
	 * rotation matrix comp[0].m (built from the component's rotational
	 * offsets by euler2mat_realize_mod_krnl), skipping the work entirely
	 * when all three offsets are zero. dpar is unused; kept for
	 * launch-signature uniformity.
	 *
	 * Bug fix: the gating flag used to live in an uninitialized per-thread
	 * register ("int test") that only thread 0 conditionally wrote;
	 * __syncthreads() cannot broadcast a register to other threads, so
	 * every thread other than 0 read an indeterminate value (and even
	 * thread 0 read it uninitialized in the zero-offset case). Each thread
	 * now evaluates the cheap zero-offset test itself. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	double x[3];
	int j, k;
	int need_rotation = !(dmod->shape.comp[0].rot[0].val == 0 &&
	                      dmod->shape.comp[0].rot[1].val == 0 &&
	                      dmod->shape.comp[0].rot[2].val == 0 );
	if (i < dnv && need_rotation) {
		/* x = m * v[i].x (3x3 matrix-vector product) */
		for (j=0; j<=2; j++) {
			x[j] = 0.0;
			for (k=0; k<=2; k++)
				x[j] += dmod->shape.comp[0].m[j][k] * dmod->shape.comp[0].real.v[i].x[k];
		}
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.v[i].x[j] = x[j];
	}
}
__global__ void perform_translation_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nv-threaded kernel: add the component's linear offsets off[0..2] to
	 * every vertex position, skipping the work when all three offsets are
	 * zero. dpar is unused; kept for launch-signature uniformity.
	 *
	 * Bug fix: as in perform_rotation_krnl, the gating flag was an
	 * uninitialized per-thread register written only (conditionally) by
	 * thread 0; __syncthreads() cannot broadcast a register, so the other
	 * threads read garbage. Each thread now evaluates the zero-offset test
	 * itself. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	int need_translation = !(dmod->shape.comp[0].off[0].val == 0.0 &&
	                         dmod->shape.comp[0].off[1].val == 0.0 &&
	                         dmod->shape.comp[0].off[2].val == 0.0 );
	if (i < dnv && need_translation) {
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.v[i].x[j] += dmod->shape.comp[0].off[j].val;
	}
}
__global__ void set_optical_params_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* Single-thread kernel: set the device-global flag harmonic_scatlaw to
	 * 1 if any optical law is a harmonic variant (HARMLAMBERT, HARMLOMMEL,
	 * HARMHAPKE, HARMKAAS) or any radar law is HARMCOSINE_DIFF; facet_krnl
	 * uses the flag to decide whether facet angular coordinates are needed.
	 * dpar is unused; kept for launch-signature uniformity.
	 *
	 * Fix: the initial "harmonic_scatlaw = 0" store used to sit outside the
	 * threadIdx guard, which races with the guarded updates under any
	 * launch wider than <<<1,1>>>; it now lives inside the guard. */
	int ilaw;
	if (threadIdx.x == 0) {
		harmonic_scatlaw = 0;
		for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++)
			if (dmod->photo.opttype[ilaw] == HARMLAMBERT || dmod->photo.opttype[ilaw] == HARMLOMMEL
					|| dmod->photo.opttype[ilaw] == HARMHAPKE
					|| dmod->photo.opttype[ilaw] == HARMKAAS)
				harmonic_scatlaw = 1;
		for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++)
			if (dmod->photo.radtype[ilaw] == HARMCOSINE_DIFF)
				harmonic_scatlaw = 1;
	}
}
__global__ void calc_vertex_nrmls_krnl(struct mod_t *dmod) {
	/* nv-threaded kernel: set each vertex normal to the normalized sum of
	 * the normals of every facet attached to that vertex (facet normals
	 * must already be available; see facet_krnl). */
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= dmod->shape.comp[0].real.nv)
		return;
	double sum[3] = {0.0, 0.0, 0.0};
	int nattached = dmod->shape.comp[0].real.v[idx].naf;
	for (int j=0; j<nattached; j++) {
		int fidx = dmod->shape.comp[0].real.v[idx].af[j];
		for (int k=0; k<3; k++)
			sum[k] += dmod->shape.comp[0].real.f[fidx].n[k];
	}
	dev_normalize( sum);
	for (int k=0; k<3; k++)
		dmod->shape.comp[0].real.v[idx].n[k] = sum[k];
}
__global__ void facet_krnl(struct par_t *dpar, struct mod_t *dmod) {
	/* nf-threaded kernel (one thread per facet of component 0).
	 * For each facet: dev_facnrm's return value is stored as the facet
	 * area (presumably it also fills in the outward unit normal f[f].n,
	 * which calc_vertex_nrmls_krnl reads later -- TODO confirm against
	 * dev_facnrm's definition); the centroid of the three corner vertices
	 * is stored in f[f].x; and when a harmonic scattering law is in use
	 * (device flag harmonic_scatlaw, set by set_optical_params_krnl) the
	 * centroid's spherical angles theta/phi are recorded.
	 * dpar is unused here; kept for launch-signature uniformity. */
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	if (f < dnf) {
		dmod->shape.comp[0].real.f[f].area = dev_facnrm(dmod->shape.comp[0].real, f);
		/* Facet centroid = mean of its three corner-vertex positions */
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.f[f].x[j] = (dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x[j] +
					dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x[j] +
					dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x[j] )/3;
		if (harmonic_scatlaw) {
			/* theta = angle from +z axis, phi = azimuth, of the centroid */
			dmod->shape.comp[0].real.f[f].theta = atan2( sqrt(dmod->shape.comp[0].real.f[f].x[0]*dmod->shape.comp[0].real.f[f].x[0] +
					dmod->shape.comp[0].real.f[f].x[1]*dmod->shape.comp[0].real.f[f].x[1] ),
					dmod->shape.comp[0].real.f[f].x[2]);
			dmod->shape.comp[0].real.f[f].phi = atan2( dmod->shape.comp[0].real.f[f].x[1], dmod->shape.comp[0].real.f[f].x[0]);
		}
	}
}
__global__ void set_real_active_vert_krnl(struct mod_t *dmod) {
	/* One thread per vertex: flag every vertex of component 0 as active. */
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int nverts = dmod->shape.comp[0].real.nv;
	if (tid < nverts)
		dmod->shape.comp[0].real.v[tid].act = 1;
}
__global__ void set_real_active_facet_krnl(struct mod_t *dmod) {
	/* One thread per facet: flag every facet of component 0 as active. */
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int nfacets = dmod->shape.comp[0].real.nf;
	if (tid < nfacets)
		dmod->shape.comp[0].real.f[tid].act = 1;
}
__global__ void set_real_active_side_krnl(struct mod_t *dmod) {
	/* One thread per side (edge): flag every side of component 0 as active. */
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int nsides = dmod->shape.comp[0].real.ns;
	if (tid < nsides)
		dmod->shape.comp[0].real.s[tid].act = 1;
}
__host__ void realize_mod_gpu( struct par_t *dpar, struct mod_t *dmod,
		unsigned char type, int nf, cudaStream_t *rm_streams) {
	/* Top-level host driver: realize the shape model on the GPU and then
	 * compute its geometric properties.  The three phases below must run
	 * in this order. */
	/* We need to realize each model component as a polyhedral solid with
	   triangular facets. The first step is to call realize_coordinates,
	   which computes the displacement of each vertex in this realization,
	   represented as a base displacement plus a vertex deviation (either
	   positive or negative) along a specified set of direction cosines.
	   Additionally, for each facet it computes the outward unit normal,
	   the area, the mean coordinates of the corner vertices, and (for
	   some scattering laws) the corresponding angular coordinates. */
	realize_coordinates_gpu(dpar, dmod, type, GPU0);
	/* For multiple-component models, figure out which facets lie on
	   the model's surface and which fall within some other component;
	   such facets will have their "act" (active) flag reset to zero. */
	check_surface_gpu(dmod, rm_streams);
	/* Compute the area and moments (volume, center of mass, and
	   inertia tensor) of each component and of the overall model */
	compute_moments_gpu(dmod, nf, rm_streams);
}
/* Compute the vertex coordinates and (if necessary) facet angular coordinates
for each component of the model's vertex realization */
__host__ void realize_coordinates_gpu( struct par_t *dpar, struct mod_t *dmod, unsigned char type, int gpuid){
	/* Host driver: build the vertex realization for the (single) model
	 * component on the GPU.  Dispatches on the component type to create
	 * the vertex deviations, then computes vertex coordinates, applies
	 * rotation/translation offsets, and fills in per-facet and per-vertex
	 * geometry.  Side effects: updates the file-scope launch dims
	 * nvBLK/nfBLK/nvTHD/nfTHD and the host copies nv/nf/ns. */
	dim3 THD;
	THD.x = maxThreadsPerBlock;
	/* Loop over all model components, realizing each one as a polyhedral solid
	 * with triangular facets. Compute displacement of each vertex in this
	 * realization, represented as a base displacement plus a vertex deviation
	 * (positive or negative) along a specified set of direction cosines*/
	/* Call Kernel to initialize flag for tiny/negative ellipsoid diameters */
	set_diam_krnl<<<1,1>>>(dpar, dmod, gpuid, GPU0);//, dnv, dnf);
	checkErrorAfterKernelLaunch("set_diam_krnl");
	/* Note: The CUDA-code assumes a single-component model for now. */
	/* Loop over all model components, realizing each one as a polyhedral solid
	 * with triangular facets. Compute the displacement of each vertex in this
	 * realization, represented as a base displacement plus a vertex deviation
	 * (positive or negative) along a specified set of direction cosines. */
	/* Copy nf and nv back from device copies dnf and dnv; used as launch
	 * parameters below */
	/* NOTE(review): sizeof(nv) is used for all three copies; all are int so
	 * this is harmless, but sizeof(nf)/sizeof(ns) would be clearer. */
	gpuErrchk(cudaMemcpyFromSymbol(&nv, dnv, sizeof(nv), 0, cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpyFromSymbol(&nf, dnf, sizeof(nv), 0, cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpyFromSymbol(&ns, dns, sizeof(nv), 0, cudaMemcpyDeviceToHost));
	/* Calculate launch parameters for kernels going over vertices (nvBLK) and
	 * facets (nfBLK).  (THD.x - 1 + n)/THD.x is the usual ceiling divide;
	 * the floor() wrapper is redundant for integer operands but harmless. */
	nvTHD.x = nfTHD.x = THD.x = maxThreadsPerBlock;
	nvBLK.x = floor((THD.x - 1 + nv) / THD.x);
	nfBLK.x = floor((THD.x - 1 + nf) / THD.x);
	/* Check component type & create corresponding vertex realization. */
	switch (type) {
	case ELLIPSE:
		/* To avoid negative diameters/very small positive diameters,
		 * adjust the function a[i] = 1/radius[i]^2 so it monotonically
		 * increases as diameter[i] decreases through zero and beyond,
		 * rather than being symmetric about zero diameter. Also set flag
		 * "baddiam" when any diameter is very small or negative, so that
		 * extra penalties can later be applied to this model. */
		/* Launch ellipse diameter kernel */
		ellipse_diameter_krnl<<<1,1>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("ellipse_diameter_krnl");
		/* Kernel finds distance of each vertex to ellipsoid's center */
		ellipse_distance_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("ellipse_distance_krnl");
		/* Launch kernel to set real->scalefactor */
		ellipse_scalefactor_krnl<<<1,1>>>(dmod);
		checkErrorAfterKernelLaunch("ellipse_scalefactor_krnl");
		break;
	case OVOID:
		/* Determine all shape parameters, making sure that none are out of bounds */
		set_ovoid_parameters_krnl<<<1,1>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("set_ovoid_parameters_krnl");
		/* Kernel finds distance of each vertex to ovoid's center */
		ovoid_distance_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("ovoid_distance_krnl");
		break;
	case HARMONIC:
		/* Kernel sets parameters associated with harmonic model */
		harmonic_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("harmonic_krnl");
		/* Three threads: one per scale-factor dimension */
		THD.x = 3;
		harmonic_scalefactor_krnl<<<1,THD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("harmonic_scalefactor_krnl");
		break;
	case VERTEX:
		/* The vertex type is its own realization, but we still need to update
		 * the values of the "scale factor" parameters and update any vertex
		 * deviations that have the '=' state */
		vertex_update_dev_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("vertex_update_dev_kernel");
		/* Three threads: one per scale-factor dimension */
		THD.x = 3;
		vertex_scalefactor_krnl<<<1,THD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("vertex_scalefactor_krnl");
		break;
	default:
		printf("realize_mod.c: don't know that component type\n");
	} /* end of switch statement for component type */
	/* Calculate vertex coordinates for this component */
	calc_vertex_co_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("calc_vertex_co_krnl");
	/* Use this component's rotational offset angles to create comp[c].m, the
	 * rotation matrix that will be applied to the vertex coordinates */
	euler2mat_realize_mod_krnl<<<1,1>>>(dmod);
	checkErrorAfterKernelLaunch("dev_euler2mat");
	/* If needed, perform rotation on this component */
	perform_rotation_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("perform_rotation_krnl");
	/* If needed, perform translation on this component */
	perform_translation_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("perform_translation_krnl, line 651");
	/* Figure out if optical/radar harmonic scattering laws are in use *
	 * and set the flag harmonic_scatlaw accordingly */
	set_optical_params_krnl<<<1,1>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("set_optical_params_krnl, line 656");
	/* For each facet of this component, compute outward unit normal, area,
	 * mean coordinates of the three corner vertices, and corresponding angular
	 * coordinates (for some scattering laws) */
	facet_krnl<<<nfBLK,nfTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("facet_krnl, line 662");
	/* Calculate vertex normals for this component as normalized sums of the
	 * facet normals for all facets attached to each vertex */
	calc_vertex_nrmls_krnl<<<nvBLK,nvTHD>>>(dmod);
	checkErrorAfterKernelLaunch("calc_vertex_nrmls, line 667");
}
/* Determine which vertices, facets, and sides of a multiple-component
model lie interior to the model rather than on the model's surface,
and reset their "act" (active) flags to zero */
__host__ void check_surface_gpu(struct mod_t *dmod, cudaStream_t *rm_streams) {
	/* Determine which vertices/facets/sides lie on the model surface.
	 * For the single-component models currently supported everything is on
	 * the surface, so this simply flags all of them as active.  The three
	 * independent launches go to separate streams so they can overlap.
	 * Relies on the host copies nv/nf/ns filled by
	 * realize_coordinates_gpu. */
	dim3 THD; THD.x = maxThreadsPerBlock;
	/* Calculate launch parameters (ceiling divide; floor() is redundant
	 * for integer operands but harmless) */
	nvBLK.x = floor((THD.x - 1 + nv) / THD.x);
	nfBLK.x = floor((THD.x - 1 + nf) / THD.x);
	nsBLK.x = floor((THD.x - 1 + ns) / THD.x);
	/* 1-component model: flag all vertices and facets as active, then return */
	set_real_active_vert_krnl<<<nvBLK,THD,0,rm_streams[0]>>>(dmod);
	set_real_active_facet_krnl<<<nfBLK,THD,0,rm_streams[1]>>>(dmod);
	set_real_active_side_krnl<<<nsBLK,THD,0,rm_streams[2]>>>(dmod);
	checkErrorAfterKernelLaunch("set_real_active_side_krnl");
	/* Synchronize streams to default stream */
	for (int f=0; f<3; f++)
		cudaStreamSynchronize(rm_streams[f]);
}
__global__ void comp_moments_1stinit_krnl(struct mod_t *dmod) {
	/* Single-thread kernel: zero the whole-model surface area, volume,
	 * center-of-mass vector, and 3x3 inertia tensor before the per-facet
	 * contributions are accumulated. */
	if (threadIdx.x == 0) {
		dmod->shape.area   = 0.0;
		dmod->shape.volume = 0.0;
		for (int row = 0; row < 3; row++) {
			dmod->shape.com[row] = 0.0;
			for (int col = 0; col < 3; col++)
				dmod->shape.inertia[row][col] = 0.0;
		}
	}
}
__global__ void comp_moments_2ndinit_krnl(struct mod_t *dmod, double area1,
		double area2, int c) {
	/* Single-threaded kernel - meant to initialize the individual component
	 * com and inertia arrays.  area1 = this component's surface area,
	 * area2 = whole-model surface area (equal for single-component models). */
	if (threadIdx.x == 0) {
		int j, k;
		dmod->shape.comp[c].area = area1;
		dmod->shape.area = area2;
		/* NOTE(review): the lines below hard-code component 0 while the
		 * assignment above uses index c; with the single-component models
		 * currently supported c==0 so they agree, but this must be unified
		 * for multi-component support (see TODO above compute_moments_gpu). */
		dmod->shape.comp[0].volume = 0.0;
		for (k=0; k<=2; k++) {
			dmod->shape.comp[0].com[k] = 0.0;
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].inertia[k][j] = 0.0;
		}
		/* NOTE(review): when c==0 this overwrites the area1 stored above;
		 * the original comment says it is the first step of an area
		 * accumulation -- confirm that is the intent. */
		dmod->shape.comp[0].area = 0.0; // actually 1st step in calculating surface area
	}
}
__global__ void comp_moments_facet_krnl(struct mod_t *dmod, int c, double *dvarr,
		double *dcom0, double *dcom1, double *dcom2, double *dI00, double *dI01,
		double *dI02, double *dI10, double *dI11, double *dI12, double *dI20,
		double *dI21, double *dI22) {
	/* nf-threaded kernel: compute each facet's contribution to the volume
	 * (dv), first moment (dcom), and inertia tensor (dI) via dev_facmom,
	 * then scatter the 13 scalars into per-facet arrays so a later
	 * parallel reduction (dvdI_reduce_gpu) can sum them. */
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	double dI[3][3], dcom[3], dv;
	if (f < dmod->shape.comp[0].real.nf)
	{
		/* NOTE(review): vertex/normal lookups mix comp[c] with comp[0]
		 * facet indices; fine while only single-component models (c==0)
		 * are supported, but needs unifying for multi-component support. */
		dev_facmom(dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x,
				dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x,
				dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x,
				dmod->shape.comp[c].real.f[f].n, &dv, dcom, dI);
		/* Assign calculated dv, dcom, dI to each facet for later parallel reduction */
		dvarr[f] = dv;
		dcom0[f] = dcom[0]; dcom1[f]= dcom[1]; dcom2[f]= dcom[2];
		dI00[f] = dI[0][0]; dI01[f] = dI[0][1]; dI02[f] = dI[0][2];
		dI10[f] = dI[1][0]; dI11[f] = dI[1][1]; dI12[f] = dI[1][2];
		dI20[f] = dI[2][0]; dI21[f] = dI[2][1]; dI22[f] = dI[2][2];
	}
}
__global__ void comp_moments_com_krnl(struct mod_t *dmod) {
	/* Single-thread kernel: convert the accumulated first moments into
	 * center-of-mass coordinates by dividing through by the component and
	 * whole-model volumes.  Assumes both volumes have already been reduced
	 * (dvdI_reduce_gpu) and are nonzero.
	 * Fix: removed a leftover dead statement `j = 2;` that had no effect. */
	if (threadIdx.x == 0) {
		int j;
		for (j=0; j<=2; j++) {
			dmod->shape.comp[0].com[j] /= dmod->shape.comp[0].volume;
			dmod->shape.com[j] /= dmod->shape.volume;
		}
	}
}
// TO DO: all functions in this file need to be adapated to handle multi-component
// models, which they currently do not support.
__host__ void compute_moments_gpu(struct mod_t *dmod, int nf, cudaStream_t *cm_streams)
{
	/* Host driver: compute the model's surface area, volume, center of
	 * mass, and inertia tensor on the GPU.  Allocates nf-sized scratch
	 * arrays (one per scalar to be reduced), fills them with per-facet
	 * contributions, reduces them, and normalizes the COM.  Scratch is
	 * freed before returning. */
	double area1=0.0, area2=0.0, *dv, *dcom0, *dcom1, *dcom2, *dI00, *dI01, *dI02,
			*dI10, *dI11, *dI12, *dI20, *dI21, *dI22;
	size_t arrsz = sizeof(double)*nf;
	int c=0; // FIX THIS
	/* Initialize the model's surface area, volume, center-of-mass (COM)
	 * displacement, and inertia tensor */
	comp_moments_1stinit_krnl<<<1,1>>>(dmod);
	checkErrorAfterKernelLaunch("comp_moments_init_krnl, line 945");
	/* CUDA note: Only single-component models for now.
	 * Loop over all model components, computing areas and moments (volume,
	 * center of mass, and inertia tensor); COM and inertia tensor are computed
	 * assuming uniform density. For multiple-component models, when computing
	 * the area and the moments for overall model, ignore facets interior to
	 * the model (i.e., that are inside some other component). */
	/* Note that area2 (area of active facets summed up) is not currently
	 * implemented. A single-component model is assumed, in which case every
	 * facet is active and area1=area2 */
	// for (c=0; c<dmod->shape.ncomp; c++) {
	area1 = compute_model_area(dmod, c, nf);
	area2 = area1;
	/* Allocate temporary dv, dcom, dI pointers (one double per facet for
	 * each of the 13 scalars that get reduced below) */
	gpuErrchk(cudaMalloc((void**)&dv,   arrsz));
	gpuErrchk(cudaMalloc((void**)&dcom0,arrsz));
	gpuErrchk(cudaMalloc((void**)&dcom1,arrsz));
	gpuErrchk(cudaMalloc((void**)&dcom2,arrsz));
	gpuErrchk(cudaMalloc((void**)&dI00, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI01, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI02, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI10, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI11, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI12, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI20, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI21, arrsz));
	gpuErrchk(cudaMalloc((void**)&dI22, arrsz));
	/* Set area and initialize per-component COM and Inertia arrays */
	comp_moments_2ndinit_krnl<<<1,1>>>(dmod, area1, area2, c);
	checkErrorAfterKernelLaunch("comp_moments_2ndinit_krnl");
	/* Load the temporary arrays with data */
	comp_moments_facet_krnl<<<nfBLK,nfTHD>>>(dmod, c, dv, dcom0, dcom1, dcom2,
			dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22);
	checkErrorAfterKernelLaunch("comp_moments_facets_krnl64");
	/* Calculate surface area for this component; for active facets, also add
	 * the contributions to the area of the overall model */
	dvdI_reduce_gpu(dmod, dv, dcom0, dcom1, dcom2, dI00, dI01, dI02,
			dI10, dI11, dI12, dI20, dI21, dI22, nf, c, cm_streams);
	/* This kernel computes the overall COM vector */
	comp_moments_com_krnl<<<1,1>>>(dmod);
	checkErrorAfterKernelLaunch("comp_moments_com_krnl");
	/* Free up the temporary arrays */
	cudaFree(dv);
	cudaFree(dcom0);	cudaFree(dcom1);	cudaFree(dcom2);
	cudaFree(dI00);		cudaFree(dI01);		cudaFree(dI02);
	cudaFree(dI10);		cudaFree(dI11);		cudaFree(dI12);
	cudaFree(dI20);		cudaFree(dI21);		cudaFree(dI22);
}
#undef HAIRWIDTH
#undef SMALLRATIO
#undef SMALLOVOIDK1
#undef SMALLOVOIDK2
#undef OVOIDTOL
#undef MAXEDGE
#undef EDGETOL
#undef RTOL
#undef SMALLCOEFF3
|
add_atomic.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define BLOCK_WIDTH 1000
#define ARRAY_SIZE 10
__global__ void add_naive(int *arr){
	/* Non-atomic counterpart of add_atomic below: many threads map onto the
	 * same slot (i % ARRAY_SIZE) and do a plain read-modify-write, so
	 * concurrent increments race and the final counts are unreliable. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	i= i%ARRAY_SIZE;
	arr[i] = arr[i]+1;
}
__global__ void add_atomic(int *arr){
	/* Each thread atomically increments arr[globalThreadId % ARRAY_SIZE],
	 * so concurrent updates to the same slot never lose increments. */
	int slot = (blockIdx.x * blockDim.x + threadIdx.x) % ARRAY_SIZE;
	atomicAdd(arr + slot, 1);
}
int main(int argc, char **argv){
	/* Demo driver: launch NUM_THREADS threads that each atomically bump one
	 * of ARRAY_SIZE counters, then print the per-slot totals and kernel
	 * time.  Expected: each slot equals NUM_THREADS/ARRAY_SIZE.
	 * NOTE(review): hip* return codes are not checked. */
	GpuTimer timer;
	int h_arr[ARRAY_SIZE];
	memset(h_arr, 0, sizeof(h_arr));	/* zero the host counters */
	int *d_arr;
	hipMalloc((void **) &d_arr, ARRAY_SIZE*sizeof(int));
	hipMemcpy(d_arr, h_arr, ARRAY_SIZE*sizeof(int), hipMemcpyHostToDevice);
	timer.Start();
	hipLaunchKernelGGL(( add_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_arr);
	timer.Stop();
	/* Blocking D2H copy also waits for the kernel to finish */
	hipMemcpy(h_arr, d_arr, ARRAY_SIZE*sizeof(int), hipMemcpyDeviceToHost);
	for(int i=0;i<ARRAY_SIZE;i++)	printf("%d, ",h_arr[i]);
	printf("\nTime taken: %g ms\n", timer.Elapsed());
	hipFree(d_arr);
	return 0;
} | add_atomic.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define BLOCK_WIDTH 1000
#define ARRAY_SIZE 10
__global__ void add_naive(int *arr){
	/* Non-atomic counterpart of add_atomic below: many threads map onto the
	 * same slot (i % ARRAY_SIZE) and do a plain read-modify-write, so
	 * concurrent increments race and the final counts are unreliable. */
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	i= i%ARRAY_SIZE;
	arr[i] = arr[i]+1;
}
__global__ void add_atomic(int *arr){
	/* Each thread atomically increments arr[globalThreadId % ARRAY_SIZE],
	 * so concurrent updates to the same slot never lose increments. */
	int slot = (blockIdx.x * blockDim.x + threadIdx.x) % ARRAY_SIZE;
	atomicAdd(arr + slot, 1);
}
int main(int argc, char **argv){
	/* Demo driver: launch NUM_THREADS threads that each atomically bump one
	 * of ARRAY_SIZE counters, then print the per-slot totals and kernel
	 * time.  Expected: each slot equals NUM_THREADS/ARRAY_SIZE.
	 * Fix: the original checked no CUDA return codes and never called
	 * cudaGetLastError() after the launch, so allocation and launch-config
	 * failures went unnoticed. */
	GpuTimer timer;
	int h_arr[ARRAY_SIZE];
	memset(h_arr, 0, sizeof(h_arr));	/* zero the host counters */
	int *d_arr;
	cudaError_t err = cudaMalloc((void **) &d_arr, ARRAY_SIZE*sizeof(int));
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	cudaMemcpy(d_arr, h_arr, ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice);
	timer.Start();
	add_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_arr);
	err = cudaGetLastError();	/* catch bad launch configuration */
	if (err != cudaSuccess) {
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	timer.Stop();
	/* Blocking D2H copy also waits for the kernel to finish */
	cudaMemcpy(h_arr, d_arr, ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
	for(int i=0;i<ARRAY_SIZE;i++)	printf("%d, ",h_arr[i]);
	printf("\nTime taken: %g ms\n", timer.Elapsed());
	cudaFree(d_arr);
	return 0;
}
9b19da2351902f5ea7e50b140302f30094ffda72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void __fillToInds3DLong(long long A, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
	/* Grid-stride fill of a 3-D view of B with the constant A at mapped
	 * indices: B[I[i] + ldb*(J[j] + rdb*K[k])] = A over all
	 * (i,j,k) in nrows x ncols x nk.  A NULL index array means identity
	 * mapping for that dimension.  Instead of re-dividing the flat index
	 * each iteration, (i,j,k) are advanced by the decomposed stride
	 * (istep,jstep,kstep) with explicit carry into the next dimension. */
	int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
	int step = blockDim.x * gridDim.x * gridDim.y;
	/* Decompose the starting flat index tid into (i,j,k) ... */
	int k = tid / (nrows * ncols);
	int tidrem = tid - k * (nrows * ncols);
	/* ... and the flat stride into (istep,jstep,kstep) */
	int kstep = step / (nrows * ncols);
	int steprem = step - kstep * (nrows * ncols);
	int j = tidrem / nrows;
	int i = tidrem - j * nrows;
	int jstep = steprem / nrows;
	int istep = steprem - jstep * nrows;
	int id, mapi, mapj, mapk;
	for (id = tid; id < nrows * ncols * nk; id += step) {
		mapk = k;
		if (K != NULL) mapk = K[k];
		mapj = j;
		if (J != NULL) mapj = J[j];
		mapi = i;
		if (I != NULL) mapi = I[i];
		B[mapi + ldb * (mapj + rdb * mapk)] = A;
		/* Advance (i,j,k) by the decomposed stride, carrying overflow
		 * into the next-slower dimension (each component of the stride is
		 * < its dimension, so one subtraction per carry suffices) */
		i += istep;
		if (i >= nrows) {i -= nrows; j++;}
		j += jstep;
		if (j >= ncols) {j -= ncols; k++;}
		k += kstep;
	}
} | 9b19da2351902f5ea7e50b140302f30094ffda72.cu | #include "includes.h"
__global__ void __fillToInds3DLong(long long A, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
	/* Grid-stride fill of a 3-D view of B with the constant A at mapped
	 * indices: B[I[i] + ldb*(J[j] + rdb*K[k])] = A over all
	 * (i,j,k) in nrows x ncols x nk.  A NULL index array means identity
	 * mapping for that dimension.  Instead of re-dividing the flat index
	 * each iteration, (i,j,k) are advanced by the decomposed stride
	 * (istep,jstep,kstep) with explicit carry into the next dimension. */
	int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
	int step = blockDim.x * gridDim.x * gridDim.y;
	/* Decompose the starting flat index tid into (i,j,k) ... */
	int k = tid / (nrows * ncols);
	int tidrem = tid - k * (nrows * ncols);
	/* ... and the flat stride into (istep,jstep,kstep) */
	int kstep = step / (nrows * ncols);
	int steprem = step - kstep * (nrows * ncols);
	int j = tidrem / nrows;
	int i = tidrem - j * nrows;
	int jstep = steprem / nrows;
	int istep = steprem - jstep * nrows;
	int id, mapi, mapj, mapk;
	for (id = tid; id < nrows * ncols * nk; id += step) {
		mapk = k;
		if (K != NULL) mapk = K[k];
		mapj = j;
		if (J != NULL) mapj = J[j];
		mapi = i;
		if (I != NULL) mapi = I[i];
		B[mapi + ldb * (mapj + rdb * mapk)] = A;
		/* Advance (i,j,k) by the decomposed stride, carrying overflow
		 * into the next-slower dimension (each component of the stride is
		 * < its dimension, so one subtraction per carry suffices) */
		i += istep;
		if (i >= nrows) {i -= nrows; j++;}
		j += jstep;
		if (j >= ncols) {j -= ncols; k++;}
		k += kstep;
	}
} |
2902879028e9bc863a11dbe71a69fbba07537fbc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
//
// to run with 512 elements:
// ./vanilladeriv 512
#define dfloat float
// note the order of the fields below is also assumed in the code.
const int _nstate = 5;
const int _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
const int _nvgeo = 14;
const int _XIx = 0;
const int _ETAx = 1;
const int _ZETAx = 2;
const int _XIy = 3;
const int _ETAy = 4;
const int _ZETAy = 5;
const int _XIz = 6;
const int _ETAz = 7;
const int _ZETAz = 8;
const int _MJ = 9;
const int _MJI = 10;
const int _x = 11;
const int _y = 12;
const int _z = 13;
#define grav ((dfloat) 9.81)
#define R_d ((dfloat)287.0024093890231)
#define cp_d ((dfloat)1004.5084328615809)
#define cv_d ((dfloat)717.5060234725578)
#define gamma_d ((dfloat)1.4)
#define gdm1 ((dfloat)0.4)
// Volume RHS for 3-D
template <int Nq, int Np, int nmoist, int ntrace, int nvar>
__global__ void volumerhs_v2(dfloat * __restrict__ rhs,
		const dfloat * __restrict__ Q,
		const dfloat * __restrict__ vgeo,
		const dfloat gravity,
		const dfloat * __restrict__ D,
		const int nelem){
	/* Volume RHS, variant 2: one thread per quadrature node.
	 * Launch layout: grid = (nelem,1,1), block = (Nq,Nq,Nq); blockIdx.x is
	 * the element e and threadIdx.(x,y,z) the node indices (i,j,k).
	 * Each thread evaluates the Euler fluxes at its node, stores the
	 * metric-weighted (contravariant) fluxes in shared s_F/s_G/s_H, and
	 * after a barrier differentiates them along all three directions.
	 * Shared memory: Nq*Nq + 3*Nq^3*_nstate dfloats (static). */
	__shared__ dfloat s_D[Nq][Nq];
	__shared__ dfloat s_F[Nq][Nq][Nq][_nstate];
	__shared__ dfloat s_G[Nq][Nq][Nq][_nstate];
	__shared__ dfloat s_H[Nq][Nq][Nq][_nstate];
	int e = blockIdx.x;
	int k = threadIdx.z;
	int j = threadIdx.y;
	int i = threadIdx.x;
	/* One k-slab loads the differentiation matrix; the __syncthreads()
	 * below (after the flux stores) covers this write before any read */
	if(k == 0)
		s_D[j][i] = D[j*Nq+i];
	// Load values will need into registers
	int gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
	dfloat MJ = vgeo[gid + _MJ*Np];
	dfloat XIx = vgeo[gid + _XIx*Np];
	dfloat XIy = vgeo[gid + _XIy*Np];
	dfloat XIz = vgeo[gid + _XIz*Np];
	dfloat ETAx = vgeo[gid + _ETAx*Np];
	dfloat ETAy = vgeo[gid + _ETAy*Np];
	dfloat ETAz = vgeo[gid + _ETAz*Np];
	dfloat ZETAx = vgeo[gid + _ZETAx*Np];
	dfloat ZETAy = vgeo[gid + _ZETAy*Np];
	dfloat ZETAz = vgeo[gid + _ZETAz*Np];
	dfloat z = vgeo[gid + _z*Np];
	int qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
	dfloat R = Q[qid + _R*Np];
	dfloat U = Q[qid + _U*Np];
	dfloat V = Q[qid + _V*Np];
	dfloat W = Q[qid + _W*Np];
	dfloat E = Q[qid + _E*Np];
	/* Pressure from total energy minus kinetic and potential parts */
	dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
	dfloat Rinv = 1 / R;
	/* Cartesian Euler flux components for each conserved variable */
	dfloat fluxR_x = U;
	dfloat fluxU_x = Rinv * U * U + P;
	dfloat fluxV_x = Rinv * U * V;
	dfloat fluxW_x = Rinv * U * W;
	dfloat fluxE_x = Rinv * U * (E + P);
	dfloat fluxR_y = V;
	dfloat fluxU_y = Rinv * V * U;
	dfloat fluxV_y = Rinv * V * V + P;
	dfloat fluxW_y = Rinv * V * W;
	dfloat fluxE_y = Rinv * V * (E + P);
	dfloat fluxR_z = W;
	dfloat fluxU_z = Rinv * W * U;
	dfloat fluxV_z = Rinv * W * V;
	dfloat fluxW_z = Rinv * W * W + P;
	dfloat fluxE_z = Rinv * W * (E + P);
	/* Contravariant fluxes (weighted by the mass matrix MJ) per direction */
	s_F[i][j][k][_R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
	s_F[i][j][k][_U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
	s_F[i][j][k][_V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
	s_F[i][j][k][_W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
	s_F[i][j][k][_E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
	s_G[i][j][k][_R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
	s_G[i][j][k][_U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
	s_G[i][j][k][_V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
	s_G[i][j][k][_W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
	s_G[i][j][k][_E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
	s_H[i][j][k][_R] = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
	s_H[i][j][k][_U] = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
	s_H[i][j][k][_V] = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
	s_H[i][j][k][_W] = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
	s_H[i][j][k][_E] = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
	/* All shared-memory flux writes (and the s_D load) complete here */
	__syncthreads();
	dfloat rhsU = 0, rhsV = 0, rhsW = 0, rhsR = 0, rhsE = 0;
	dfloat MJI = vgeo[gid + _MJI*Np];
	// buoyancy term
	rhsW -= R * gravity;
	// loop of XI-grid lines
#pragma unroll Nq
	for(int n=0;n<Nq;++n){
		dfloat MJI_Dni = MJI * s_D[n][i];
		dfloat MJI_Dnj = MJI * s_D[n][j];
		dfloat MJI_Dnk = MJI * s_D[n][k];
		rhsR += MJI_Dni * s_F[n][ j][ k][ _R];
		rhsR += MJI_Dnj * s_G[i][ n][ k][ _R];
		rhsR += MJI_Dnk * s_H[i][ j][ n][ _R];
		rhsU += MJI_Dni * s_F[n][ j][ k][ _U];
		rhsU += MJI_Dnj * s_G[i][ n][ k][ _U];
		rhsU += MJI_Dnk * s_H[i][ j][ n][ _U];
		rhsV += MJI_Dni * s_F[n][ j][ k][ _V];
		rhsV += MJI_Dnj * s_G[i][ n][ k][ _V];
		rhsV += MJI_Dnk * s_H[i][ j][ n][ _V];
		rhsW += MJI_Dni * s_F[n][ j][ k][ _W];
		rhsW += MJI_Dnj * s_G[i][ n][ k][ _W];
		rhsW += MJI_Dnk * s_H[i][ j][ n][ _W];
		rhsE += MJI_Dni * s_F[n][ j][ k][ _E];
		rhsE += MJI_Dnj * s_G[i][ n][ k][ _E];
		rhsE += MJI_Dnk * s_H[i][ j][ n][ _E];
	}
	rhs[qid + Np*_U] += rhsU;
	rhs[qid + Np*_V] += rhsV;
	rhs[qid + Np*_W] += rhsW;
	rhs[qid + Np*_R] += rhsR;
	rhs[qid + Np*_E] += rhsE;
	// loop over moist variables
	// FIXME: Currently just passive advection
	// TODO: This should probably be unrolled by some factor
	for(int m =0;m<nmoist;++m){
		int s = _nstate + m;
		/* Barrier before reusing slot [1] of s_F/s_G/s_H: make sure the
		 * previous iteration's reads are done */
		__syncthreads();
		dfloat Qmoist = Q[qid + s*Np];
		dfloat fx = U * Rinv * Qmoist;
		dfloat fy = V * Rinv * Qmoist;
		dfloat fz = W * Rinv * Qmoist;
		s_F[i][j][k][1] = MJ * (XIx * fx + XIy * fy + XIz * fz);
		s_G[i][j][k][1] = MJ * (ETAx * fx + ETAy * fy + ETAz * fz);
		s_H[i][j][k][1] = MJ * (ZETAx * fx + ZETAy * fy + ZETAz * fz);
		__syncthreads();
		dfloat rhsmoist = 0;
#pragma unroll Nq
		for(int n=0;n<Nq;++n){
			dfloat MJI_Dni = MJI * s_D[n][i];
			dfloat MJI_Dnj = MJI * s_D[n][j];
			dfloat MJI_Dnk = MJI * s_D[n][k];
			rhsmoist += MJI_Dni * s_F[n][j][k][1];
			rhsmoist += MJI_Dnj * s_G[i][n][k][1];
			rhsmoist += MJI_Dnk * s_H[i][j][n][1];
		}
		rhs[qid + s*Np] += rhsmoist;
	}
	// Loop over trace variables
	// TODO: This should probably be unrolled by some factor
	dfloat rhstrace = 0;
	for(int m=0;m<ntrace;++m){
		int s = _nstate + nmoist + m;
		__syncthreads();
		dfloat Qtrace = Q[qid+s*Np];
		dfloat fx = U * Rinv * Qtrace;
		dfloat fy = V * Rinv * Qtrace;
		dfloat fz = W * Rinv * Qtrace;
		s_F[i][j][k][1] = MJ * (XIx * fx + XIy * fy + XIz * fz);
		s_G[i][j][k][1] = MJ * (ETAx * fx + ETAy * fy + ETAz * fz);
		s_H[i][j][k][1] = MJ * (ZETAx * fx + ZETAy * fy + ZETAz * fz);
		__syncthreads();
		// TODO: Prefetch MJI and rhs
		rhstrace = 0;
#pragma unroll Nq
		for(int n=0;n<Nq;++n){
			dfloat MJI_Dni = MJI * s_D[n][i];
			dfloat MJI_Dnj = MJI * s_D[n][j];
			dfloat MJI_Dnk = MJI * s_D[n][k];
			rhstrace += MJI_Dni * s_F[n][j][k][1];
			rhstrace += MJI_Dnj * s_G[i][n][k][1];
			rhstrace += MJI_Dnk * s_H[i][j][n][1];
		}
		rhs[qid+s*Np] += rhstrace;
	}
}
template <int Nq, int Np, int nmoist, int ntrace, int nvar>
__global__ void volumerhs_v3(dfloat * __restrict__ rhs,
		const dfloat * __restrict__ Q,
		const dfloat * __restrict__ vgeo,
		const dfloat gravity,
		const dfloat * __restrict__ D,
		const int nelem){
	/* Volume RHS, variant 3: 2-D thread block, k-direction in registers.
	 * Launch layout: grid = (nelem,1,1), block = (Nq,Nq,1); each thread
	 * owns a full vertical line of Nq nodes.  Per k-plane, the XI/ETA
	 * fluxes go through shared memory while each node's ZETA flux is kept
	 * in registers and immediately distributed into the r_rhs* register
	 * accumulators via s_D[k][n].  (Does not handle moist/trace variables,
	 * unlike volumerhs_v2; the driver runs with nmoist = ntrace = 0.)
	 * Shared memory: Nq*Nq*(1 + 2*_nstate) dfloats. */
	__shared__ dfloat s_D[Nq][Nq];
	__shared__ dfloat s_F[Nq][Nq][_nstate];
	__shared__ dfloat s_G[Nq][Nq][_nstate];
	/* Per-thread accumulators for the whole vertical line */
	dfloat r_rhsR[Nq];
	dfloat r_rhsU[Nq];
	dfloat r_rhsV[Nq];
	dfloat r_rhsW[Nq];
	dfloat r_rhsE[Nq];
	int e = blockIdx.x;
	int j = threadIdx.y;
	int i = threadIdx.x;
	/* The __syncthreads() at the top of the k loop covers this write */
	s_D[j][i] = D[j*Nq+i];
#pragma unroll Nq
	for(int k=0;k<Nq;++k){
		r_rhsR[k] = 0;
		r_rhsU[k] = 0;
		r_rhsV[k] = 0;
		r_rhsW[k] = 0;
		r_rhsE[k] = 0;
	}
#pragma unroll Nq
	for(int k=0;k<Nq;++k){
		/* Barrier: previous plane's shared-flux reads must finish before
		 * this plane overwrites s_F/s_G (also covers the s_D load) */
		__syncthreads();
		// Load values will need into registers
		int gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
		dfloat MJ = vgeo[gid + _MJ*Np];
		dfloat XIx = vgeo[gid + _XIx*Np];
		dfloat XIy = vgeo[gid + _XIy*Np];
		dfloat XIz = vgeo[gid + _XIz*Np];
		dfloat ETAx = vgeo[gid + _ETAx*Np];
		dfloat ETAy = vgeo[gid + _ETAy*Np];
		dfloat ETAz = vgeo[gid + _ETAz*Np];
		dfloat ZETAx = vgeo[gid + _ZETAx*Np];
		dfloat ZETAy = vgeo[gid + _ZETAy*Np];
		dfloat ZETAz = vgeo[gid + _ZETAz*Np];
		dfloat z = vgeo[gid + _z*Np];
		int qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
		dfloat R = Q[qid + _R*Np];
		dfloat U = Q[qid + _U*Np];
		dfloat V = Q[qid + _V*Np];
		dfloat W = Q[qid + _W*Np];
		dfloat E = Q[qid + _E*Np];
		/* Pressure from total energy minus kinetic and potential parts */
		dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
		dfloat Rinv = 1 / R;
		dfloat fluxR_x = U;
		dfloat fluxU_x = Rinv * U * U + P;
		dfloat fluxV_x = Rinv * U * V;
		dfloat fluxW_x = Rinv * U * W;
		dfloat fluxE_x = Rinv * U * (E + P);
		dfloat fluxR_y = V;
		dfloat fluxU_y = Rinv * V * U;
		dfloat fluxV_y = Rinv * V * V + P;
		dfloat fluxW_y = Rinv * V * W;
		dfloat fluxE_y = Rinv * V * (E + P);
		dfloat fluxR_z = W;
		dfloat fluxU_z = Rinv * W * U;
		dfloat fluxV_z = Rinv * W * V;
		dfloat fluxW_z = Rinv * W * W + P;
		dfloat fluxE_z = Rinv * W * (E + P);
		/* XI/ETA contravariant fluxes go to shared (read across the plane) */
		s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
		s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
		s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
		s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
		s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
		s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
		s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
		s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
		s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
		s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
		/* ZETA contravariant flux stays in registers: only this thread's
		 * vertical line needs it */
		dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
		dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
		dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
		dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
		dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
		// one shared access per 10 flops
#pragma unroll Nq
		for(int n=0;n<Nq;++n){
			dfloat Dkn = s_D[k][n];
			r_rhsR[n] += Dkn * r_HR;
			r_rhsU[n] += Dkn * r_HU;
			r_rhsV[n] += Dkn * r_HV;
			r_rhsW[n] += Dkn * r_HW;
			r_rhsE[n] += Dkn * r_HE;
		}
		/* Buoyancy term (MJ-weighted here; divided out by MJI at the end) */
		r_rhsW[k] -= MJ * R * gravity;
		__syncthreads();
		// loop of XI-grid lines
#pragma unroll Nq
		for(int n=0;n<Nq;++n){
			dfloat Dni = s_D[n][i];
			dfloat Dnj = s_D[n][j];
			r_rhsR[k] += Dni * s_F[n][j][_R];
			r_rhsR[k] += Dnj * s_G[i][n][_R];
			r_rhsU[k] += Dni * s_F[n][j][_U];
			r_rhsU[k] += Dnj * s_G[i][n][_U];
			r_rhsV[k] += Dni * s_F[n][j][_V];
			r_rhsV[k] += Dnj * s_G[i][n][_V];
			r_rhsW[k] += Dni * s_F[n][j][_W];
			r_rhsW[k] += Dnj * s_G[i][n][_W];
			r_rhsE[k] += Dni * s_F[n][j][_E];
			r_rhsE[k] += Dnj * s_G[i][n][_E];
		}
	}
	/* Apply the inverse mass matrix and accumulate into global rhs */
#pragma unroll Nq
	for(int k=0;k<Nq;++k){
		int gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
		dfloat MJI = vgeo[gid + _MJI*Np];
		int qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
		rhs[qid+_U*Np] += MJI*r_rhsU[k];
		rhs[qid+_V*Np] += MJI*r_rhsV[k];
		rhs[qid+_W*Np] += MJI*r_rhsW[k];
		rhs[qid+_R*Np] += MJI*r_rhsR[k];
		rhs[qid+_E*Np] += MJI*r_rhsE[k];
	}
}
void randArray(int N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
	/* Allocate a host array (*q) and a device array (*c_q) of N dfloats,
	 * fill the host copy with uniform random values in [base, base+range),
	 * and mirror it to the device.  Uses drand48(), so results depend on
	 * the caller's srand48() seed.  Caller owns both allocations.
	 * NOTE(review): hipMalloc/hipMemcpy return codes are not checked. */
	*q = (dfloat*) calloc(N, sizeof(dfloat));
	hipMalloc(c_q, N*sizeof(dfloat));
	for(int n=0;n<N;++n){
		q[0][n] = base + drand48()*range;
	}
	hipMemcpy(c_q[0], q[0], N*sizeof(dfloat), hipMemcpyHostToDevice);
}
void shiftEntries(int start, int end, dfloat shift, dfloat *q, dfloat *c_q){
	/* Add `shift` to the host entries q[start..end) and mirror just that
	 * slice to the matching device array c_q. */
	for(int idx = start; idx < end; ++idx)
		q[idx] += shift;
	hipMemcpy(c_q + start, q + start, (end - start)*sizeof(dfloat), hipMemcpyHostToDevice);
}
int main(int argc, char **argv){
	/* Driver: build random element data, run volumerhs_v2 (3-D thread
	 * blocks) and volumerhs_v3 (2-D blocks, k in registers) on identical
	 * inputs, and print the max absolute difference between their outputs.
	 * Fixes: argv[1] was dereferenced without checking argc; the trailing
	 * `exit(0); return 0;` left the return unreachable. */
	srand48(1234);
	if (argc < 2) {
		fprintf(stderr, "usage: %s <nelem>\n", argv[0]);
		return 1;
	}
	int nelem = atoi(argv[1]);
	const int N = 4;			/* polynomial degree */
	const int nmoist = 0;
	const int ntrace = 0;
	const int nvar = _nstate + nmoist + ntrace;
	const int Nq = N+1;			/* quadrature points per direction */
	const int Np = Nq*Nq*Nq;		/* nodes per element */
	const int Ntotal = Np*nelem*nvar;
	dfloat *Q, *c_Q;
	randArray(Ntotal, 0., 1., &Q, &c_Q);
	/* Shift density and energy away from zero so 1/R and P stay sane */
	for(int e=0;e<nelem;++e){
		for(int n=0;n<Np;++n){
			int idR = n + _R*Np + e*nvar*Np;
			int idE = n + _E*Np + e*nvar*Np;
			Q[idR] += 2.;
			Q[idE] += 20.;
		}
	}
	hipMemcpy(c_Q, Q, nelem*nvar*Np*sizeof(dfloat), hipMemcpyHostToDevice);
	const int Gtotal = Np*nelem*_nvgeo;
	dfloat *vgeo, *c_vgeo;
	randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
	/* Make sure the entries of the mass matrix satisfy the inverse relation */
	for(int e=0;e<nelem;++e){
		for(int n=0;n<Np;++n){
			int idMJ = n + _MJ*Np + e*_nvgeo*Np;
			int idMJI = n + _MJI*Np + e*_nvgeo*Np;
			vgeo[idMJ] += 3;
			vgeo[idMJI] = 1./vgeo[idMJ];
		}
	}
	hipMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), hipMemcpyHostToDevice);
	dfloat *D, *c_D;
	randArray(Nq*Nq, 1., 1., &D, &c_D);
	dfloat *rhs_v2, *c_rhs_v2;
	dfloat *rhs_v3, *c_rhs_v3;
	/* Re-seed before each fill so both rhs arrays start out identical */
	srand48(1234);
	randArray(Ntotal, 1., 1., &rhs_v2, &c_rhs_v2);
	srand48(1234);
	randArray(Ntotal, 1., 1., &rhs_v3, &c_rhs_v3);
	dim3 G(nelem,1,1);
	dim3 B2(Nq,Nq,Nq);
	dim3 B3(Nq,Nq,1);
	int Ntests = 1;
	for(int test=0;test<Ntests;++test){
		hipLaunchKernelGGL(( volumerhs_v2<Nq, Np, nmoist, ntrace, nvar>) , dim3(G), dim3(B2) , 0, 0, c_rhs_v2, c_Q, c_vgeo, grav, c_D, nelem);
		hipLaunchKernelGGL(( volumerhs_v3<Nq, Np, nmoist, ntrace, nvar>) , dim3(G), dim3(B3) , 0, 0, c_rhs_v3, c_Q, c_vgeo, grav, c_D, nelem);
	}
	/* Blocking D2H copies also wait for the kernels to complete */
	hipMemcpy(rhs_v2, c_rhs_v2, Ntotal*sizeof(dfloat), hipMemcpyDeviceToHost);
	hipMemcpy(rhs_v3, c_rhs_v3, Ntotal*sizeof(dfloat), hipMemcpyDeviceToHost);
	dfloat maxDiff = 0;
	for(int e=0;e<nelem;++e){
		for(int v=0;v<nvar;++v){
			for(int n=0;n<Np;++n){
				int id = n + v*Np + e*nvar*Np;
				dfloat diff = fabs(rhs_v2[id]-rhs_v3[id]);
				if(diff>maxDiff)
					maxDiff = diff;
			}
		}
	}
	printf("max diff = %lg\n", maxDiff);
	hipDeviceSynchronize();
	return 0;
}
| 2902879028e9bc863a11dbe71a69fbba07537fbc.cu | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
//
// to run with 512 elements:
// ./vanilladeriv 512
#define dfloat float
// note the order of the fields below is also assumed in the code.
const int _nstate = 5;
const int _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
const int _nvgeo = 14;
const int _XIx = 0;
const int _ETAx = 1;
const int _ZETAx = 2;
const int _XIy = 3;
const int _ETAy = 4;
const int _ZETAy = 5;
const int _XIz = 6;
const int _ETAz = 7;
const int _ZETAz = 8;
const int _MJ = 9;
const int _MJI = 10;
const int _x = 11;
const int _y = 12;
const int _z = 13;
#define grav ((dfloat) 9.81)
#define R_d ((dfloat)287.0024093890231)
#define cp_d ((dfloat)1004.5084328615809)
#define cv_d ((dfloat)717.5060234725578)
#define gamma_d ((dfloat)1.4)
#define gdm1 ((dfloat)0.4)
// Volume RHS for 3-D flow (density R, momenta U/V/W, total energy E, plus
// optional passively-advected moist/trace variables).
// Version 2: launched with one block per element and a full Nq x Nq x Nq
// thread block (one thread per quadrature node; see G/B2 in main).  Each
// thread stages its node's contravariant fluxes F/G/H in shared memory, then
// accumulates the weak derivatives along the XI/ETA/ZETA grid lines.
// Accumulates into `rhs` with +=, so rhs must hold valid initial values.
template <int Nq, int Np, int nmoist, int ntrace, int nvar>
__global__ void volumerhs_v2(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const dfloat * __restrict__ vgeo,
const dfloat gravity,
const dfloat * __restrict__ D,
const int nelem){
__shared__ dfloat s_D[Nq][Nq];
__shared__ dfloat s_F[Nq][Nq][Nq][_nstate];
__shared__ dfloat s_G[Nq][Nq][Nq][_nstate];
__shared__ dfloat s_H[Nq][Nq][Nq][_nstate];
int e = blockIdx.x;
int k = threadIdx.z;
int j = threadIdx.y;
int i = threadIdx.x;
// A single k-plane of threads suffices to load the Nq x Nq differentiation
// matrix; the __syncthreads() below (before any read of s_D) publishes it.
if(k == 0)
s_D[j][i] = D[j*Nq+i];
// Load values will need into registers
int gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJ = vgeo[gid + _MJ*Np];
dfloat XIx = vgeo[gid + _XIx*Np];
dfloat XIy = vgeo[gid + _XIy*Np];
dfloat XIz = vgeo[gid + _XIz*Np];
dfloat ETAx = vgeo[gid + _ETAx*Np];
dfloat ETAy = vgeo[gid + _ETAy*Np];
dfloat ETAz = vgeo[gid + _ETAz*Np];
dfloat ZETAx = vgeo[gid + _ZETAx*Np];
dfloat ZETAy = vgeo[gid + _ZETAy*Np];
dfloat ZETAz = vgeo[gid + _ZETAz*Np];
dfloat z = vgeo[gid + _z*Np];
int qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
dfloat R = Q[qid + _R*Np];
dfloat U = Q[qid + _U*Np];
dfloat V = Q[qid + _V*Np];
dfloat W = Q[qid + _W*Np];
dfloat E = Q[qid + _E*Np];
// pressure: total energy minus kinetic and gravitational-potential parts
dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
dfloat Rinv = 1 / R;
// Cartesian flux components for each conserved variable
dfloat fluxR_x = U;
dfloat fluxU_x = Rinv * U * U + P;
dfloat fluxV_x = Rinv * U * V;
dfloat fluxW_x = Rinv * U * W;
dfloat fluxE_x = Rinv * U * (E + P);
dfloat fluxR_y = V;
dfloat fluxU_y = Rinv * V * U;
dfloat fluxV_y = Rinv * V * V + P;
dfloat fluxW_y = Rinv * V * W;
dfloat fluxE_y = Rinv * V * (E + P);
dfloat fluxR_z = W;
dfloat fluxU_z = Rinv * W * U;
dfloat fluxV_z = Rinv * W * V;
dfloat fluxW_z = Rinv * W * W + P;
dfloat fluxE_z = Rinv * W * (E + P);
// Rotate Cartesian fluxes into contravariant (XI/ETA/ZETA) directions,
// premultiplied by the mass-matrix entry MJ, and stage in shared memory.
s_F[i][j][k][_R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
s_F[i][j][k][_U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
s_F[i][j][k][_V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
s_F[i][j][k][_W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
s_F[i][j][k][_E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
s_G[i][j][k][_R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
s_G[i][j][k][_U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
s_G[i][j][k][_V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
s_G[i][j][k][_W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
s_G[i][j][k][_E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
s_H[i][j][k][_R] = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
s_H[i][j][k][_U] = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
s_H[i][j][k][_V] = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
s_H[i][j][k][_W] = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
s_H[i][j][k][_E] = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
// All fluxes (and s_D) must be fully written before any thread differentiates.
__syncthreads();
dfloat rhsU = 0, rhsV = 0, rhsW = 0, rhsR = 0, rhsE = 0;
dfloat MJI = vgeo[gid + _MJI*Np];
// buoyancy term
rhsW -= R * gravity;
// loop of XI-grid lines
#pragma unroll Nq
for(int n=0;n<Nq;++n){
dfloat MJI_Dni = MJI * s_D[n][i];
dfloat MJI_Dnj = MJI * s_D[n][j];
dfloat MJI_Dnk = MJI * s_D[n][k];
rhsR += MJI_Dni * s_F[n][ j][ k][ _R];
rhsR += MJI_Dnj * s_G[i][ n][ k][ _R];
rhsR += MJI_Dnk * s_H[i][ j][ n][ _R];
rhsU += MJI_Dni * s_F[n][ j][ k][ _U];
rhsU += MJI_Dnj * s_G[i][ n][ k][ _U];
rhsU += MJI_Dnk * s_H[i][ j][ n][ _U];
rhsV += MJI_Dni * s_F[n][ j][ k][ _V];
rhsV += MJI_Dnj * s_G[i][ n][ k][ _V];
rhsV += MJI_Dnk * s_H[i][ j][ n][ _V];
rhsW += MJI_Dni * s_F[n][ j][ k][ _W];
rhsW += MJI_Dnj * s_G[i][ n][ k][ _W];
rhsW += MJI_Dnk * s_H[i][ j][ n][ _W];
rhsE += MJI_Dni * s_F[n][ j][ k][ _E];
rhsE += MJI_Dnj * s_G[i][ n][ k][ _E];
rhsE += MJI_Dnk * s_H[i][ j][ n][ _E];
}
rhs[qid + Np*_U] += rhsU;
rhs[qid + Np*_V] += rhsV;
rhs[qid + Np*_W] += rhsW;
rhs[qid + Np*_R] += rhsR;
rhs[qid + Np*_E] += rhsE;
// loop over moist variables
// FIXME: Currently just passive advection
// TODO: This should probably be unrolled by some factor
for(int m =0;m<nmoist;++m){
int s = _nstate + m;
// Barrier before reusing slot [1] of s_F/s_G/s_H as scratch for this variable.
__syncthreads();
dfloat Qmoist = Q[qid + s*Np];
dfloat fx = U * Rinv * Qmoist;
dfloat fy = V * Rinv * Qmoist;
dfloat fz = W * Rinv * Qmoist;
s_F[i][j][k][1] = MJ * (XIx * fx + XIy * fy + XIz * fz);
s_G[i][j][k][1] = MJ * (ETAx * fx + ETAy * fy + ETAz * fz);
s_H[i][j][k][1] = MJ * (ZETAx * fx + ZETAy * fy + ZETAz * fz);
__syncthreads();
dfloat rhsmoist = 0;
#pragma unroll Nq
for(int n=0;n<Nq;++n){
dfloat MJI_Dni = MJI * s_D[n][i];
dfloat MJI_Dnj = MJI * s_D[n][j];
dfloat MJI_Dnk = MJI * s_D[n][k];
rhsmoist += MJI_Dni * s_F[n][j][k][1];
rhsmoist += MJI_Dnj * s_G[i][n][k][1];
rhsmoist += MJI_Dnk * s_H[i][j][n][1];
}
rhs[qid + s*Np] += rhsmoist;
}
// Loop over trace variables
// TODO: This should probably be unrolled by some factor
dfloat rhstrace = 0;
for(int m=0;m<ntrace;++m){
int s = _nstate + nmoist + m;
__syncthreads();
dfloat Qtrace = Q[qid+s*Np];
dfloat fx = U * Rinv * Qtrace;
dfloat fy = V * Rinv * Qtrace;
dfloat fz = W * Rinv * Qtrace;
s_F[i][j][k][1] = MJ * (XIx * fx + XIy * fy + XIz * fz);
s_G[i][j][k][1] = MJ * (ETAx * fx + ETAy * fy + ETAz * fz);
s_H[i][j][k][1] = MJ * (ZETAx * fx + ZETAy * fy + ZETAz * fz);
__syncthreads();
// TODO: Prefetch MJI and rhs
rhstrace = 0;
#pragma unroll Nq
for(int n=0;n<Nq;++n){
dfloat MJI_Dni = MJI * s_D[n][i];
dfloat MJI_Dnj = MJI * s_D[n][j];
dfloat MJI_Dnk = MJI * s_D[n][k];
rhstrace += MJI_Dni * s_F[n][j][k][1];
rhstrace += MJI_Dnj * s_G[i][n][k][1];
rhstrace += MJI_Dnk * s_H[i][j][n][1];
}
rhs[qid+s*Np] += rhstrace;
}
}
// Volume RHS, version 3: same math as volumerhs_v2, but launched with a 2-D
// Nq x Nq thread block (see B3 in main) that marches k through the element.
// Per-k RHS partial sums live in register arrays r_rhs*[Nq]; only the XI/ETA
// flux planes go through shared memory, while the ZETA contribution is
// scattered from registers as each k-slab is processed.  The mass-matrix
// inverse MJI is applied in a final write-back pass.  Accumulates into `rhs`.
// NOTE: moist/trace variables are not handled here (main runs with
// nmoist = ntrace = 0).
template <int Nq, int Np, int nmoist, int ntrace, int nvar>
__global__ void volumerhs_v3(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const dfloat * __restrict__ vgeo,
const dfloat gravity,
const dfloat * __restrict__ D,
const int nelem){
__shared__ dfloat s_D[Nq][Nq];
__shared__ dfloat s_F[Nq][Nq][_nstate];
__shared__ dfloat s_G[Nq][Nq][_nstate];
// register-resident RHS accumulators, one entry per k-level of this (i,j) column
dfloat r_rhsR[Nq];
dfloat r_rhsU[Nq];
dfloat r_rhsV[Nq];
dfloat r_rhsW[Nq];
dfloat r_rhsE[Nq];
int e = blockIdx.x;
int j = threadIdx.y;
int i = threadIdx.x;
s_D[j][i] = D[j*Nq+i];
#pragma unroll Nq
for(int k=0;k<Nq;++k){
r_rhsR[k] = 0;
r_rhsU[k] = 0;
r_rhsV[k] = 0;
r_rhsW[k] = 0;
r_rhsE[k] = 0;
}
#pragma unroll Nq
for(int k=0;k<Nq;++k){
// The barrier at the loop top also publishes s_D before its first use,
// and protects s_F/s_G against being overwritten while still being read.
__syncthreads();
// Load values will need into registers
int gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJ = vgeo[gid + _MJ*Np];
dfloat XIx = vgeo[gid + _XIx*Np];
dfloat XIy = vgeo[gid + _XIy*Np];
dfloat XIz = vgeo[gid + _XIz*Np];
dfloat ETAx = vgeo[gid + _ETAx*Np];
dfloat ETAy = vgeo[gid + _ETAy*Np];
dfloat ETAz = vgeo[gid + _ETAz*Np];
dfloat ZETAx = vgeo[gid + _ZETAx*Np];
dfloat ZETAy = vgeo[gid + _ZETAy*Np];
dfloat ZETAz = vgeo[gid + _ZETAz*Np];
dfloat z = vgeo[gid + _z*Np];
int qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
dfloat R = Q[qid + _R*Np];
dfloat U = Q[qid + _U*Np];
dfloat V = Q[qid + _V*Np];
dfloat W = Q[qid + _W*Np];
dfloat E = Q[qid + _E*Np];
// pressure: total energy minus kinetic and gravitational-potential parts
dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
dfloat Rinv = 1 / R;
dfloat fluxR_x = U;
dfloat fluxU_x = Rinv * U * U + P;
dfloat fluxV_x = Rinv * U * V;
dfloat fluxW_x = Rinv * U * W;
dfloat fluxE_x = Rinv * U * (E + P);
dfloat fluxR_y = V;
dfloat fluxU_y = Rinv * V * U;
dfloat fluxV_y = Rinv * V * V + P;
dfloat fluxW_y = Rinv * V * W;
dfloat fluxE_y = Rinv * V * (E + P);
dfloat fluxR_z = W;
dfloat fluxU_z = Rinv * W * U;
dfloat fluxV_z = Rinv * W * V;
dfloat fluxW_z = Rinv * W * W + P;
dfloat fluxE_z = Rinv * W * (E + P);
// XI/ETA flux planes for this k-slab go to shared memory ...
s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
// ... while the ZETA fluxes stay in registers (only this thread needs them).
dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
// one shared access per 10 flops
#pragma unroll Nq
for(int n=0;n<Nq;++n){
dfloat Dkn = s_D[k][n];
r_rhsR[n] += Dkn * r_HR;
r_rhsU[n] += Dkn * r_HU;
r_rhsV[n] += Dkn * r_HV;
r_rhsW[n] += Dkn * r_HW;
r_rhsE[n] += Dkn * r_HE;
}
// buoyancy term (MJ is folded in here; MJI is applied at write-back)
r_rhsW[k] -= MJ * R * gravity;
__syncthreads();
// loop of XI-grid lines
#pragma unroll Nq
for(int n=0;n<Nq;++n){
dfloat Dni = s_D[n][i];
dfloat Dnj = s_D[n][j];
r_rhsR[k] += Dni * s_F[n][j][_R];
r_rhsR[k] += Dnj * s_G[i][n][_R];
r_rhsU[k] += Dni * s_F[n][j][_U];
r_rhsU[k] += Dnj * s_G[i][n][_U];
r_rhsV[k] += Dni * s_F[n][j][_V];
r_rhsV[k] += Dnj * s_G[i][n][_V];
r_rhsW[k] += Dni * s_F[n][j][_W];
r_rhsW[k] += Dnj * s_G[i][n][_W];
r_rhsE[k] += Dni * s_F[n][j][_E];
r_rhsE[k] += Dnj * s_G[i][n][_E];
}
}
// Write-back pass: apply the inverse mass matrix and accumulate into rhs.
#pragma unroll Nq
for(int k=0;k<Nq;++k){
int gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJI = vgeo[gid + _MJI*Np];
int qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
rhs[qid+_U*Np] += MJI*r_rhsU[k];
rhs[qid+_V*Np] += MJI*r_rhsV[k];
rhs[qid+_W*Np] += MJI*r_rhsW[k];
rhs[qid+_R*Np] += MJI*r_rhsR[k];
rhs[qid+_E*Np] += MJI*r_rhsE[k];
}
}
// Allocate a host buffer (*q) and a matching device buffer (*c_q) of N
// dfloats, fill the host copy with uniform random values in [base,
// base+range), and upload it to the device.  Aborts with a diagnostic if any
// allocation or the copy fails (the original ignored all error returns).
void randArray(int N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
  *q = (dfloat*) calloc(N, sizeof(dfloat));
  if(*q == NULL){
    fprintf(stderr, "randArray: host allocation of %d dfloats failed\n", N);
    exit(1);
  }
  cudaError_t err = cudaMalloc(c_q, N*sizeof(dfloat));
  if(err != cudaSuccess){
    fprintf(stderr, "randArray: cudaMalloc failed: %s\n", cudaGetErrorString(err));
    exit(1);
  }
  for(int n=0;n<N;++n){
    q[0][n] = base + drand48()*range; // drand48() in [0,1) -> [base, base+range)
  }
  err = cudaMemcpy(c_q[0], q[0], N*sizeof(dfloat), cudaMemcpyHostToDevice);
  if(err != cudaSuccess){
    fprintf(stderr, "randArray: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    exit(1);
  }
}
// Add `shift` to the host entries q[start..end) and mirror exactly that
// slice into the corresponding device buffer c_q.
void shiftEntries(int start, int end, dfloat shift, dfloat *q, dfloat *c_q){
  dfloat *p = q + start;
  dfloat *stop = q + end;
  while(p < stop){
    *p += shift;
    ++p;
  }
  size_t bytes = (size_t)(end - start)*sizeof(dfloat);
  cudaMemcpy(c_q + start, q + start, bytes, cudaMemcpyHostToDevice);
}
// Driver: builds random conservative variables Q, geometric factors vgeo and
// a differentiation matrix D, runs both volume-RHS kernel variants on
// identical inputs, and reports the maximum elementwise difference between
// their outputs.  Usage: ./program <nelem>
int main(int argc, char **argv){
  srand48(1234);
  // atoi(argv[1]) would dereference a NULL pointer if no argument was given.
  if(argc < 2){
    fprintf(stderr, "usage: %s <nelem>\n", argv[0]);
    return 1;
  }
  int nelem = atoi(argv[1]);
  if(nelem <= 0){
    fprintf(stderr, "nelem must be a positive integer (got %d)\n", nelem);
    return 1;
  }
  const int N = 4;
  const int nmoist = 0;
  const int ntrace = 0;
  const int nvar = _nstate + nmoist + ntrace;
  const int Nq = N+1;
  const int Np = Nq*Nq*Nq;
  const int Ntotal = Np*nelem*nvar;
  dfloat *Q, *c_Q;
  randArray(Ntotal, 0., 1., &Q, &c_Q);
  // Shift density and energy away from zero so Rinv and the pressure are sane.
  for(int e=0;e<nelem;++e){
    for(int n=0;n<Np;++n){
      int idR = n + _R*Np + e*nvar*Np;
      int idE = n + _E*Np + e*nvar*Np;
      Q[idR] += 2.;
      Q[idE] += 20.;
    }
  }
  cudaMemcpy(c_Q, Q, nelem*nvar*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
  const int Gtotal = Np*nelem*_nvgeo;
  dfloat *vgeo, *c_vgeo;
  randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
  // Make sure the entries of the mass matrix satisfy the inverse relation
  for(int e=0;e<nelem;++e){
    for(int n=0;n<Np;++n){
      int idMJ = n + _MJ*Np + e*_nvgeo*Np;
      int idMJI = n + _MJI*Np + e*_nvgeo*Np;
      vgeo[idMJ] += 3;
      vgeo[idMJI] = 1./vgeo[idMJ];
    }
  }
  cudaMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
  dfloat *D, *c_D;
  randArray(Nq*Nq, 1., 1., &D, &c_D);
  dfloat *rhs_v2, *c_rhs_v2;
  dfloat *rhs_v3, *c_rhs_v3;
  // Re-seed before each fill so both rhs buffers start from identical
  // random contents; the kernels accumulate (+=) into them.
  srand48(1234);
  randArray(Ntotal, 1., 1., &rhs_v2, &c_rhs_v2);
  srand48(1234);
  randArray(Ntotal, 1., 1., &rhs_v3, &c_rhs_v3);
  dim3 G(nelem,1,1);
  dim3 B2(Nq,Nq,Nq);  // v2: one thread per quadrature node
  dim3 B3(Nq,Nq,1);   // v3: one thread per (i,j) column, k folded into the thread
  int Ntests = 1;
  for(int test=0;test<Ntests;++test){
    volumerhs_v2<Nq, Np, nmoist, ntrace, nvar> <<< G, B2 >>> (c_rhs_v2, c_Q, c_vgeo, grav, c_D, nelem);
    volumerhs_v3<Nq, Np, nmoist, ntrace, nvar> <<< G, B3 >>> (c_rhs_v3, c_Q, c_vgeo, grav, c_D, nelem);
  }
  // Kernel launches do not report errors directly; query the runtime.
  cudaError_t launchErr = cudaGetLastError();
  if(launchErr != cudaSuccess){
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    return 1;
  }
  // Blocking copies also synchronize with the kernels above.
  cudaMemcpy(rhs_v2, c_rhs_v2, Ntotal*sizeof(dfloat), cudaMemcpyDeviceToHost);
  cudaMemcpy(rhs_v3, c_rhs_v3, Ntotal*sizeof(dfloat), cudaMemcpyDeviceToHost);
  dfloat maxDiff = 0;
  for(int e=0;e<nelem;++e){
    for(int v=0;v<nvar;++v){
      for(int n=0;n<Np;++n){
        int id = n + v*Np + e*nvar*Np;
        dfloat diff = fabs(rhs_v2[id]-rhs_v3[id]);
        if(diff>maxDiff)
          maxDiff = diff;
      }
    }
  }
  printf("max diff = %lg\n", maxDiff);
  cudaDeviceSynchronize();
  return 0;
}
|
81ad6b27f9b105fa156515da4d2ca18b4ba8d4ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHDeviceUtils.cuh"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
// Pointwise functor for THCudaTensor_maskedFill: writes `value` into *t
// wherever the parallel mask element is non-zero.  Applied on the device via
// THCudaTensor_pointwiseApply2.
struct TensorMaskedFillOp {
TensorMaskedFillOp(float v) : value(v) {}
__device__ __forceinline__ void operator()(float* t, float* mask) {
// Really mask should be `0` or `1` but we can't propagate errors here.
if (*mask != 0.0f) {
*t = value;
}
}
// fill value captured at construction time
float value;
};
// Sets every element of `tensor` whose corresponding `mask` element is
// non-zero to `value`.  Both tensors must live on the GPU and have the same
// number of elements; `mask` is a float tensor (non-zero == selected).
void THCudaTensor_maskedFill(THCState* state,
THCudaTensor *tensor, THCudaTensor *mask, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, tensor, mask));
THArgCheck(THCudaTensor_nElement(state, tensor) ==
THCudaTensor_nElement(state, mask),
2, "sizes do not match");
if (!THCudaTensor_pointwiseApply2(state, tensor, mask, TensorMaskedFillOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
// Pointwise functor for THCudaTensor_maskedCopy: where the mask is non-zero,
// pulls the source element whose linear index is given by the mask's
// exclusive prefix sum (so the d-th selected position receives src[d]).
struct TensorMaskedCopyOp {
TensorMaskedCopyOp(float* s) : src(s) {}
__device__ __forceinline__ void operator()(float* out, float* mask, float* maskPrefixSum) {
// Really mask should be `0` or `1` but we can't propagate errors here.
if (*mask != 0.0f) {
// We've already checked that this offset is <= 2^24, so this is ok.
*out = src[(int) *maskPrefixSum];
}
}
// Where we are copying from
float* src;
};
// Copies elements of `src` into `tensor` at the positions where `mask` is
// non-zero: the d-th selected position (in linear order) receives the d-th
// element of `src`.  The destination offsets are computed with a Thrust
// exclusive prefix sum over the (float) mask, which limits the mask size to
// 2^24 elements (the largest run of exactly-representable float integers).
void THCudaTensor_maskedCopy(THCState* state,
THCudaTensor *tensor, THCudaTensor *mask, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, mask));
long maskSize = THCudaTensor_nElement(state, mask);
long tensorSize = THCudaTensor_nElement(state, tensor);
long srcSize = THCudaTensor_nElement(state, src);
// Since we are performing a prefix sum of mask, it cannot exceed
// the size allowed in consecutive integers in float32
THArgCheck(maskSize <= (long) FLOAT32_MAX_CONSECUTIVE_INT,
3, "mask nElements exceeds single-precision float "
"consecutive integer precision size (2^24)");
// `mask` and `tensor` must have the same number of elements
THArgCheck(maskSize == tensorSize, 2,
"mask and tensor must have the same number of elements");
THCudaTensor* contigMask = THCudaTensor_newContiguous(state, mask);
// summing the 0/1 mask counts the selected positions
long oneElements = (long) THCudaTensor_sumall(state, contigMask);
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
if (oneElements > srcSize) {
THCudaTensor_free(state, contigMask);
THArgCheck(false, 2, "source nElements must be == mask `1` elements");
}
// Use a prefix sum to determine the copy locations of the masked elements
THCudaTensor* maskPrefixSum = THCudaTensor_new(state);
THCudaTensor_resizeAs(state, maskPrefixSum, contigMask);
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
THCudaTensor* contigSrc = THCudaTensor_newContiguous(state, src);
thrust::device_ptr<float>
maskData(THCudaTensor_data(state, contigMask));
thrust::device_ptr<float>
maskPrefixSumData(THCudaTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaTensor_nElement(state, contigMask),
maskPrefixSumData);
// update `tensor` where `mask` == 1 but pull from `src` at
// maskPrefixSum
bool status = THCudaTensor_pointwiseApply3(
state, tensor, contigMask, maskPrefixSum,
TensorMaskedCopyOp(THCudaTensor_data(state, contigSrc)));
// release temporaries before reporting any apply failure
THCudaTensor_free(state, contigSrc);
THCudaTensor_free(state, maskPrefixSum);
THCudaTensor_free(state, contigMask);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(hipGetLastError());
}
// Pointwise functor for THCudaTensor_maskedSelect: where the mask is
// non-zero, scatters *in into the output at the position given by the mask's
// exclusive prefix sum (so selected elements are packed densely, in order).
struct TensorMaskedSelectOp {
TensorMaskedSelectOp(float* t) : out(t) {}
__device__ __forceinline__ void operator()(float* mask, float* maskPrefixSum, float* in) {
// Really mask should be `0` or `1` but we can't propagate errors here.
if (*mask != 0.0f) {
out[(int) *maskPrefixSum] = *in;
}
}
// Where we are writing the packed selection to
float* out;
};
// Packs the elements of `src` selected by non-zero `mask` entries into
// `tensor`, which is resized to a 1-D tensor of exactly that many elements.
// Output positions come from a Thrust exclusive prefix sum over the (float)
// mask, which limits the mask size to 2^24 elements.
void THCudaTensor_maskedSelect(THCState* state,
THCudaTensor *tensor, THCudaTensor *src, THCudaTensor *mask)
{
THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, mask));
THArgCheck(THCudaTensor_nElement(state, mask) == THCudaTensor_nElement(state, src),
2, "sizes do not match");
// Since we are performing a prefix sum of mask, it cannot exceed
// the size allowed in consecutive integers in float32
THArgCheck(THCudaTensor_nElement(state, mask) <=
(long) FLOAT32_MAX_CONSECUTIVE_INT,
3, "mask nElements exceeds single-precision float "
"consecutive integer precision size (2^24)");
// Determine our output size
THCudaTensor* contigMask = THCudaTensor_newContiguous(state, mask);
long totalElements = (long) THCudaTensor_sumall(state, contigMask);
// This should be contiguous already, so no need to make it contig
// for the apply kernel
THCudaTensor_resize1d(state, tensor, totalElements);
// Use a prefix sum to determine the output locations of the masked elements
THCudaTensor* maskPrefixSum = THCudaTensor_new(state);
THCudaTensor_resizeAs(state, maskPrefixSum, contigMask);
thrust::device_ptr<float>
maskData(THCudaTensor_data(state, contigMask));
thrust::device_ptr<float>
maskPrefixSumData(THCudaTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaTensor_nElement(state, contigMask),
maskPrefixSumData);
// Then copy over the masked elements at their desired output index
bool status = THCudaTensor_pointwiseApply3(
state, contigMask, maskPrefixSum,
src, TensorMaskedSelectOp(THCudaTensor_data(state, tensor)));
THCudaTensor_free(state, contigMask);
THCudaTensor_free(state, maskPrefixSum);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(hipGetLastError());
}
// Convenience overload taking a CPU byte mask: converts it to a same-sized
// CUDA float mask, then defers to THCudaTensor_maskedFill.
void THCudaTensor_maskedFillByte(THCState* state, THCudaTensor *tensor, THByteTensor *mask, float value)
{
THAssert(THCudaTensor_checkGPU(state, 1, tensor));
THLongStorage* maskSize = THByteTensor_newSizeOf(mask);
THCudaTensor* maskCuda = THCudaTensor_newWithSize(state, maskSize, NULL);
THLongStorage_free(maskSize);
THCudaTensor_copyByte(state, maskCuda, mask);
THCudaTensor_maskedFill(state, tensor, maskCuda, value);
THCudaTensor_free(state, maskCuda);
}
// Convenience overload taking a CPU byte mask: converts it to a same-sized
// CUDA float mask, then defers to THCudaTensor_maskedCopy.
void THCudaTensor_maskedCopyByte(THCState* state, THCudaTensor *tensor, THByteTensor *mask, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, tensor, src));
THLongStorage* maskSize = THByteTensor_newSizeOf(mask);
THCudaTensor* maskCuda = THCudaTensor_newWithSize(state, maskSize, NULL);
THLongStorage_free(maskSize);
THCudaTensor_copyByte(state, maskCuda, mask);
THCudaTensor_maskedCopy(state, tensor, maskCuda, src);
THCudaTensor_free(state, maskCuda);
}
// Convenience overload taking a CPU byte mask: converts it to a same-sized
// CUDA float mask, then defers to THCudaTensor_maskedSelect.
void THCudaTensor_maskedSelectByte(THCState* state, THCudaTensor *tensor, THCudaTensor *src, THByteTensor *mask)
{
THAssert(THCudaTensor_checkGPU(state, 2, tensor, src));
THLongStorage* maskSize = THByteTensor_newSizeOf(mask);
THCudaTensor* maskCuda = THCudaTensor_newWithSize(state, maskSize, NULL);
THLongStorage_free(maskSize);
THCudaTensor_copyByte(state, maskCuda, mask);
THCudaTensor_maskedSelect(state, tensor, src, maskCuda);
THCudaTensor_free(state, maskCuda);
}
| 81ad6b27f9b105fa156515da4d2ca18b4ba8d4ac.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCDeviceUtils.cuh"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
// Pointwise functor for THCudaTensor_maskedFill: writes `value` into *t
// wherever the parallel mask element is non-zero.  Applied on the device via
// THCudaTensor_pointwiseApply2.
struct TensorMaskedFillOp {
TensorMaskedFillOp(float v) : value(v) {}
__device__ __forceinline__ void operator()(float* t, float* mask) {
// Really mask should be `0` or `1` but we can't propagate errors here.
if (*mask != 0.0f) {
*t = value;
}
}
// fill value captured at construction time
float value;
};
// Sets every element of `tensor` whose corresponding `mask` element is
// non-zero to `value`.  Both tensors must live on the GPU and have the same
// number of elements; `mask` is a float tensor (non-zero == selected).
void THCudaTensor_maskedFill(THCState* state,
THCudaTensor *tensor, THCudaTensor *mask, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, tensor, mask));
THArgCheck(THCudaTensor_nElement(state, tensor) ==
THCudaTensor_nElement(state, mask),
2, "sizes do not match");
if (!THCudaTensor_pointwiseApply2(state, tensor, mask, TensorMaskedFillOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
// Pointwise functor for THCudaTensor_maskedCopy: where the mask is non-zero,
// pulls the source element whose linear index is given by the mask's
// exclusive prefix sum (so the d-th selected position receives src[d]).
struct TensorMaskedCopyOp {
TensorMaskedCopyOp(float* s) : src(s) {}
__device__ __forceinline__ void operator()(float* out, float* mask, float* maskPrefixSum) {
// Really mask should be `0` or `1` but we can't propagate errors here.
if (*mask != 0.0f) {
// We've already checked that this offset is <= 2^24, so this is ok.
*out = src[(int) *maskPrefixSum];
}
}
// Where we are copying from
float* src;
};
// Copies elements of `src` into `tensor` at the positions where `mask` is
// non-zero: the d-th selected position (in linear order) receives the d-th
// element of `src`.  The destination offsets are computed with a Thrust
// exclusive prefix sum over the (float) mask, which limits the mask size to
// 2^24 elements (the largest run of exactly-representable float integers).
void THCudaTensor_maskedCopy(THCState* state,
THCudaTensor *tensor, THCudaTensor *mask, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, mask));
long maskSize = THCudaTensor_nElement(state, mask);
long tensorSize = THCudaTensor_nElement(state, tensor);
long srcSize = THCudaTensor_nElement(state, src);
// Since we are performing a prefix sum of mask, it cannot exceed
// the size allowed in consecutive integers in float32
THArgCheck(maskSize <= (long) FLOAT32_MAX_CONSECUTIVE_INT,
3, "mask nElements exceeds single-precision float "
"consecutive integer precision size (2^24)");
// `mask` and `tensor` must have the same number of elements
THArgCheck(maskSize == tensorSize, 2,
"mask and tensor must have the same number of elements");
THCudaTensor* contigMask = THCudaTensor_newContiguous(state, mask);
// summing the 0/1 mask counts the selected positions
long oneElements = (long) THCudaTensor_sumall(state, contigMask);
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
if (oneElements > srcSize) {
THCudaTensor_free(state, contigMask);
THArgCheck(false, 2, "source nElements must be == mask `1` elements");
}
// Use a prefix sum to determine the copy locations of the masked elements
THCudaTensor* maskPrefixSum = THCudaTensor_new(state);
THCudaTensor_resizeAs(state, maskPrefixSum, contigMask);
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
THCudaTensor* contigSrc = THCudaTensor_newContiguous(state, src);
thrust::device_ptr<float>
maskData(THCudaTensor_data(state, contigMask));
thrust::device_ptr<float>
maskPrefixSumData(THCudaTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaTensor_nElement(state, contigMask),
maskPrefixSumData);
// update `tensor` where `mask` == 1 but pull from `src` at
// maskPrefixSum
bool status = THCudaTensor_pointwiseApply3(
state, tensor, contigMask, maskPrefixSum,
TensorMaskedCopyOp(THCudaTensor_data(state, contigSrc)));
// release temporaries before reporting any apply failure
THCudaTensor_free(state, contigSrc);
THCudaTensor_free(state, maskPrefixSum);
THCudaTensor_free(state, contigMask);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(cudaGetLastError());
}
// Pointwise functor for THCudaTensor_maskedSelect: where the mask is
// non-zero, scatters *in into the output at the position given by the mask's
// exclusive prefix sum (so selected elements are packed densely, in order).
struct TensorMaskedSelectOp {
TensorMaskedSelectOp(float* t) : out(t) {}
__device__ __forceinline__ void operator()(float* mask, float* maskPrefixSum, float* in) {
// Really mask should be `0` or `1` but we can't propagate errors here.
if (*mask != 0.0f) {
out[(int) *maskPrefixSum] = *in;
}
}
// Where we are writing the packed selection to
float* out;
};
// Packs the elements of `src` selected by non-zero `mask` entries into
// `tensor`, which is resized to a 1-D tensor of exactly that many elements.
// Output positions come from a Thrust exclusive prefix sum over the (float)
// mask, which limits the mask size to 2^24 elements.
void THCudaTensor_maskedSelect(THCState* state,
THCudaTensor *tensor, THCudaTensor *src, THCudaTensor *mask)
{
THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, mask));
THArgCheck(THCudaTensor_nElement(state, mask) == THCudaTensor_nElement(state, src),
2, "sizes do not match");
// Since we are performing a prefix sum of mask, it cannot exceed
// the size allowed in consecutive integers in float32
THArgCheck(THCudaTensor_nElement(state, mask) <=
(long) FLOAT32_MAX_CONSECUTIVE_INT,
3, "mask nElements exceeds single-precision float "
"consecutive integer precision size (2^24)");
// Determine our output size
THCudaTensor* contigMask = THCudaTensor_newContiguous(state, mask);
long totalElements = (long) THCudaTensor_sumall(state, contigMask);
// This should be contiguous already, so no need to make it contig
// for the apply kernel
THCudaTensor_resize1d(state, tensor, totalElements);
// Use a prefix sum to determine the output locations of the masked elements
THCudaTensor* maskPrefixSum = THCudaTensor_new(state);
THCudaTensor_resizeAs(state, maskPrefixSum, contigMask);
thrust::device_ptr<float>
maskData(THCudaTensor_data(state, contigMask));
thrust::device_ptr<float>
maskPrefixSumData(THCudaTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaTensor_nElement(state, contigMask),
maskPrefixSumData);
// Then copy over the masked elements at their desired output index
bool status = THCudaTensor_pointwiseApply3(
state, contigMask, maskPrefixSum,
src, TensorMaskedSelectOp(THCudaTensor_data(state, tensor)));
THCudaTensor_free(state, contigMask);
THCudaTensor_free(state, maskPrefixSum);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(cudaGetLastError());
}
// Convenience overload taking a CPU byte mask: converts it to a same-sized
// CUDA float mask, then defers to THCudaTensor_maskedFill.
void THCudaTensor_maskedFillByte(THCState* state, THCudaTensor *tensor, THByteTensor *mask, float value)
{
THAssert(THCudaTensor_checkGPU(state, 1, tensor));
THLongStorage* maskSize = THByteTensor_newSizeOf(mask);
THCudaTensor* maskCuda = THCudaTensor_newWithSize(state, maskSize, NULL);
THLongStorage_free(maskSize);
THCudaTensor_copyByte(state, maskCuda, mask);
THCudaTensor_maskedFill(state, tensor, maskCuda, value);
THCudaTensor_free(state, maskCuda);
}
// Convenience overload taking a CPU byte mask: converts it to a same-sized
// CUDA float mask, then defers to THCudaTensor_maskedCopy.
void THCudaTensor_maskedCopyByte(THCState* state, THCudaTensor *tensor, THByteTensor *mask, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, tensor, src));
THLongStorage* maskSize = THByteTensor_newSizeOf(mask);
THCudaTensor* maskCuda = THCudaTensor_newWithSize(state, maskSize, NULL);
THLongStorage_free(maskSize);
THCudaTensor_copyByte(state, maskCuda, mask);
THCudaTensor_maskedCopy(state, tensor, maskCuda, src);
THCudaTensor_free(state, maskCuda);
}
// Convenience overload taking a CPU byte mask: converts it to a same-sized
// CUDA float mask, then defers to THCudaTensor_maskedSelect.
void THCudaTensor_maskedSelectByte(THCState* state, THCudaTensor *tensor, THCudaTensor *src, THByteTensor *mask)
{
THAssert(THCudaTensor_checkGPU(state, 2, tensor, src));
THLongStorage* maskSize = THByteTensor_newSizeOf(mask);
THCudaTensor* maskCuda = THCudaTensor_newWithSize(state, maskSize, NULL);
THLongStorage_free(maskSize);
THCudaTensor_copyByte(state, maskCuda, mask);
THCudaTensor_maskedSelect(state, tensor, src, maskCuda);
THCudaTensor_free(state, maskCuda);
}
|
5919776d5baeb97c28be8c86c7f0f299bb19d19b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
============================================================================
Name : Teste.cu
Author :
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
static void CheckCudaErrorAux(const char *, unsigned, const char *,
hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
/**
* Host function that copies the data and launches the work on GPU
*/
/**
 * Kernel that replaces each element of `data` with its reciprocal.
 * One thread per element; threads past `vectorSize` exit via the bounds guard.
 */
__global__ void reciprocalKernel(float *data, unsigned vectorSize) {
	unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < vectorSize)
		data[idx] = 1.0f / data[idx]; // 1.0f keeps the divide in single precision
}
/*
============================================================================
Name : Teste.cu
Author :
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
static void CheckCudaErrorAux(const char *, unsigned, const char *,
cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
/**
* Host function that copies the data and launches the work on GPU
*/
/**
 * CUDA kernel that replaces each element of `data` with its reciprocal.
 * One thread per element; threads past `vectorSize` exit via the bounds guard.
 */
__global__ void reciprocalKernel(float *data, unsigned vectorSize) {
	unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < vectorSize)
		data[idx] = 1.0f / data[idx]; // 1.0f keeps the divide in single precision
}
37c17341821bcaa08e2765145748bcf89c818edc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-FileType: SOURCE
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the license found in the LICENSE.txt file in the root
* directory of this source tree.
*/
// =======
// Headers
// =======
#include "./cublas_interface.h"
// ================
// cublas interface
// ================
/// \note The implementation in the \c cu file is wrapped inside the
/// namepsace clause. This is not necessary in general, however, it
/// is needed to avoid the old gcc compiler error (this is a gcc
/// bug) which complains "no instance of function template matches
/// the argument list const float".
namespace cublas_interface
{
    // ===========
    // cublasXgemv (float)
    // ===========
    /// \brief A template wrapper for \c hipblasSgemv.
    ///
    /// Matrix-vector product y = alpha * op(A) * x + beta * y for a
    /// column-major m-by-n matrix A (standard BLAS gemv contract).
    template<>
    hipblasStatus_t cublasXgemv<float>(
            hipblasHandle_t handle,
            hipblasOperation_t trans,
            int m,
            int n,
            const float* alpha,
            const float* A,
            int lda,
            const float* x,
            int incx,
            const float* beta,
            float* y,
            int incy)
    {
        return hipblasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta,
                           y, incy);
    }


    // ===========
    // cublasXgemv (double)
    // ===========

    /// \brief A template wrapper for \c hipblasDgemv.
    ///
    /// Double-precision counterpart of the \c float specialization above.
    template<>
    hipblasStatus_t cublasXgemv<double>(
            hipblasHandle_t handle,
            hipblasOperation_t trans,
            int m,
            int n,
            const double* alpha,
            const double* A,
            int lda,
            const double* x,
            int incx,
            const double* beta,
            double* y,
            int incy)
    {
        return hipblasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta,
                           y, incy);
    }


    // ===========
    // cublasXcopy (float)
    // ===========

    /// \brief A template wrapper for \c hipblasScopy.
    ///
    /// Copies n elements of x (stride incx) into y (stride incy).
    template<>
    hipblasStatus_t cublasXcopy<float>(
            hipblasHandle_t handle,
            int n,
            const float* x,
            int incx,
            float* y,
            int incy)
    {
        return hipblasScopy(handle, n, x, incx, y, incy);
    }


    // ===========
    // cublasXcopy (double)
    // ===========

    /// \brief A template wrapper for \c hipblasDcopy.
    ///
    /// Double-precision counterpart of the \c float specialization above.
    template<>
    hipblasStatus_t cublasXcopy<double>(
            hipblasHandle_t handle,
            int n,
            const double* x,
            int incx,
            double* y,
            int incy)
    {
        return hipblasDcopy(handle, n, x, incx, y, incy);
    }


    // ===========
    // cublasXaxpy (float)
    // ===========

    /// \brief A template wrapper for \c hipblasSaxpy
    ///
    /// Computes y = alpha * x + y over n strided elements.
    template<>
    hipblasStatus_t cublasXaxpy<float>(
            hipblasHandle_t handle,
            int n,
            const float *alpha,
            const float *x,
            int incx,
            float *y,
            int incy)
    {
        return hipblasSaxpy(handle, n, alpha, x, incx, y, incy);
    }


    // ===========
    // cublasXaxpy (double)
    // ===========

    /// \brief A template wrapper for \c hipblasDaxpy
    ///
    /// Double-precision counterpart of the \c float specialization above.
    template<>
    hipblasStatus_t cublasXaxpy<double>(
            hipblasHandle_t handle,
            int n,
            const double *alpha,
            const double *x,
            int incx,
            double *y,
            int incy)
    {
        return hipblasDaxpy(handle, n, alpha, x, incx, y, incy);
    }


    // ==========
    // cublasXdot (float)
    // ==========

    /// \brief A template wrapper for \c hipblasSdot
    ///
    /// Writes the dot product of x and y into *result.
    template<>
    hipblasStatus_t cublasXdot<float>(
            hipblasHandle_t handle,
            int n,
            const float *x,
            int incx,
            const float *y,
            int incy,
            float *result)
    {
        return hipblasSdot(handle, n, x, incx, y, incy, result);
    }


    // ==========
    // cublasXdot (double)
    // ==========

    /// \brief A template wrapper for \c hipblasDdot
    ///
    /// Double-precision counterpart of the \c float specialization above.
    template<>
    hipblasStatus_t cublasXdot<double>(
            hipblasHandle_t handle,
            int n,
            const double *x,
            int incx,
            const double *y,
            int incy,
            double *result)
    {
        return hipblasDdot(handle, n, x, incx, y, incy, result);
    }


    // ===========
    // cublasXnrm2 (float)
    // ===========

    /// \brief A template wrapper to \c hipblasSnrm2
    ///
    /// Writes the Euclidean norm of x into *result.
    template<>
    hipblasStatus_t cublasXnrm2<float>(
            hipblasHandle_t handle,
            int n,
            const float *x,
            int incx,
            float *result)
    {
        return hipblasSnrm2(handle, n, x, incx, result);
    }


    // ===========
    // cublasXnrm2 (double)
    // ===========

    /// \brief A template wrapper to \c hipblasDnrm2
    ///
    /// Double-precision counterpart of the \c float specialization above.
    template<>
    hipblasStatus_t cublasXnrm2<double>(
            hipblasHandle_t handle,
            int n,
            const double *x,
            int incx,
            double *result)
    {
        return hipblasDnrm2(handle, n, x, incx, result);
    }


    // ===========
    // cublasXscal (float)
    // ===========

    /// \brief A template wrapper for \c hipblasSscal.
    ///
    /// Scales x in place: x = alpha * x.
    template<>
    hipblasStatus_t cublasXscal<float>(
            hipblasHandle_t handle,
            int n,
            const float *alpha,
            float *x,
            int incx)
    {
        return hipblasSscal(handle, n, alpha, x, incx);
    }


    // ===========
    // cublasXscal (double)
    // ===========

    /// \brief A template wrapper for \c hipblasDscal.
    ///
    /// Double-precision counterpart of the \c float specialization above.
    template<>
    hipblasStatus_t cublasXscal<double>(
            hipblasHandle_t handle,
            int n,
            const double *alpha,
            double *x,
            int incx)
    {
        return hipblasDscal(handle, n, alpha, x, incx);
    }
}  // namespace cublas_interface
| 37c17341821bcaa08e2765145748bcf89c818edc.cu | /*
* SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-FileType: SOURCE
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the license found in the LICENSE.txt file in the root
* directory of this source tree.
*/
// =======
// Headers
// =======
#include "./cublas_interface.h"
// ================
// cublas interface
// ================
/// \note The implementation in the \c cu file is wrapped inside the
/// namepsace clause. This is not necessary in general, however, it
/// is needed to avoid the old gcc compiler error (this is a gcc
/// bug) which complains "no instance of function template matches
/// the argument list const float".
namespace cublas_interface
{
// ===========
// cublasXgemv (float)
// ===========
/// \brief A template wrapper for \c cublasSgemv.
///
template<>
cublasStatus_t cublasXgemv<float>(
cublasHandle_t handle,
cublasOperation_t trans,
int m,
int n,
const float* alpha,
const float* A,
int lda,
const float* x,
int incx,
const float* beta,
float* y,
int incy)
{
return cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta,
y, incy);
}
// ===========
// cublasXgemv (double)
// ===========
/// \brief A template wrapper for \c cublasDgemv.
///
template<>
cublasStatus_t cublasXgemv<double>(
cublasHandle_t handle,
cublasOperation_t trans,
int m,
int n,
const double* alpha,
const double* A,
int lda,
const double* x,
int incx,
const double* beta,
double* y,
int incy)
{
return cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta,
y, incy);
}
// ===========
// cublasXcopy (float)
// ===========
/// \brief A template wrapper for \c cublasScopy.
///
template<>
cublasStatus_t cublasXcopy<float>(
cublasHandle_t handle,
int n,
const float* x,
int incx,
float* y,
int incy)
{
return cublasScopy(handle, n, x, incx, y, incy);
}
// ===========
// cublasXcopy (double)
// ===========
/// \brief A template wrapper for \c cublasDcopy.
///
template<>
cublasStatus_t cublasXcopy<double>(
cublasHandle_t handle,
int n,
const double* x,
int incx,
double* y,
int incy)
{
return cublasDcopy(handle, n, x, incx, y, incy);
}
// ===========
// cublasXaxpy (float)
// ===========
/// \brief A template wrapper for \c cublasSaxpy
///
template<>
cublasStatus_t cublasXaxpy<float>(
cublasHandle_t handle,
int n,
const float *alpha,
const float *x,
int incx,
float *y,
int incy)
{
return cublasSaxpy(handle, n, alpha, x, incx, y, incy);
}
// ===========
// cublasXaxpy (double)
// ===========
/// \brief A template wrapper for \c cublasDaxpy
///
template<>
cublasStatus_t cublasXaxpy<double>(
cublasHandle_t handle,
int n,
const double *alpha,
const double *x,
int incx,
double *y,
int incy)
{
return cublasDaxpy(handle, n, alpha, x, incx, y, incy);
}
// ==========
// cublasXdot (float)
// ==========
/// \brief A template wrapper for \c cublasSdot
///
template<>
cublasStatus_t cublasXdot<float>(
cublasHandle_t handle,
int n,
const float *x,
int incx,
const float *y,
int incy,
float *result)
{
return cublasSdot(handle, n, x, incx, y, incy, result);
}
// ==========
// cublasXdot (double)
// ==========
/// \brief A template wrapper for \c cublasDdot
///
template<>
cublasStatus_t cublasXdot<double>(
cublasHandle_t handle,
int n,
const double *x,
int incx,
const double *y,
int incy,
double *result)
{
return cublasDdot(handle, n, x, incx, y, incy, result);
}
// ===========
// cublasXnrm2 (float)
// ===========
/// \brief A template wrapper to \c cublasSnrm2
///
template<>
cublasStatus_t cublasXnrm2<float>(
cublasHandle_t handle,
int n,
const float *x,
int incx,
float *result)
{
return cublasSnrm2(handle, n, x, incx, result);
}
// ===========
// cublasXnrm2 (double)
// ===========
/// \brief A template wrapper to \c cublasDnrm2
///
template<>
cublasStatus_t cublasXnrm2<double>(
cublasHandle_t handle,
int n,
const double *x,
int incx,
double *result)
{
return cublasDnrm2(handle, n, x, incx, result);
}
// ===========
// cublasXscal (float)
// ===========
/// \brief A template wrapper for \c cublasSscal.
///
template<>
cublasStatus_t cublasXscal<float>(
cublasHandle_t handle,
int n,
const float *alpha,
float *x,
int incx)
{
return cublasSscal(handle, n, alpha, x, incx);
}
// ===========
// cublasXscal (double)
// ===========
/// \brief A template wrapper for \c cublasDscal.
///
template<>
cublasStatus_t cublasXscal<double>(
cublasHandle_t handle,
int n,
const double *alpha,
double *x,
int incx)
{
return cublasDscal(handle, n, alpha, x, incx);
}
} // namespace cublas_interface
|
26954a82bd4b2f8663e5efa729c8a01be1556574.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#define BLOCKSIZE 512
// NOTE: If use constant number such as 1. or 2., must use scalar_t(1.) or scalar_t(2.), or the values will be casted into double type.
// kernel function for forward and backward
template<typename scalar_t>
__global__ void SwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
activations[i] = val / (one + expf(-val));
}
}
template<typename scalar_t>
__global__ void SwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
grad_feat[i] = (one + val / (one + expf(val))) / (one + expf(-val));
grad_feat[i] *= grad[i];
}
}
namespace swish_space {
template<typename scalar_t>
__forceinline__ __device__ scalar_t ReLU6(scalar_t val) {
const scalar_t zero(0.);
const scalar_t six(6.);
scalar_t res = val;
if (res < zero) res = zero;
if (res > six) res = six;
return res;
}
}
template<typename scalar_t>
__global__ void HSwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
activations[i] = val * swish_space::ReLU6(val + three) * one_six;
}
}
template<typename scalar_t>
__global__ void HSwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t zero(0.);
const scalar_t _three(-3.);
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
grad_feat[i] = (swish_space::ReLU6(val + three) * one_six + ((val > _three && val < three) ? one_six : zero) * val) * grad[i];
}
}
// cuda forward and backward
at::Tensor Swish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(hipGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "swish forward", [&] {
hipLaunchKernelGGL(( SwishForward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return activations;
}
at::Tensor Swish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
// THCCeilDiv(num_samples, BLOCKSIZE), 4096
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "swish backwrd", [&] {
hipLaunchKernelGGL(( SwishBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return grad_feat;
}
at::Tensor HSwish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(hipGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "hswish forward", [&] {
hipLaunchKernelGGL(( HSwishForward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return activations;
}
at::Tensor HSwish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "hswish backwrd", [&] {
hipLaunchKernelGGL(( HSwishBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return grad_feat;
}
// python inferface
at::Tensor Swish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_forward_cuda(feat);
}
at::Tensor Swish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_backward_cuda(grad, feat);
}
at::Tensor HSwish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_forward_cuda(feat);
}
at::Tensor HSwish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_backward_cuda(grad, feat);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("swish_forward", &Swish_forward, "swish forward");
m.def("swish_backward", &Swish_backward, "swish backward");
m.def("hswish_forward", &HSwish_forward, "hswish forward");
m.def("hswish_backward", &HSwish_backward, "hswish backward");
}
| 26954a82bd4b2f8663e5efa729c8a01be1556574.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#define BLOCKSIZE 512
// NOTE: If use constant number such as 1. or 2., must use scalar_t(1.) or scalar_t(2.), or the values will be casted into double type.
// kernel function for forward and backward
template<typename scalar_t>
__global__ void SwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
activations[i] = val / (one + expf(-val));
}
}
template<typename scalar_t>
__global__ void SwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
grad_feat[i] = (one + val / (one + expf(val))) / (one + expf(-val));
grad_feat[i] *= grad[i];
}
}
namespace swish_space {
template<typename scalar_t>
__forceinline__ __device__ scalar_t ReLU6(scalar_t val) {
const scalar_t zero(0.);
const scalar_t six(6.);
scalar_t res = val;
if (res < zero) res = zero;
if (res > six) res = six;
return res;
}
}
template<typename scalar_t>
__global__ void HSwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
activations[i] = val * swish_space::ReLU6(val + three) * one_six;
}
}
template<typename scalar_t>
__global__ void HSwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t zero(0.);
const scalar_t _three(-3.);
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
grad_feat[i] = (swish_space::ReLU6(val + three) * one_six + ((val > _three && val < three) ? one_six : zero) * val) * grad[i];
}
}
// cuda forward and backward
at::Tensor Swish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(cudaGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "swish forward", [&] {
SwishForward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return activations;
}
at::Tensor Swish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
// THCCeilDiv(num_samples, BLOCKSIZE), 4096
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "swish backwrd", [&] {
SwishBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return grad_feat;
}
at::Tensor HSwish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(cudaGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "hswish forward", [&] {
HSwishForward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return activations;
}
at::Tensor HSwish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "hswish backwrd", [&] {
HSwishBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// python inferface
at::Tensor Swish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_forward_cuda(feat);
}
at::Tensor Swish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_backward_cuda(grad, feat);
}
at::Tensor HSwish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_forward_cuda(feat);
}
at::Tensor HSwish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_backward_cuda(grad, feat);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("swish_forward", &Swish_forward, "swish forward");
m.def("swish_backward", &Swish_backward, "swish backward");
m.def("hswish_forward", &HSwish_forward, "hswish forward");
m.def("hswish_backward", &HSwish_backward, "hswish backward");
}
|
c9dbafea462a0aeea4e0c8aa31e0c80ec9e53b68.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --boogie-file=${KERNEL_DIR}/axioms.bpl --no-inline
//error: possible null pointer access
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 8
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
// Static pointers to device functions
__device__ funcType p_mul_func = multiplyByTwo;
__device__ funcType p_div_func = divideByTwo;
__global__ void foo(float *v, funcType f, unsigned int size, int i)
{
assert(i != 0);
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
void *x = (void*)f; /*ptr_to_ptr*/
if (i == 0) //*the null pointer occurs when i ==0, this is the case*//
x = x + 5;
funcType g = (funcType)x;
if (tid < size)
{
v[tid] = (*g)(v, tid);
}
}
int main(){
float* w;
float* dev_w;
int nondet;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
hipMalloc((void**)&dev_w, size);
hipMemcpy(dev_w,w, size,hipMemcpyHostToDevice);
funcType host_f;
hipMemcpyFromSymbol( &host_f, p_div_func, sizeof( funcType ) );
funcType dev_f = host_f;
hipLaunchKernelGGL(( foo) , dim3(1),dim3(N), 0, 0, dev_w, dev_f, N, nondet);
hipDeviceSynchronize();
hipMemcpy(w,dev_w,size,hipMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
}
free(w);
return EXIT_SUCCESS;
}
| c9dbafea462a0aeea4e0c8aa31e0c80ec9e53b68.cu | #include <call_kernel.h>
//xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --boogie-file=${KERNEL_DIR}/axioms.bpl --no-inline
//error: possible null pointer access
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#define N 8
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
// Static pointers to device functions
__device__ funcType p_mul_func = multiplyByTwo;
__device__ funcType p_div_func = divideByTwo;
__global__ void foo(float *v, funcType f, unsigned int size, int i)
{
assert(i != 0);
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
void *x = (void*)f; /*ptr_to_ptr*/
if (i == 0) //*the null pointer occurs when i ==0, this is the case*//
x = x + 5;
funcType g = (funcType)x;
if (tid < size)
{
v[tid] = (*g)(v, tid);
}
}
int main(){
float* w;
float* dev_w;
int nondet;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
cudaMalloc((void**)&dev_w, size);
cudaMemcpy(dev_w,w, size,cudaMemcpyHostToDevice);
funcType host_f;
cudaMemcpyFromSymbol( &host_f, p_div_func, sizeof( funcType ) );
funcType dev_f = host_f;
foo <<<1,N>>>(dev_w, dev_f, N, nondet);
cudaThreadSynchronize();
cudaMemcpy(w,dev_w,size,cudaMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
}
free(w);
return EXIT_SUCCESS;
}
|
cdf4c385c44531f4fa30800529d49f0817fc24ac.hip | // !!! This is a file automatically generated by hipify!!!
/* game.cu
* Jonathan Lehman
* April 17, 2012
*
* Compile with: nvcc -o game game.cu
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__ void queen(long*, int);
__device__ void sumBlocks(long *);
void checkArgs(int, char**, int);
void checkGPUCapabilities(int, int, int, int, int);
double getTime();
//set board size
#ifndef _N_
#define _N_ 4
#endif
// Keep track of the gpu time.
hipEvent_t start, stop;
float elapsedTime;
// Keep track of the CPU time.
double startTime, stopTime;
//array for block sums
long *a;
int main(int argc, char *argv[]){
long *dev_a;
//check validity of arguments (should be no arguments)
checkArgs(argc, argv, 1);
int gW, gH, numberBlocks;
//calculate grid width based on factor N,
gW = pow(_N_, numBX);
//depends on if N is even or odd
int sizePerYSeg = (_N_ / 2) + (_N_ % 2);
gH = sizePerYSeg * numBY;
numberBlocks = gW * gH;
//check that GPU can handle arguments
checkGPUCapabilities(gW, gH, _N_, _N_, numberBlocks);
/* Initialize the source arrays here. */
a = new long[numberBlocks];
/* Allocate global device memory. */
hipMalloc((void **)&dev_a, sizeof(long) * numberBlocks);
/* Start the timer. */
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
/* Execute the kernel. */
dim3 block(_N_, _N_); //threads w x h
dim3 grid(gW, gH); //blocks w x h
hipLaunchKernelGGL(( queen), dim3(grid), dim3(block), 0, 0, dev_a, sizePerYSeg);
/* Wait for the kernel to complete. Needed for timing. */
hipDeviceSynchronize();
/* Stop the timer and print the resulting time. */
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
/* Get result from device. */
hipMemcpy(a, dev_a, sizeof(long) * numberBlocks, hipMemcpyDeviceToHost);
//print any cuda error messages
const char* errorString = hipGetErrorString(hipGetLastError());
printf("GPU Error: %s\n", errorString);
if(sumOnGPU){
printf("Number of Solutions:%d\n", a[0]);
//add cpu time and gpu time and print result
printf( "GPU Time/Total Time: %f secs\n", (elapsedTime / 1000.0));
}
else{
/* Start the CPU timer. */
startTime = getTime();
int sum = 0;
//check if N is even or odd, then calculate sum, which is number of solutions
if(_N_ % 2 == 0){
for(int i = 0; i < numberBlocks; i++){
sum+= a[i];
}
sum *= 2;
}
else{
int numBlocksPerSeg = numberBlocks / numBY;
int rowSizeOfGrid = pow(_N_, numBX);
for(int j = 0; j < numBY; j++){
int start = j * numBlocksPerSeg;
for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
sum+= a[i];
}
}
sum *= 2;
//add last block row of sums for each Y block
for(int j = 0; j < numBY; j++){
for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
sum+= a[i];
}
}
}
/* Stop the CPU timer */
stopTime = getTime();
double totalTime = stopTime - startTime;
printf("Number of Solutions: %d\n", sum);
//add cpu time and gpu time and print result
printf( "GPU Time: %f secs\nCPU Time: %f secs\nTotal Time: %f secs\n", (elapsedTime / 1000.0), totalTime, (elapsedTime / 1000.0) + totalTime );
}
//destroy cuda event
hipEventDestroy(start);
hipEventDestroy(stop);
/* Free the allocated device memory. */
hipFree(dev_a);
//free allocated host memory
free(a);
}
__global__
void queen(long *a, int sizePerYSeg){
__shared__ long solutions[_N_][_N_];
__shared__ char tuple[_N_][_N_][_N_];
int totalWrong = 0;
solutions[threadIdx.x][threadIdx.y] = 0;
int totNumGen = powf(_N_, numGen);
int bYsegment = blockIdx.y / sizePerYSeg;
int workSize = totNumGen / numBY;
int extra = totNumGen - workSize * numBY;//extra work to be done by last segment
//set tuple by block Y value
tuple[threadIdx.x][threadIdx.y][0] = blockIdx.y % sizePerYSeg;
//set tuple(s) by block X value
int rem = blockIdx.x;
for(int i = 1; i <= numBX; i++){
tuple[threadIdx.x][threadIdx.y][i] = rem % _N_;
rem = rem / _N_;
}
int tupCtr = numBX;
//set tuples by thread value
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.x;
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.y;
//check if thread is valid at this point
for(int i = tupCtr; i > 0; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
if(totalWrong == 0){
//iterate through all numbers to generate possible solutions thread must check
//does not do if thread is already not valid at this point
int start = bYsegment * workSize;
for(int c = start; c < start + workSize + (bYsegment == numBY - 1) * extra; c++){
//generate last values in tuple, convert to base N and store to tuple array
int rem = c;
for(int b = 0, k = tupCtr + 1; b < numGen; b++, k++){
tuple[threadIdx.x][threadIdx.y][k] = rem % _N_;
rem = rem / _N_;
}
//checks that the numGen tuple values are indeed unique (saves work overall)
for(int x = 0; x < numGen && totalWrong == 0; x++){
for(int y = 0; y < numGen && totalWrong == 0; y++){
totalWrong += tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + x] == tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + y] && x != y;
}
}
//check one solution
for(int i = _N_ - 1; i > totalWrong * _N_; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
//add 1 to solution total if nothing wrong
solutions[threadIdx.x][threadIdx.y] += !(totalWrong);
//reset total wrong
totalWrong = 0;
}
}
//sync the threads so that thread 0 can make the calculations
__syncthreads();
//have thread 0 sum for all threads in block to get block total
if(threadIdx.x == 0 && threadIdx.y == 0){
//ensure that the block total value is 0 initially
long sum = 0;
//iterate through each threads solution and add it to the block total
for(int i =0; i < _N_; i++){
for(int j = 0; j < _N_; j++){
//use local var
sum += solutions[i][j];
}
}
//store to global memory
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
//sync the threads so that calculations can be made
__syncthreads();
//have the first thread in the first block sum up the block sums to return to the CPU
if(sumOnGPU == 1 && blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0){
sumBlocks(a);
}
}
__device__
void sumBlocks(long *a){
long sum = 0;
int numberBlocks = gridDim.x * gridDim.y;
int rowSizeOfGrid = powf(_N_, numBX);
//check if N is even or odd, then calculate sum, which is number of solutions
if(_N_ % 2 == 0){
for(int i = 0; i < numberBlocks; i++){
sum+= a[i];
}
sum *= 2;
}
else{
int numBlocksPerSeg = numberBlocks / numBY;
for(int j = 0; j < numBY; j++){
int start = j * numBlocksPerSeg;
for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
sum+= a[i];
}
}
sum *= 2;
//add last block row of sums for each Y block
for(int j = 0; j < numBY; j++){
for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
sum+= a[i];
}
}
}
//store sum to first index of a
a[gridDim.x * blockIdx.y + blockIdx.x] = 0;
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
// Validates the program's command line.
// Writes a diagnostic to stderr and exits with status 1 when more than
// numArgs arguments are present, or when any of the expected user
// arguments is not a positive non-zero integer below INT_MAX.
void checkArgs(int argc, char *argv[], int numArgs){
// reject surplus arguments up front
if(argc > numArgs){
fprintf(stderr, "\nnqueens: Incorrect number of arguments, %d\nCorrect usage: \"nqueens\"\n", argc - 1);
exit(1);
}
// validate each expected argument in turn
for(int idx = 1; idx < numArgs; idx++){
char* firstBadChar;
long parsed = strtol(argv[idx], &firstBadChar, 10);
// strtol saturates at LONG_MAX on overflow, which this bound also catches
if(parsed >= INT_MAX){
fprintf(stderr, "\nnqueens: Overflow. Invalid argument %d for nqueens, '%s'.\nThe argument must be a valid, positive, non-zero integer less than %d.\n", idx, argv[idx], INT_MAX);
exit(1);
}
// must be strictly positive and must consume the whole token
if(parsed <= 0 || *firstBadChar){
fprintf(stderr, "\nnqueens: Invalid argument %d for nqueens, '%s'. The argument must be a valid, positive, non-zero integer.\n", idx, argv[idx]);
exit(1);
}
}
}
// Verifies that the active GPU can satisfy the requested launch:
// gridW x gridH blocks of blockW x blockH threads, plus `size` longs of
// device memory for the per-block results. On the first unmet requirement
// a diagnostic is written to stderr and the process exits with status 1.
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
//check what GPU is being used
int devId;
hipGetDevice( &devId );
//get device properties for GPU being used
hipDeviceProp_t gpuProp;
hipGetDeviceProperties( &gpuProp, devId );
//check if GPU has enough memory
// Fix: report the byte count that is actually tested; the old message
// printed an unrelated figure ((size * sizeof(float)) * 3) and used %ld
// for size_t values -- %zu is the correct conversion.
if(gpuProp.totalGlobalMem < (size * sizeof(long))){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU does not have enough memory to handle the data size: %zu. It can only handle data sizes up to %zu.\n", size * sizeof(long), (size_t)gpuProp.totalGlobalMem);
exit(1);
}
//check if GPU can handle the number of threads per bloc
if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
exit(1);
}
//check that GPU can handle the number of threads in the block width
if(gpuProp.maxThreadsDim[0] < blockW){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
exit(1);
}
//check that GPU can handle the number of threads in the block height
if(gpuProp.maxThreadsDim[1] < blockH){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
exit(1);
}
//check that GPU can handle the number of blocks in the grid width
if(gpuProp.maxGridSize[0] < gridW){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
exit(1);
}
//check that GPU can handle the number of blocks in the grid height
if(gpuProp.maxGridSize[1] < gridH){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
exit(1);
}
}
// Returns the current wall-clock time in seconds since the Unix epoch,
// with microsecond resolution.
double getTime(){
struct timeval now;
gettimeofday(&now, 0);
double seconds = now.tv_sec;
return seconds + now.tv_usec / 1000000.0;
}
| cdf4c385c44531f4fa30800529d49f0817fc24ac.cu | /* game.cu
* Jonathan Lehman
* April 17, 2012
*
* Compile with: nvcc -o game game.cu
*
*/
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__ void queen(long*, int);
__device__ void sumBlocks(long *);
void checkArgs(int, char**, int);
void checkGPUCapabilities(int, int, int, int, int);
double getTime();
//set board size
#ifndef _N_
#define _N_ 4
#endif
// Keep track of the gpu time.
cudaEvent_t start, stop;
float elapsedTime;
// Keep track of the CPU time.
double startTime, stopTime;
//array for block sums
long *a;
// Entry point: sets up the grid decomposition for the _N_-queens count,
// launches the queen kernel, and reports the solution count plus timings.
// Fixes vs. original: a[] holds longs so it is printed with %ld (the old
// %d is undefined behavior on LP64); the CPU-side accumulator is a long
// (int could overflow for large boards); memory from new[] is released
// with delete[] (free() on new[]'d memory is undefined behavior).
int main(int argc, char *argv[]){
long *dev_a;
//check validity of arguments (should be no arguments)
checkArgs(argc, argv, 1);
int gW, gH, numberBlocks;
//calculate grid width based on factor N,
gW = pow(_N_, numBX);
//depends on if N is even or odd
int sizePerYSeg = (_N_ / 2) + (_N_ % 2);
gH = sizePerYSeg * numBY;
numberBlocks = gW * gH;
//check that GPU can handle arguments
checkGPUCapabilities(gW, gH, _N_, _N_, numberBlocks);
/* Host array receiving one partial count per block. */
a = new long[numberBlocks];
/* Allocate global device memory. */
cudaMalloc((void **)&dev_a, sizeof(long) * numberBlocks);
/* Start the timer. */
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* Execute the kernel. */
dim3 block(_N_, _N_); //threads w x h
dim3 grid(gW, gH); //blocks w x h
queen<<<grid, block>>>(dev_a, sizePerYSeg);
/* Wait for the kernel to complete. Needed for timing. */
cudaThreadSynchronize();
/* Stop the timer and print the resulting time. */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
/* Get result from device. */
cudaMemcpy(a, dev_a, sizeof(long) * numberBlocks, cudaMemcpyDeviceToHost);
//print any cuda error messages
const char* errorString = cudaGetErrorString(cudaGetLastError());
printf("GPU Error: %s\n", errorString);
if(sumOnGPU){
// sumBlocks() on the device already folded every block count into a[0]
printf("Number of Solutions:%ld\n", a[0]);
//add cpu time and gpu time and print result
printf( "GPU Time/Total Time: %f secs\n", (elapsedTime / 1000.0));
}
else{
/* Start the CPU timer. */
startTime = getTime();
long sum = 0;
//check if N is even or odd, then calculate sum, which is number of solutions
if(_N_ % 2 == 0){
for(int i = 0; i < numberBlocks; i++){
sum+= a[i];
}
sum *= 2;
}
else{
int numBlocksPerSeg = numberBlocks / numBY;
int rowSizeOfGrid = pow(_N_, numBX);
for(int j = 0; j < numBY; j++){
int start = j * numBlocksPerSeg;
for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
sum+= a[i];
}
}
sum *= 2;
//add last block row of sums for each Y block
for(int j = 0; j < numBY; j++){
for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
sum+= a[i];
}
}
}
/* Stop the CPU timer */
stopTime = getTime();
double totalTime = stopTime - startTime;
printf("Number of Solutions: %ld\n", sum);
//add cpu time and gpu time and print result
printf( "GPU Time: %f secs\nCPU Time: %f secs\nTotal Time: %f secs\n", (elapsedTime / 1000.0), totalTime, (elapsedTime / 1000.0) + totalTime );
}
//destroy cuda event
cudaEventDestroy(start);
cudaEventDestroy(stop);
/* Free the allocated device memory. */
cudaFree(dev_a);
//free allocated host memory (allocated with new[], so use delete[])
delete[] a;
}
// N-queens counting kernel.
// Launch layout (see main): gW x gH blocks of _N_ x _N_ threads. Each
// thread owns one (x,y) cell of the shared arrays:
//   solutions[x][y]   - placements this thread found valid
//   tuple[x][y][_N_]  - candidate board being tested; tuple[..][k] is the
//                       position of the queen in board column k (the
//                       conflict tests below compare equal values = same
//                       row, and +/-ctr offsets = the two diagonals)
// tuple slot 0 comes from blockIdx.y, slots 1..numBX from blockIdx.x
// (base-_N_ digits), the next two from threadIdx.x/y, and the remaining
// numGen slots are enumerated in the c-loop. Thread (0,0) reduces the
// block's counts into a[]; with sumOnGPU, block (0,0) folds all block
// counts into a[0] via sumBlocks().
__global__
void queen(long *a, int sizePerYSeg){
__shared__ long solutions[_N_][_N_];
__shared__ char tuple[_N_][_N_][_N_];
int totalWrong = 0;
solutions[threadIdx.x][threadIdx.y] = 0;
// powf truncation assumed exact for _N_^numGen -- TODO confirm
int totNumGen = powf(_N_, numGen);
int bYsegment = blockIdx.y / sizePerYSeg;
int workSize = totNumGen / numBY;
int extra = totNumGen - workSize * numBY;//extra work to be done by last segment
//set tuple by block Y value
tuple[threadIdx.x][threadIdx.y][0] = blockIdx.y % sizePerYSeg;
//set tuple(s) by block X value
int rem = blockIdx.x;
for(int i = 1; i <= numBX; i++){
tuple[threadIdx.x][threadIdx.y][i] = rem % _N_;
rem = rem / _N_;
}
int tupCtr = numBX;
//set tuples by thread value
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.x;
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.y;
//check if thread is valid at this point
for(int i = tupCtr; i > 0; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
if(totalWrong == 0){
//iterate through all numbers to generate possible solutions thread must check
//does not do if thread is already not valid at this point
int start = bYsegment * workSize;
// the last Y segment also absorbs the remainder `extra`
for(int c = start; c < start + workSize + (bYsegment == numBY - 1) * extra; c++){
//generate last values in tuple, convert to base N and store to tuple array
int rem = c;
for(int b = 0, k = tupCtr + 1; b < numGen; b++, k++){
tuple[threadIdx.x][threadIdx.y][k] = rem % _N_;
rem = rem / _N_;
}
//checks that the numGen tuple values are indeed unique (saves work overall)
for(int x = 0; x < numGen && totalWrong == 0; x++){
for(int y = 0; y < numGen && totalWrong == 0; y++){
totalWrong += tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + x] == tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + y] && x != y;
}
}
//check one solution
// (loop bound totalWrong * _N_ short-circuits the scan once a conflict
// has already been found)
for(int i = _N_ - 1; i > totalWrong * _N_; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
//add 1 to solution total if nothing wrong
solutions[threadIdx.x][threadIdx.y] += !(totalWrong);
//reset total wrong
totalWrong = 0;
}
}
//sync the threads so that thread 0 can make the calculations
__syncthreads();
//have thread 0 sum for all threads in block to get block total
if(threadIdx.x == 0 && threadIdx.y == 0){
//ensure that the block total value is 0 initially
long sum = 0;
//iterate through each threads solution and add it to the block total
for(int i =0; i < _N_; i++){
for(int j = 0; j < _N_; j++){
//use local var
sum += solutions[i][j];
}
}
//store to global memory
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
//sync the threads so that calculations can be made
__syncthreads();
// NOTE(review): __syncthreads() only synchronizes this block; sumBlocks()
// below reads a[i] written by OTHER blocks with no grid-wide barrier, so
// the sumOnGPU path may read stale values -- verify ordering guarantees.
//have the first thread in the first block sum up the block sums to return to the CPU
if(sumOnGPU == 1 && blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0){
sumBlocks(a);
}
}
// Grid-wide reduction of the per-block solution counts in a[].
// Invoked from the queen kernel by thread (0,0) of block (0,0) only (when
// sumOnGPU is set), so the indexed store at the bottom targets a[0].
// Mirrors the host-side summation in main: for even _N_ all block counts
// are summed and doubled; for odd _N_ the last block-row of each Y segment
// is excluded from the doubled part and added once afterwards (presumably
// because those blocks correspond to the non-mirrored middle position --
// verify against the grid decomposition in main).
__device__
void sumBlocks(long *a){
long sum = 0;
int numberBlocks = gridDim.x * gridDim.y;
// powf yields float; assumes _N_^numBX is small enough to truncate back
// to the exact int -- TODO confirm for large _N_/numBX.
int rowSizeOfGrid = powf(_N_, numBX);
//check if N is even or odd, then calculate sum, which is number of solutions
if(_N_ % 2 == 0){
for(int i = 0; i < numberBlocks; i++){
sum+= a[i];
}
sum *= 2;
}
else{
int numBlocksPerSeg = numberBlocks / numBY;
for(int j = 0; j < numBY; j++){
int start = j * numBlocksPerSeg;
// all blocks of this segment except its final row of rowSizeOfGrid blocks
for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
sum+= a[i];
}
}
sum *= 2;
//add last block row of sums for each Y block
for(int j = 0; j < numBY; j++){
for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
sum+= a[i];
}
}
}
//store sum to first index of a
// (the zeroing store is immediately overwritten by the next line)
a[gridDim.x * blockIdx.y + blockIdx.x] = 0;
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
// Validates the program's command line.
// Writes a diagnostic to stderr and exits with status 1 when more than
// numArgs arguments are present, or when any of the expected user
// arguments is not a positive non-zero integer below INT_MAX.
void checkArgs(int argc, char *argv[], int numArgs){
// reject surplus arguments up front
if(argc > numArgs){
fprintf(stderr, "\nnqueens: Incorrect number of arguments, %d\nCorrect usage: \"nqueens\"\n", argc - 1);
exit(1);
}
// validate each expected argument in turn
for(int idx = 1; idx < numArgs; idx++){
char* firstBadChar;
long parsed = strtol(argv[idx], &firstBadChar, 10);
// strtol saturates at LONG_MAX on overflow, which this bound also catches
if(parsed >= INT_MAX){
fprintf(stderr, "\nnqueens: Overflow. Invalid argument %d for nqueens, '%s'.\nThe argument must be a valid, positive, non-zero integer less than %d.\n", idx, argv[idx], INT_MAX);
exit(1);
}
// must be strictly positive and must consume the whole token
if(parsed <= 0 || *firstBadChar){
fprintf(stderr, "\nnqueens: Invalid argument %d for nqueens, '%s'. The argument must be a valid, positive, non-zero integer.\n", idx, argv[idx]);
exit(1);
}
}
}
// Verifies that the active GPU can satisfy the requested launch:
// gridW x gridH blocks of blockW x blockH threads, plus `size` longs of
// device memory for the per-block results. On the first unmet requirement
// a diagnostic is written to stderr and the process exits with status 1.
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
//check what GPU is being used
int devId;
cudaGetDevice( &devId );
//get device properties for GPU being used
cudaDeviceProp gpuProp;
cudaGetDeviceProperties( &gpuProp, devId );
//check if GPU has enough memory
// Fix: report the byte count that is actually tested; the old message
// printed an unrelated figure ((size * sizeof(float)) * 3) and used %ld
// for size_t values -- %zu is the correct conversion.
if(gpuProp.totalGlobalMem < (size * sizeof(long))){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU does not have enough memory to handle the data size: %zu. It can only handle data sizes up to %zu.\n", size * sizeof(long), (size_t)gpuProp.totalGlobalMem);
exit(1);
}
//check if GPU can handle the number of threads per bloc
if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
exit(1);
}
//check that GPU can handle the number of threads in the block width
if(gpuProp.maxThreadsDim[0] < blockW){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
exit(1);
}
//check that GPU can handle the number of threads in the block height
if(gpuProp.maxThreadsDim[1] < blockH){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
exit(1);
}
//check that GPU can handle the number of blocks in the grid width
if(gpuProp.maxGridSize[0] < gridW){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
exit(1);
}
//check that GPU can handle the number of blocks in the grid height
if(gpuProp.maxGridSize[1] < gridH){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
exit(1);
}
}
// Returns the current wall-clock time in seconds since the Unix epoch,
// with microsecond resolution.
double getTime(){
struct timeval now;
gettimeofday(&now, 0);
double seconds = now.tv_sec;
return seconds + now.tv_usec / 1000000.0;
}
|
a05854e8634698d8cb28668bf61b11a50597521c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#endif
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps output cell `index` along one spatial dimension to the left/top
// edge of its pooling window. `scale` spreads (inputSize - poolSize)
// over (outputSize - 1) cells; the pseudo-random `sample` jitters the
// window start, while subtracting the index-0 term keeps the first
// window anchored at offset 0. The last output cell is pinned so the
// window never runs past the input.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
int index, int inputSize, int outputSize, int poolSize) {
accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
static_cast<accscalar_t>(outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return static_cast<int>((index + sample) * alpha) -
static_cast<int>(sample * alpha);
}
}
// Forward kernel: one thread per output element. Flattened h*w position
// comes from blockIdx.x/threadIdx.x, plane from blockIdx.y, batch from
// blockIdx.z (see the launching TORCH_IMPL_FUNC). Each thread derives its
// pooling window start from samples[batch][plane][0..1] via get_interval,
// scans the poolSizeH x poolSizeW window, and writes the max value plus
// its flattened input index (h * W + w).
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<scalar_t, 4> input,
PackedTensorAccessor<scalar_t, 3> samples,
int poolSizeH, int poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3)) {
int outputW = ourOutputPoint % output.size(3);
int outputH = ourOutputPoint / output.size(3);
int poolW = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputW, input.size(3), output.size(3), poolSizeW);
int poolH = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(2), output.size(2), poolSizeH);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int maxIndex = poolH * input.size(3) + poolW;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
// Both branches perform the same scan; the split by poolSizeW range
// presumably helps the compiler with the small fixed-width case --
// verify against upstream intent before merging them.
if (poolSizeW < 2 || poolSizeW > 7) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
// (the _isnan branch also makes any NaN win, so NaN propagates)
if (val > maxVal || at::_isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
} else {
for (int i = 0; i < poolSizeW; ++i) {
int w = i + poolW;
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
}
}
indices[batch][plane][outputH][outputW] = maxIndex;
output[batch][plane][outputH][outputW] = maxVal;
}
}
// Backward kernel: same thread layout as the forward pass. Each thread
// reads the flattened argmax stored by the forward kernel and scatters
// its output gradient into gradInput with an atomic add (multiple output
// cells can share one argmax, hence the nondeterminism alert in the
// host-side wrapper).
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
// decode the flattened argmax (index = h * W + w) saved by the forward
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
} // anonymous namespace
// Host-side forward entry point. Normalizes 3-D (C,H,W) input to 4-D
// (1,C,H,W), then launches one thread per output element: grid.x covers
// the flattened output plane in 128-thread blocks, grid.y = planes,
// grid.z = batch.
TORCH_IMPL_FUNC(fractional_max_pool2d_out_cuda) (
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples,
const Tensor& output,
const Tensor& indices
) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
planeDim++;
dimh++;
dimw++;
}
/* sizes */
// dimh/dimw are computed for parity with the backward path but unused here
int numPlanes = input.size(planeDim);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
// promote a batchless (C,H,W) input to (1,C,H,W) so the kernel can
// always index as 4-D
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
// nothing to do for empty outputs
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor64<scalar_t, 4>();
auto devOutput = output_.packed_accessor64<scalar_t, 4>();
auto devIndices = indices_.packed_accessor64<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor64<scalar_t, 3>();
hipLaunchKernelGGL(( fractional_max_pool2d_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
// Host-side backward entry point. Zeroes gradInput, normalizes 3-D input
// to 4-D, and launches the atomic scatter kernel with the same layout as
// the forward pass (128-thread blocks over the output plane; grid.y/z =
// plane/batch).
TORCH_IMPL_FUNC(fractional_max_pool2d_backward_cuda)(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices,
const Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
dimh++;
dimw++;
}
/* sizes */
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
// nothing to accumulate into for empty gradients
if (gradInput.numel() == 0) {
return;
}
// gradients are accumulated with atomicAdd, so start from zero
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
// promote batchless (C,H,W) tensors to (1,C,H,W) for uniform 4-D indexing
if(ndims == 3) {
gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
auto devIndices = indices_.packed_accessor64<int64_t, 4>();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
"fractional_max_pool2d_backward_out_cuda_frame",
[&] {
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>();
hipLaunchKernelGGL(( fractional_max_pool2d_backward_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devGradInput, devGradOutput, devIndices);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
}// at::native
}// at
| a05854e8634698d8cb28668bf61b11a50597521c.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#endif
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps output cell `index` along one spatial dimension to the left/top
// edge of its pooling window. `alpha` spreads (inputSize - poolSize)
// over (outputSize - 1) cells; the pseudo-random `sample` jitters the
// window start, while subtracting the index-0 term keeps the first
// window anchored at offset 0. The last output cell is pinned so the
// window never runs past the input.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
int index, int inputSize, int outputSize, int poolSize) {
accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
static_cast<accscalar_t>(outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return static_cast<int>((index + sample) * alpha) -
static_cast<int>(sample * alpha);
}
}
// Forward kernel: one thread per output element. Flattened h*w position
// comes from blockIdx.x/threadIdx.x, plane from blockIdx.y, batch from
// blockIdx.z (see the launching TORCH_IMPL_FUNC). Each thread derives its
// pooling window start from samples[batch][plane][0..1] via get_interval,
// scans the poolSizeH x poolSizeW window, and writes the max value plus
// its flattened input index (h * W + w).
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<scalar_t, 4> input,
PackedTensorAccessor<scalar_t, 3> samples,
int poolSizeH, int poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3)) {
int outputW = ourOutputPoint % output.size(3);
int outputH = ourOutputPoint / output.size(3);
int poolW = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputW, input.size(3), output.size(3), poolSizeW);
int poolH = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(2), output.size(2), poolSizeH);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int maxIndex = poolH * input.size(3) + poolW;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
// Both branches perform the same scan; the split by poolSizeW range
// presumably helps the compiler with the small fixed-width case --
// verify against upstream intent before merging them.
if (poolSizeW < 2 || poolSizeW > 7) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
// (the _isnan branch also makes any NaN win, so NaN propagates)
if (val > maxVal || at::_isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
} else {
for (int i = 0; i < poolSizeW; ++i) {
int w = i + poolW;
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
}
}
indices[batch][plane][outputH][outputW] = maxIndex;
output[batch][plane][outputH][outputW] = maxVal;
}
}
// Backward kernel: same thread layout as the forward pass. Each thread
// reads the flattened argmax stored by the forward kernel and scatters
// its output gradient into gradInput with an atomic add (multiple output
// cells can share one argmax, hence the nondeterminism alert in the
// host-side wrapper).
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
// decode the flattened argmax (index = h * W + w) saved by the forward
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
} // anonymous namespace
// Host-side forward entry point. Normalizes 3-D (C,H,W) input to 4-D
// (1,C,H,W), then launches one thread per output element: grid.x covers
// the flattened output plane in 128-thread blocks, grid.y = planes,
// grid.z = batch.
TORCH_IMPL_FUNC(fractional_max_pool2d_out_cuda) (
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples,
const Tensor& output,
const Tensor& indices
) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
planeDim++;
dimh++;
dimw++;
}
/* sizes */
// dimh/dimw are computed for parity with the backward path but unused here
int numPlanes = input.size(planeDim);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
// promote a batchless (C,H,W) input to (1,C,H,W) so the kernel can
// always index as 4-D
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
// nothing to do for empty outputs
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor64<scalar_t, 4>();
auto devOutput = output_.packed_accessor64<scalar_t, 4>();
auto devIndices = indices_.packed_accessor64<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor64<scalar_t, 3>();
fractional_max_pool2d_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
// Host-side backward entry point. Zeroes gradInput, normalizes 3-D input
// to 4-D, and launches the atomic scatter kernel with the same layout as
// the forward pass (128-thread blocks over the output plane; grid.y/z =
// plane/batch).
TORCH_IMPL_FUNC(fractional_max_pool2d_backward_cuda)(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices,
const Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
dimh++;
dimw++;
}
/* sizes */
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
// nothing to accumulate into for empty gradients
if (gradInput.numel() == 0) {
return;
}
// gradients are accumulated with atomicAdd, so start from zero
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
// promote batchless (C,H,W) tensors to (1,C,H,W) for uniform 4-D indexing
if(ndims == 3) {
gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
auto devIndices = indices_.packed_accessor64<int64_t, 4>();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
"fractional_max_pool2d_backward_out_cuda_frame",
[&] {
auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>();
fractional_max_pool2d_backward_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devGradInput, devGradOutput, devIndices);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
}// at::native
}// at
|
daa4b005f8efcd80a7b1e8511bab98230a18d0e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/Activation.h>
#include <ATen/native/hip/Loops.cuh>
namespace at { namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
// Host-side launcher for PReLU with a single shared weight:
// result = input > 0 ? input : (*weight_data) * input, applied
// elementwise via CUDA_tensor_apply2. weight_data must be a device
// pointer: it is captured by value and dereferenced inside the device
// lambda.
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
input,
result,
[=] __device__ (
const scalar_t& input_val,
scalar_t& result_val) {
result_val = (input_val > 0) ? input_val : *weight_data * input_val;
});
}
// PReLU kernel with one weight per channel: one thread per element of the
// flattened (contiguous) input. The channel of a linear index is
// recovered from the strides of dims 0 and 1 supplied by the caller.
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
// bounds guard: the grid may overshoot input_numel
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
// PReLU forward on CUDA/HIP. Two paths: a single shared weight applied to
// every element, or one weight per channel (dim 1 of the input). Inputs
// are made contiguous first; returns a freshly allocated result tensor.
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
AT_CHECK(self.is_cuda());
AT_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
AT_CHECK(input.is_contiguous());
AT_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
AT_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
// 1-D inputs have no channel dim; strides stay 1 so channel == 0
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
AT_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
// one thread per element; getApplyBlock/getApplyGrid size the launch
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
AT_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
result.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
// Host-side launcher for the shared-weight PReLU backward pass.
// For each element: input_grad = x > 0 ? dy : w * dy, and
// weight_grad_collector = x > 0 ? 0 : x * dy (the caller reduces the
// collector into the scalar weight gradient). weight_data must be a
// device pointer, dereferenced inside the device lambda.
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
[=] __device__ (
const scalar_t& input_val,
const scalar_t& grad_out_val,
scalar_t& input_grad_val,
scalar_t& weight_grad_collector_val) {
input_grad_val = (input_val > 0) ? grad_out_val : *weight_data * grad_out_val;
weight_grad_collector_val = (input_val > 0) ? scalar_t(0) : input_val * grad_out_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
AT_CHECK(grad_out_.is_cuda());
AT_CHECK(self.is_cuda());
AT_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
AT_CHECK(input.is_contiguous());
AT_CHECK(weight.is_contiguous());
AT_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input);
Tensor weight_grad = at::empty_like(weight);
Tensor weight_grad_collector = at::empty_like(input);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
AT_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
AT_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
AT_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_backward_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t>(),
weight.data<scalar_t>(),
grad_out.data<scalar_t>(),
input_grad.data<scalar_t>(),
weight_grad_collector.data<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
template <typename scalar_t>
void hardshrink_cuda_kernel(const Tensor& self, Tensor& out_tensor, scalar_t lambd) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
self,
out_tensor,
[=] __device__ (
scalar_t& self_val,
scalar_t& out_tensor_val) {
out_tensor_val = (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : self_val;
});
}
template <typename scalar_t>
void hardshrink_backward_cuda_kernel(const Tensor& self, Tensor& out_tensor, scalar_t lambd, const Tensor& grad) {
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
self,
grad,
out_tensor,
[=] __device__ (
scalar_t& self_val,
scalar_t& grad_val,
scalar_t& out_tensor_val) {
out_tensor_val = (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
}
Tensor hardshrink_cuda(const Tensor & self, Scalar lambd) {
auto out_tensor = at::empty_like(self);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "hardshrink_cuda", [&] {
hardshrink_cuda_kernel<scalar_t>(self, out_tensor, lambd.to<scalar_t>());
});
return out_tensor;
}
Tensor hardshrink_backward_cuda(const Tensor & grad, const Tensor & self, Scalar lambd) {
auto out_tensor = at::empty_like(grad);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "hardshrink_backward_cuda", [&] {
hardshrink_backward_cuda_kernel<scalar_t>(self, out_tensor, lambd.to<scalar_t>(), grad);
});
return out_tensor;
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) {
gpu_binary_kernel(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel(TensorIterator& iter, Scalar threshold, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.type(), "threshold", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
REGISTER_DISPATCH(threshold_stub, &threshold_kernel);
}} // namespace at::native
| daa4b005f8efcd80a7b1e8511bab98230a18d0e5.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/Activation.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at { namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
input,
result,
[=] __device__ (
const scalar_t& input_val,
scalar_t& result_val) {
result_val = (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
AT_CHECK(self.is_cuda());
AT_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
AT_CHECK(input.is_contiguous());
AT_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
AT_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
AT_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
AT_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_cuda", [&] {
prelu_cuda_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
result.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
[=] __device__ (
const scalar_t& input_val,
const scalar_t& grad_out_val,
scalar_t& input_grad_val,
scalar_t& weight_grad_collector_val) {
input_grad_val = (input_val > 0) ? grad_out_val : *weight_data * grad_out_val;
weight_grad_collector_val = (input_val > 0) ? scalar_t(0) : input_val * grad_out_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
AT_CHECK(grad_out_.is_cuda());
AT_CHECK(self.is_cuda());
AT_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
AT_CHECK(input.is_contiguous());
AT_CHECK(weight.is_contiguous());
AT_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input);
Tensor weight_grad = at::empty_like(weight);
Tensor weight_grad_collector = at::empty_like(input);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
AT_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
AT_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
AT_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
input.data<scalar_t>(),
weight.data<scalar_t>(),
grad_out.data<scalar_t>(),
input_grad.data<scalar_t>(),
weight_grad_collector.data<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
template <typename scalar_t>
void hardshrink_cuda_kernel(const Tensor& self, Tensor& out_tensor, scalar_t lambd) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
self,
out_tensor,
[=] __device__ (
scalar_t& self_val,
scalar_t& out_tensor_val) {
out_tensor_val = (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : self_val;
});
}
template <typename scalar_t>
void hardshrink_backward_cuda_kernel(const Tensor& self, Tensor& out_tensor, scalar_t lambd, const Tensor& grad) {
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
self,
grad,
out_tensor,
[=] __device__ (
scalar_t& self_val,
scalar_t& grad_val,
scalar_t& out_tensor_val) {
out_tensor_val = (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
}
Tensor hardshrink_cuda(const Tensor & self, Scalar lambd) {
auto out_tensor = at::empty_like(self);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "hardshrink_cuda", [&] {
hardshrink_cuda_kernel<scalar_t>(self, out_tensor, lambd.to<scalar_t>());
});
return out_tensor;
}
Tensor hardshrink_backward_cuda(const Tensor & grad, const Tensor & self, Scalar lambd) {
auto out_tensor = at::empty_like(grad);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "hardshrink_backward_cuda", [&] {
hardshrink_backward_cuda_kernel<scalar_t>(self, out_tensor, lambd.to<scalar_t>(), grad);
});
return out_tensor;
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) {
gpu_binary_kernel(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel(TensorIterator& iter, Scalar threshold, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.type(), "threshold", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
REGISTER_DISPATCH(threshold_stub, &threshold_kernel);
}} // namespace at::native
|
4a242b416cdd31212e2fac9977f2ed9dbf51938c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void square(float *d_out, float *d_in)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate input array on host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
// transfer array to GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch kernel
hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
//copy back to CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out results
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf( ((i % 4) != 3) ? "\t" : "\n" );
}
// free GPU memory
hipFree(d_in);
hipFree(d_out);
return 0;
} | 4a242b416cdd31212e2fac9977f2ed9dbf51938c.cu | #include <stdio.h>
__global__ void square(float *d_out, float *d_in)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate input array on host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer array to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch kernel
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
//copy back to CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out results
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf( ((i % 4) != 3) ? "\t" : "\n" );
}
// free GPU memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
} |
5db7d5dd8afe533b148f0fad869728eed344a8e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <conio.h>
#include <string.h>
#include <hip/hip_runtime.h>
__global__ void mul_matrix_on_gpu( float* a, float *b, float *c, int N )
{
float sum=0;
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockDim.y*blockIdx.y+threadIdx.y;
for(int i=0;i<N;i++){
if(x<N && y<N){
int indexa = x*N+i;
int indexb = i*N+y;
sum+=a[indexa]*b[indexb];}
}
c[x*N+y]=sum;
}
int main()
{
int N;
printf("\nEnter the no: of rows/columns in the matrix : ");
scanf("%d",&N);
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
printf("\nEnter the 1st matrix\n");
for ( int i = 0; i < N*N; ++i ) {
scanf("%f",&a[i]); }
printf("\nEnter the 2nd matrix\n");
for ( int i = 0; i < N*N; ++i ) {
scanf("%f",&b[i]); }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
hipMalloc( (void**)&ad, size );
hipMalloc( (void**)&bd, size );
hipMalloc( (void**)&cd, size );
hipMemcpy( ad, a, size, hipMemcpyHostToDevice );
hipMemcpy( bd, b, size, hipMemcpyHostToDevice );
int blocksize;
if(N>2)
for(blocksize=1;N%blocksize!=0;++blocksize);
else blocksize=1;
printf("\nBlock Size = %d\n",blocksize);
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
hipLaunchKernelGGL(( mul_matrix_on_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd, N );
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %f ",a[i*N + j]);
printf("\n");}
printf("\n + \n");
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %f ",b[i*N + j]);
printf("\n");}
printf("\nPRODUCT \n");
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %f ",c[i*N + j]);
printf("\n");}
hipFree( ad ); hipFree( bd ); hipFree( cd );
delete[] a; delete[] b; delete[] c;
getch();
return EXIT_SUCCESS;
}
| 5db7d5dd8afe533b148f0fad869728eed344a8e0.cu | #include <stdlib.h>
#include <stdio.h>
#include <conio.h>
#include <string.h>
#include <cuda.h>
__global__ void mul_matrix_on_gpu( float* a, float *b, float *c, int N )
{
float sum=0;
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockDim.y*blockIdx.y+threadIdx.y;
for(int i=0;i<N;i++){
if(x<N && y<N){
int indexa = x*N+i;
int indexb = i*N+y;
sum+=a[indexa]*b[indexb];}
}
c[x*N+y]=sum;
}
int main()
{
int N;
printf("\nEnter the no: of rows/columns in the matrix : ");
scanf("%d",&N);
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
printf("\nEnter the 1st matrix\n");
for ( int i = 0; i < N*N; ++i ) {
scanf("%f",&a[i]); }
printf("\nEnter the 2nd matrix\n");
for ( int i = 0; i < N*N; ++i ) {
scanf("%f",&b[i]); }
float *ad, *bd, *cd;
const int size = N*N*sizeof(float);
cudaMalloc( (void**)&ad, size );
cudaMalloc( (void**)&bd, size );
cudaMalloc( (void**)&cd, size );
cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
int blocksize;
if(N>2)
for(blocksize=1;N%blocksize!=0;++blocksize);
else blocksize=1;
printf("\nBlock Size = %d\n",blocksize);
dim3 dimBlock( blocksize, blocksize );
dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );
mul_matrix_on_gpu<<<dimGrid, dimBlock>>>( ad, bd, cd, N );
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %f ",a[i*N + j]);
printf("\n");}
printf("\n + \n");
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %f ",b[i*N + j]);
printf("\n");}
printf("\nPRODUCT \n");
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %f ",c[i*N + j]);
printf("\n");}
cudaFree( ad ); cudaFree( bd ); cudaFree( cd );
delete[] a; delete[] b; delete[] c;
getch();
return EXIT_SUCCESS;
}
|
9542b691060ab3fa10c00371cf4a7e3133d74c67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zpotf2.cu, normal z -> c, Sun Nov 20 20:20:31 2016
*/
#include "magma_internal.h"
#define COMPLEX
#define cdotc_max_bs 512 // 512 is max threads for 1.x cards
void cpotf2_csscal( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue );
void cpotf2_cdotc( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue );
#ifdef COMPLEX
void magmablas_clacgv( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue );
#endif
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
Purpose
-------
cpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_potf2
*******************************************************************************/
extern "C" magma_int_t
magma_cpotf2_gpu(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t j;
*info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (n < 0 || n > cdotc_max_bs) {
*info = -2;
} else if (ldda < max(1,n)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (n == 0) {
return *info;
}
magmaFloatComplex alpha = MAGMA_C_NEG_ONE;
magmaFloatComplex beta = MAGMA_C_ONE;
if (uplo == MagmaUpper) {
for (j = 0; j < n; j++) {
cpotf2_cdotc( j, dA(0,j), 1, queue ); // including cdotc product and update a(j,j)
if (j < n) {
#ifdef COMPLEX
magmablas_clacgv( j, dA(0, j), 1, queue );
#endif
magma_cgemv( MagmaTrans, j, n-j-1,
alpha, dA(0, j+1), ldda,
dA(0, j), 1,
beta, dA(j, j+1), ldda, queue );
#ifdef COMPLEX
magmablas_clacgv( j, dA(0, j), 1, queue );
#endif
cpotf2_csscal( n-j, dA(j,j), ldda, queue );
}
}
}
else {
for (j = 0; j < n; j++) {
cpotf2_cdotc( j, dA(j,0), ldda, queue ); // including cdotc product and update a(j,j)
if (j < n) {
#ifdef COMPLEX
magmablas_clacgv( j, dA(j, 0), ldda, queue );
#endif
magma_cgemv( MagmaNoTrans, n-j-1, j,
alpha, dA(j+1, 0), ldda,
dA(j,0), ldda,
beta, dA(j+1, j), 1, queue );
#ifdef COMPLEX
magmablas_clacgv( j, dA(j, 0), ldda, queue );
#endif
cpotf2_csscal( n-j, dA(j,j), 1, queue );
}
}
}
return *info;
}
#define csscal_bs 32
#define cdotc_bs 512
#define clacgv_bs 512
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float shared_data[];
__global__ void kernel_cdotc(int n, magmaFloatComplex *x, int incx, int threadSize)
{
int tx = threadIdx.x;
float *sdata = shared_data;
magmaFloatComplex res = MAGMA_C_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_C_REAL(res * MAGMA_C_CONJ(res));
__syncthreads();
for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile float* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
float xreal = MAGMA_C_REAL(x[n*incx]);
x[n*incx] = MAGMA_C_MAKE( sqrt(xreal - sdata[0]), 0 );
}
}
void cpotf2_cdotc(
magma_int_t n, magmaFloatComplex *x, magma_int_t incx,
magma_queue_t queue )
{
/*
Specialized Cdotc
1) performs cdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > cdotc_max_bs) {
fprintf( stderr, "n = %lld > %lld is not supported in cpotf2_cdotc\n",
(long long) n, (long long) cdotc_max_bs );
return;
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
size_t shmem = threadSize * sizeof(float);
hipLaunchKernelGGL(( kernel_cdotc)
, dim3(1), dim3(threadSize), shmem, queue->cuda_stream() ,
n, x, incx, threadSize);
}
__global__ void kernel_csscal(int n, magmaFloatComplex *x, int incx)
{
int id = blockIdx.x * csscal_bs + threadIdx.x;
__shared__ magmaFloatComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id > 0) {
x[id*incx] = x[id*incx] * factor;
}
}
void cpotf2_csscal(
magma_int_t n, magmaFloatComplex *x, magma_int_t incx,
magma_queue_t queue )
{
/* Specialized csscal perform x[1:n-1] / x[0] */
dim3 threads(csscal_bs, 1, 1);
int num_blocks = magma_ceildiv( n, csscal_bs );
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_csscal)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x, incx);
}
#ifdef COMPLEX
__global__ void kernel_clacgv(int n, magmaFloatComplex *x, int incx)
{
int id = blockIdx.x * clacgv_bs + threadIdx.x;
if ( id < n ) {
x[id*incx] = MAGMA_C_CONJ(x[id*incx]);
}
}
/***************************************************************************//**
Purpose
-------
CLACGV conjugates a complex vector of length N.
Arguments
---------
@param[in]
n INTEGER
The length of the vector X. N >= 0.
@param[in,out]
x COMPLEX array, dimension (1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
@param[in]
incx INTEGER
The spacing between successive elements of X.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacgv
*******************************************************************************/
void magmablas_clacgv(
magma_int_t n, magmaFloatComplex *x, magma_int_t incx,
magma_queue_t queue )
{
dim3 threads(clacgv_bs, 1, 1);
int num_blocks = magma_ceildiv( n, clacgv_bs );
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_clacgv)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x, incx);
}
#endif // COMPLEX
| 9542b691060ab3fa10c00371cf4a7e3133d74c67.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zpotf2.cu, normal z -> c, Sun Nov 20 20:20:31 2016
*/
#include "magma_internal.h"
#define COMPLEX
#define cdotc_max_bs 512 // 512 is max threads for 1.x cards
void cpotf2_csscal( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue );
void cpotf2_cdotc( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue );
#ifdef COMPLEX
void magmablas_clacgv( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue );
#endif
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
Purpose
-------
cpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_potf2
*******************************************************************************/
extern "C" magma_int_t
magma_cpotf2_gpu(
    magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    // Unblocked Cholesky factorization of the n-by-n matrix dA, one
    // row/column at a time: A = U**H * U (uplo == MagmaUpper) or
    // A = L * L**H (uplo == MagmaLower), using the helper kernels below.
    // dA(i,j) addresses element (i,j) of the column-major matrix with
    // leading dimension ldda.
    #define dA(i_, j_) (dA + (i_) + (j_)*ldda)

    magma_int_t j;

    // Argument checks; n is capped at cdotc_max_bs because cpotf2_cdotc
    // runs in a single thread block.
    *info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower) {
        *info = -1;
    } else if (n < 0 || n > cdotc_max_bs) {
        *info = -2;
    } else if (ldda < max(1,n)) {
        *info = -4;
    }
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return *info;
    }

    // Quick return if possible
    if (n == 0) {
        return *info;
    }

    magmaFloatComplex alpha = MAGMA_C_NEG_ONE;
    magmaFloatComplex beta = MAGMA_C_ONE;

    if (uplo == MagmaUpper) {
        for (j = 0; j < n; j++) {
            cpotf2_cdotc( j, dA(0,j), 1, queue ); // including cdotc product and update a(j,j)
            // NOTE: (j < n) always holds inside this loop; for j == n-1
            // the calls below degenerate to no-ops (gemv over 0 columns,
            // csscal over a single element that its kernel skips).
            if (j < n) {
                #ifdef COMPLEX
                magmablas_clacgv( j, dA(0, j), 1, queue );
                #endif
                // Update the remainder of row j from the columns already
                // factored above the diagonal.
                magma_cgemv( MagmaTrans, j, n-j-1,
                             alpha, dA(0, j+1), ldda,
                                    dA(0, j),   1,
                             beta,  dA(j, j+1), ldda, queue );
                #ifdef COMPLEX
                magmablas_clacgv( j, dA(0, j), 1, queue );
                #endif
                // Scale the trailing part of row j by 1/a(j,j).
                cpotf2_csscal( n-j, dA(j,j), ldda, queue );
            }
        }
    }
    else {
        for (j = 0; j < n; j++) {
            cpotf2_cdotc( j, dA(j,0), ldda, queue ); // including cdotc product and update a(j,j)
            // NOTE: (j < n) is always true here as well; see comment above.
            if (j < n) {
                #ifdef COMPLEX
                magmablas_clacgv( j, dA(j, 0), ldda, queue );
                #endif
                // Update the remainder of column j from the rows already
                // factored to the left of the diagonal.
                magma_cgemv( MagmaNoTrans, n-j-1, j,
                             alpha, dA(j+1, 0), ldda,
                                    dA(j,0),    ldda,
                             beta,  dA(j+1, j), 1, queue );
                #ifdef COMPLEX
                magmablas_clacgv( j, dA(j, 0), ldda, queue );
                #endif
                // Scale the trailing part of column j by 1/a(j,j).
                cpotf2_csscal( n-j, dA(j,j), 1, queue );
            }
        }
    }

    return *info;
}
// Thread-block sizes for the helper kernels below.
#define csscal_bs 32
#define cdotc_bs 512
#define clacgv_bs 512

// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float shared_data[];
// Single-block kernel used by cpotf2_cdotc.
// Computes sum = |x[0]|^2 + ... + |x[n-1]|^2 (stride incx) and overwrites
// x[n] with sqrt(real(x[n]) - sum).
// Launch configuration: one block of `threadSize` threads, where
// threadSize is a power of two >= 64 covering n, with threadSize floats
// of dynamic shared memory (see shared_data above).
__global__ void kernel_cdotc(int n, magmaFloatComplex *x, int incx, int threadSize)
{
    int tx = threadIdx.x;
    float *sdata = shared_data;

    // Each thread loads one element (zero beyond the end of the vector).
    magmaFloatComplex res = MAGMA_C_ZERO;
    if (tx < n) {
        res = x[tx*incx];
    }
    sdata[tx] = MAGMA_C_REAL(res * MAGMA_C_CONJ(res));
    __syncthreads();

    // Tree reduction in shared memory.  The original code switched to an
    // unsynchronized "volatile" warp-level reduction for strides <= 32,
    // which relies on implicit warp-lockstep execution; that is a data
    // race on architectures with independent thread scheduling (Volta and
    // later).  Synchronizing every step is correct on all architectures
    // and performs the additions in exactly the same pairwise order.
    for (int s = blockDim.x/2; s > 0; s >>= 1 ) {
        if (tx < s) {
            sdata[tx] += sdata[tx+s];
        }
        __syncthreads();
    }

    // Thread 0 applies the Cholesky diagonal update.
    if (tx == 0) {
        float xreal = MAGMA_C_REAL(x[n*incx]);
        x[n*incx] = MAGMA_C_MAKE( sqrt(xreal - sdata[0]), 0 );
    }
}
// Host-side driver for kernel_cdotc:
//   1) sum = x[0:n-1] . conj(x[0:n-1])      (stride incx)
//   2) x[n] = sqrt(x[n] - sum)
// Runs in a single thread block; n must not exceed cdotc_max_bs.
void cpotf2_cdotc(
    magma_int_t n, magmaFloatComplex *x, magma_int_t incx,
    magma_queue_t queue )
{
    if (n > cdotc_max_bs) {
        fprintf( stderr, "n = %lld > %lld is not supported in cpotf2_cdotc\n",
                 (long long) n, (long long) cdotc_max_bs );
        return;
    }

    // Smallest power-of-two block size in [64, 1024] that covers n
    // (equivalent to the if/else ladder it replaces).
    int threadSize = 64;
    while (threadSize < n && threadSize < 1024) {
        threadSize <<= 1;
    }

    // One float of dynamic shared memory per thread for the reduction.
    size_t shmem = threadSize * sizeof(float);
    kernel_cdotc
        <<< 1, threadSize, shmem, queue->cuda_stream() >>>
        (n, x, incx, threadSize);
}
// Scales x[1:n-1] (stride incx) by 1/real(x[0]); x[0] itself is untouched.
__global__ void kernel_csscal(int n, magmaFloatComplex *x, int incx)
{
    int gid = blockIdx.x * csscal_bs + threadIdx.x;

    // Thread 0 of each block reads the diagonal once and broadcasts the
    // reciprocal through shared memory.
    __shared__ magmaFloatComplex factor;
    if (threadIdx.x == 0) {
        factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(x[0]), 0.0);
    }
    __syncthreads();

    if ( gid > 0 && gid < n ) {
        x[gid*incx] = x[gid*incx] * factor;
    }
}
// Host-side driver: divides x[1:n-1] (stride incx) by x[0] on the device.
void cpotf2_csscal(
    magma_int_t n, magmaFloatComplex *x, magma_int_t incx,
    magma_queue_t queue )
{
    dim3 block(csscal_bs, 1, 1);
    dim3 grid(magma_ceildiv( n, csscal_bs ), 1);
    kernel_csscal
        <<< grid, block, 0, queue->cuda_stream() >>>
        (n, x, incx);
}
#ifdef COMPLEX
// Conjugates x[0:n-1] (stride incx) in place, one thread per element.
__global__ void kernel_clacgv(int n, magmaFloatComplex *x, int incx)
{
    int gid = blockIdx.x * clacgv_bs + threadIdx.x;
    if ( gid < n ) {
        magmaFloatComplex v = x[gid*incx];
        x[gid*incx] = MAGMA_C_CONJ(v);
    }
}
/***************************************************************************//**
Purpose
-------
CLACGV conjugates a complex vector of length N.
Arguments
---------
@param[in]
n INTEGER
The length of the vector X. N >= 0.
@param[in,out]
x COMPLEX array, dimension (1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
@param[in]
incx INTEGER
The spacing between successive elements of X.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacgv
*******************************************************************************/
void magmablas_clacgv(
    magma_int_t n, magmaFloatComplex *x, magma_int_t incx,
    magma_queue_t queue )
{
    // One thread per element, clacgv_bs threads per block.
    dim3 block(clacgv_bs, 1, 1);
    dim3 grid(magma_ceildiv( n, clacgv_bs ), 1);
    kernel_clacgv
        <<< grid, block, 0, queue->cuda_stream() >>>
        (n, x, incx);
}
#endif // COMPLEX
|
6544c591bfaf182d212b376761f7e91fbfa479aa.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg libary. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#include <mfmg/common/utils.hpp>
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <deal.II/lac/trilinos_index_access.h>
namespace mfmg
{
namespace internal
{
template <typename ScalarType>
ScalarType *copy_to_gpu(std::vector<ScalarType> const &val)
{
  // Allocate a device buffer of matching size and copy the host data in.
  // The caller takes ownership of (and must free) the returned pointer.
  ASSERT(val.size() > 0, "Cannot copy an empty vector to the device");
  size_t const bytes = val.size() * sizeof(ScalarType);
  ScalarType *device_ptr = nullptr;
  hipError_t error_code = hipMalloc(&device_ptr, bytes);
  ASSERT_CUDA(error_code);
  error_code = hipMemcpy(device_ptr, val.data(), bytes, hipMemcpyHostToDevice);
  ASSERT_CUDA(error_code);
  return device_ptr;
}
} // namespace internal
// Converts a serial deal.II SparseMatrix to a CSR SparseMatrixDevice
// (single-process: both index sets are complete).
template <typename ScalarType>
SparseMatrixDevice<ScalarType>
convert_matrix(dealii::SparseMatrix<ScalarType> const &sparse_matrix)
{
  unsigned int const nnz = sparse_matrix.n_nonzero_elements();
  int const n_rows = sparse_matrix.m();
  int const row_ptr_size = n_rows + 1;
  std::vector<ScalarType> val;
  val.reserve(nnz);
  std::vector<int> column_index;
  column_index.reserve(nnz);
  std::vector<int> row_ptr(row_ptr_size, 0);
  for (int row = 0; row < n_rows; ++row)
  {
    auto p_end = sparse_matrix.end(row);
    unsigned int counter = 0;
    for (auto p = sparse_matrix.begin(row); p != p_end; ++p)
    {
      val.emplace_back(p->value());
      column_index.emplace_back(p->column());
      ++counter;
    }
    row_ptr[row + 1] = row_ptr[row] + counter;

    // If the matrix is square deal.II stores the diagonal first in each row so
    // we need to do some reordering
    if (sparse_matrix.m() == sparse_matrix.n())
    {
      // Shift the off-diagonal entries with column < row one slot to the
      // left and re-insert the diagonal in its sorted position.
      unsigned int const offset = row_ptr[row];
      int const diag_index = column_index[offset];
      ScalarType diag_elem = sparse_matrix.diag_element(row);
      unsigned int pos = 1;
      // BUGFIX: check pos < counter *before* indexing.  The original
      // condition order read column_index[offset + counter] (one past the
      // row, and past the end of the vector on the last row) whenever
      // every off-diagonal entry of the row has column < row.
      while ((pos < counter) && (column_index[offset + pos] < row))
      {
        val[offset + pos - 1] = val[offset + pos];
        column_index[offset + pos - 1] = column_index[offset + pos];
        ++pos;
      }
      val[offset + pos - 1] = diag_elem;
      column_index[offset + pos - 1] = diag_index;
    }
  }

  return SparseMatrixDevice<ScalarType>(
      MPI_COMM_SELF, internal::copy_to_gpu(val),
      internal::copy_to_gpu(column_index), internal::copy_to_gpu(row_ptr), nnz,
      dealii::complete_index_set(n_rows),
      dealii::complete_index_set(sparse_matrix.n()));
}
// Converts the locally owned rows of a (possibly MPI-distributed)
// Trilinos-backed deal.II sparse matrix to a device CSR matrix.
SparseMatrixDevice<double>
convert_matrix(dealii::TrilinosWrappers::SparseMatrix const &sparse_matrix)
{
  unsigned int const n_local_rows = sparse_matrix.local_size();
  std::vector<double> val;
  std::vector<int> column_index;
  std::vector<int> row_ptr(n_local_rows + 1);
  unsigned int local_nnz = 0;
  for (unsigned int row = 0; row < n_local_rows; ++row)
  {
    int n_entries;
    double *values;
    int *indices;
    // View of the row's values and its *local* column indices.
    sparse_matrix.trilinos_matrix().ExtractMyRowView(row, n_entries, values,
                                                     indices);
    val.insert(val.end(), values, values + n_entries);
    row_ptr[row + 1] = row_ptr[row] + n_entries;

    // Trilinos does not store the column indices directly
    for (int i = 0; i < n_entries; ++i)
      column_index.push_back(dealii::TrilinosWrappers::global_column_index(
          sparse_matrix.trilinos_matrix(), indices[i]));
    local_nnz += n_entries;
  }

  return SparseMatrixDevice<double>(
      sparse_matrix.get_mpi_communicator(), internal::copy_to_gpu(val),
      internal::copy_to_gpu(column_index), internal::copy_to_gpu(row_ptr),
      local_nnz, sparse_matrix.locally_owned_range_indices(),
      sparse_matrix.locally_owned_domain_indices());
}
// Converts the locally owned part of an Epetra_CrsMatrix to a device CSR
// matrix, building deal.II index sets from the Epetra range/domain maps.
SparseMatrixDevice<double> convert_matrix(Epetra_CrsMatrix const &sparse_matrix)
{
  // Locally owned rows -> range index set.
  auto range_map = sparse_matrix.RangeMap();
  unsigned int const n_local_rows = range_map.NumMyElements();
  std::vector<int> row_gid(n_local_rows);
  range_map.MyGlobalElements(row_gid.data());
  dealii::IndexSet range_indexset(range_map.NumGlobalElements());
  range_indexset.add_indices(row_gid.begin(), row_gid.end());
  range_indexset.compress();

  // Locally owned columns -> domain index set.
  auto domain_map = sparse_matrix.DomainMap();
  std::vector<int> column_gid(domain_map.NumMyElements());
  domain_map.MyGlobalElements(column_gid.data());
  dealii::IndexSet domain_indexset(domain_map.NumGlobalElements());
  domain_indexset.add_indices(column_gid.begin(), column_gid.end());
  domain_indexset.compress();

  unsigned int const local_nnz = sparse_matrix.NumMyNonzeros();
  int *row_ptr_host = nullptr;
  int *local_column_index_host = nullptr;
  double *val_host = nullptr;
  // Direct views into the Epetra CSR storage (no copies).
  sparse_matrix.ExtractCrsDataPointers(row_ptr_host, local_column_index_host,
                                       val_host);

  // Epetra stores local column ids; translate them to global ids.
  std::vector<int> column_index(local_nnz);
  for (unsigned int i = 0; i < local_nnz; ++i)
    column_index[i] = sparse_matrix.GCID(local_column_index_host[i]);

  double *val_dev;
  cuda_malloc(val_dev, local_nnz);
  hipError_t cuda_error;
  cuda_error = hipMemcpy(val_dev, val_host, local_nnz * sizeof(double),
                         hipMemcpyHostToDevice);
  ASSERT_CUDA(cuda_error);
  int *row_ptr_dev;
  unsigned int const row_ptr_size = n_local_rows + 1;
  cuda_malloc(row_ptr_dev, row_ptr_size);
  cuda_error = hipMemcpy(row_ptr_dev, row_ptr_host, row_ptr_size * sizeof(int),
                         hipMemcpyHostToDevice);
  ASSERT_CUDA(cuda_error);

  return SparseMatrixDevice<double>(
      dynamic_cast<Epetra_MpiComm const &>(sparse_matrix.Comm()).Comm(),
      val_dev, internal::copy_to_gpu(column_index), row_ptr_dev, local_nnz,
      range_indexset, domain_indexset);
}
// Copies a device CSR matrix back into a (distributed) Trilinos-backed
// deal.II sparse matrix.
dealii::TrilinosWrappers::SparseMatrix
convert_to_trilinos_matrix(SparseMatrixDevice<double> const &matrix_dev)
{
  unsigned int const local_nnz = matrix_dev.local_nnz();
  unsigned int const n_local_rows = matrix_dev.n_local_rows();
  std::vector<double> values(local_nnz);
  std::vector<int> column_index(local_nnz);
  std::vector<int> row_ptr(n_local_rows + 1);

  // Copy the data to the host
  cuda_mem_copy_to_host(matrix_dev.val_dev, values);
  cuda_mem_copy_to_host(matrix_dev.column_index_dev, column_index);
  cuda_mem_copy_to_host(matrix_dev.row_ptr_dev, row_ptr);

  // Create the sparse matrix on the host
  dealii::IndexSet locally_owned_rows =
      matrix_dev.locally_owned_range_indices();
  dealii::TrilinosWrappers::SparseMatrix sparse_matrix(
      locally_owned_rows, matrix_dev.locally_owned_domain_indices(),
      matrix_dev.get_mpi_communicator());
  // pos walks the local rows in the same order the owned global rows are
  // enumerated by the index set.
  unsigned int pos = 0;
  for (auto row : locally_owned_rows)
  {
    unsigned int const n_cols = row_ptr[pos + 1] - row_ptr[pos];
    sparse_matrix.set(
        row, n_cols,
        reinterpret_cast<unsigned int *>(column_index.data() + row_ptr[pos]),
        values.data() + row_ptr[pos]);
    ++pos;
  }
  sparse_matrix.compress(dealii::VectorOperation::insert);

  return sparse_matrix;
}
// Reorders a distributed device CSR matrix into the layout AmgX expects:
//  - halo (non-locally-owned) column ids are renumbered after the local ones,
//  - rows that are sent to other processors (rows_sent) are moved behind
//    the strictly owned rows,
//  - entries are sorted by (row, column).
// Returns the (halo_map, local_map) renumberings that were applied.
std::tuple<std::unordered_map<int, int>, std::unordered_map<int, int>>
csr_to_amgx(std::unordered_set<int> const &rows_sent,
            SparseMatrixDevice<double> &matrix_dev)
{
  unsigned int local_nnz = matrix_dev.local_nnz();
  int *row_index_coo_dev = nullptr;
  cuda_malloc(row_index_coo_dev, local_nnz);
  int n_local_rows = matrix_dev.n_local_rows();

  // Change to COO format. The only thing that needs to be change to go from CSR
  // to COO is to change row_ptr_dev with row_index_coo_dev.
  hipsparseStatus_t cusparse_error_code = hipsparseXcsr2coo(
      matrix_dev.cusparse_handle, matrix_dev.row_ptr_dev, local_nnz,
      n_local_rows, row_index_coo_dev, HIPSPARSE_INDEX_BASE_ZERO);
  ASSERT_CUSPARSE(cusparse_error_code);

  // Move the values, the rows, and the columns to the host
  std::vector<double> value_host(local_nnz);
  cuda_mem_copy_to_host(matrix_dev.val_dev, value_host);
  std::vector<int> col_index_host(local_nnz);
  cuda_mem_copy_to_host(matrix_dev.column_index_dev, col_index_host);
  std::vector<int> row_index_host(local_nnz);
  cuda_mem_copy_to_host(row_index_coo_dev, row_index_host);

  // Renumber halo data behind the local data
  auto range_indexset = matrix_dev.locally_owned_range_indices();
  std::vector<unsigned int> global_rows;
  range_indexset.fill_index_vector(global_rows);
  std::unordered_map<int, int> halo_map;
  for (unsigned int i = 0; i < n_local_rows; ++i)
    halo_map[global_rows[i]] = i;

  // (The unused locals n_rows and rank of the original were removed.)
  int next_free_id = n_local_rows;
  dealii::IndexSet col_indexset(matrix_dev.n());
  col_indexset.add_indices(col_index_host.begin(), col_index_host.end());
  col_indexset.compress();
  for (auto index : col_indexset)
  {
    if (range_indexset.is_element(index) == false)
    {
      halo_map[index] = next_free_id;
      ++next_free_id;
    }
  }
  for (auto &col_index : col_index_host)
    col_index = halo_map[col_index];

  // Reorder rows and columns. We need to move to the top the rows that are
  // locally owned
  int strictly_owned_rows = n_local_rows - rows_sent.size();
  std::unordered_map<int, int> local_map;
  next_free_id = strictly_owned_rows;
  int next_free_local_id = 0;
  for (unsigned int i = 0; i < n_local_rows; ++i)
  {
    if (rows_sent.count(i) != 1)
    {
      local_map[i] = next_free_local_id;
      ++next_free_local_id;
    }
    else
    {
      local_map[i] = next_free_id;
      ++next_free_id;
    }
  }
  for (auto &col_index : col_index_host)
  {
    // Only locally owned columns (ids < n_local_rows) are affected by the
    // row reordering; halo columns keep their halo_map ids.
    if (col_index < n_local_rows)
      col_index = local_map[col_index];
  }
  for (auto &row_index : row_index_host)
    row_index = local_map[row_index];

  // Sort the vectors
  auto permutation = sort_permutation(row_index_host, col_index_host);
  apply_permutation_in_place(permutation, value_host);
  apply_permutation_in_place(permutation, col_index_host);
  apply_permutation_in_place(permutation, row_index_host);

  // Move the data back to the device
  cuda_mem_copy_to_dev(value_host, matrix_dev.val_dev);
  cuda_mem_copy_to_dev(col_index_host, matrix_dev.column_index_dev);
  cuda_mem_copy_to_dev(row_index_host, row_index_coo_dev);

  // Change to CSR format
  cusparse_error_code = hipsparseXcoo2csr(
      matrix_dev.cusparse_handle, row_index_coo_dev, local_nnz, n_local_rows,
      matrix_dev.row_ptr_dev, HIPSPARSE_INDEX_BASE_ZERO);
  // BUGFIX: the status of this conversion was not checked, unlike the
  // csr2coo call above.
  ASSERT_CUSPARSE(cusparse_error_code);

  // Free allocated memory
  cuda_free(row_index_coo_dev);

  return std::make_tuple(halo_map, local_map);
}
// Variable-length all-gather: every rank contributes send_count unsigned
// ints; recv_buffer on every rank receives the concatenation ordered by
// rank.  recv_count is the total size and is not used by this overload.
void all_gather(MPI_Comm communicator, unsigned int send_count,
                unsigned int *send_buffer, unsigned int recv_count,
                unsigned int *recv_buffer)
{
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);

  // First gather the number of elements each proc will send
  std::vector<int> n_elem_per_procs(comm_size);
  MPI_Allgather(&send_count, 1, MPI_INT, n_elem_per_procs.data(), 1, MPI_INT,
                communicator);

  // Gather the elements
  // (displacements are the exclusive prefix sum of the counts)
  std::vector<int> displs(comm_size);
  for (int i = 1; i < comm_size; ++i)
    displs[i] = displs[i - 1] + n_elem_per_procs[i - 1];
  MPI_Allgatherv(send_buffer, send_count, MPI_UNSIGNED, recv_buffer,
                 n_elem_per_procs.data(), displs.data(), MPI_UNSIGNED,
                 communicator);
}
// Variable-length all-gather for float buffers; see the unsigned int
// overload above for the protocol.  recv_count is unused here.
void all_gather(MPI_Comm communicator, unsigned int send_count,
                float *send_buffer, unsigned int recv_count, float *recv_buffer)
{
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);

  // First gather the number of elements each proc will send.
  std::vector<int> n_elem_per_procs(comm_size);
  MPI_Allgather(&send_count, 1, MPI_INT, n_elem_per_procs.data(), 1, MPI_INT,
                communicator);

  // Gather the elements
  std::vector<int> displs(comm_size);
  for (int i = 1; i < comm_size; ++i)
    displs[i] = displs[i - 1] + n_elem_per_procs[i - 1];
  MPI_Allgatherv(send_buffer, send_count, MPI_FLOAT, recv_buffer,
                 n_elem_per_procs.data(), displs.data(), MPI_FLOAT,
                 communicator);
}
// Variable-length all-gather for double buffers; see the unsigned int
// overload above for the protocol.  recv_count is unused here.
void all_gather(MPI_Comm communicator, unsigned int send_count,
                double *send_buffer, unsigned int recv_count,
                double *recv_buffer)
{
  // First gather the number of elements each proc will send
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  std::vector<int> n_elem_per_procs(comm_size);
  MPI_Allgather(&send_count, 1, MPI_INT, n_elem_per_procs.data(), 1, MPI_INT,
                communicator);

  // Gather the elements
  std::vector<int> displs(comm_size);
  for (int i = 1; i < comm_size; ++i)
    displs[i] = displs[i - 1] + n_elem_per_procs[i - 1];
  MPI_Allgatherv(send_buffer, send_count, MPI_DOUBLE, recv_buffer,
                 n_elem_per_procs.data(), displs.data(), MPI_DOUBLE,
                 communicator);
}
#ifdef MFMG_WITH_CUDA_MPI
// GPU-aware-MPI variant: the device buffers are handed to MPI directly.
// With a single rank the gather degenerates to a device-to-device copy.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    float *send_buffer, unsigned int recv_count,
                    float *recv_buffer)
{
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    all_gather(communicator, send_count, send_buffer, recv_count, recv_buffer);
  }
  else
  {
    hipError_t cuda_error_code;
    cuda_error_code =
        hipMemcpy(recv_buffer, send_buffer, send_count * sizeof(float),
                  hipMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
// GPU-aware-MPI variant for double buffers; see the float overload above.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    double *send_buffer, unsigned int recv_count,
                    double *recv_buffer)
{
  // If there is only one proc, we just copy the value in the send_buffer to the
  // recv_buffer.
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    all_gather(communicator, send_count, send_buffer, recv_count, recv_buffer);
  }
  else
  {
    hipError_t cuda_error_code;
    cuda_error_code =
        hipMemcpy(recv_buffer, send_buffer, send_count * sizeof(double),
                  hipMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
#else
// Non-GPU-aware-MPI variant: device buffers are staged through host
// copies around the MPI call.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    float *send_buffer, unsigned int recv_count,
                    float *recv_buffer)
{
  // If there is only one proc, we just copy the value in the send_buffer to the
  // recv_buffer.
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    // We cannot call MPI directly, so first we copy the send_buffer to the host
    // and after the communication, we copy the result in the recv_buffer.
    // NOTE(review): &send_buffer_host[0] is undefined when send_count == 0;
    // callers appear to always pass a positive count — confirm.
    std::vector<float> send_buffer_host(send_count);
    hipError_t cuda_error_code;
    cuda_error_code =
        hipMemcpy(&send_buffer_host[0], send_buffer,
                  send_count * sizeof(float), hipMemcpyDeviceToHost);
    ASSERT_CUDA(cuda_error_code);
    std::vector<float> recv_buffer_host(recv_count);
    all_gather(communicator, send_count, &send_buffer_host[0], recv_count,
               &recv_buffer_host[0]);
    cuda_error_code =
        hipMemcpy(recv_buffer, &recv_buffer_host[0],
                  recv_count * sizeof(float), hipMemcpyHostToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
  else
  {
    hipError_t cuda_error_code;
    cuda_error_code =
        hipMemcpy(recv_buffer, send_buffer, send_count * sizeof(float),
                  hipMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
// Non-GPU-aware-MPI variant for double buffers; see the float overload
// above.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    double *send_buffer, unsigned int recv_count,
                    double *recv_buffer)
{
  // If there is only one proc, we just copy the value in the send_buffer to the
  // recv_buffer.
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    // We cannot call MPI directly, so first we copy the send_buffer to the host
    // and after the communication, we copy the result in the recv_buffer.
    std::vector<double> send_buffer_host(send_count);
    hipError_t cuda_error_code;
    cuda_error_code =
        hipMemcpy(&send_buffer_host[0], send_buffer,
                  send_count * sizeof(double), hipMemcpyDeviceToHost);
    ASSERT_CUDA(cuda_error_code);
    std::vector<double> recv_buffer_host(recv_count);
    all_gather(communicator, send_count, &send_buffer_host[0], recv_count,
               &recv_buffer_host[0]);
    cuda_error_code =
        hipMemcpy(recv_buffer, &recv_buffer_host[0],
                  recv_count * sizeof(double), hipMemcpyHostToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
  else
  {
    hipError_t cuda_error_code;
    cuda_error_code =
        hipMemcpy(recv_buffer, send_buffer, send_count * sizeof(double),
                  hipMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
#endif
// Explicit instantiation of the serial deal.II conversion for double.
template SparseMatrixDevice<double>
convert_matrix(dealii::SparseMatrix<double> const &sparse_matrix);
} // namespace mfmg
| 6544c591bfaf182d212b376761f7e91fbfa479aa.cu | /*************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg libary. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#include <mfmg/common/utils.hpp>
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <deal.II/lac/trilinos_index_access.h>
namespace mfmg
{
namespace internal
{
template <typename ScalarType>
ScalarType *copy_to_gpu(std::vector<ScalarType> const &val)
{
  // Allocate a device buffer of matching size and copy the host data in.
  // The caller takes ownership of (and must free) the returned pointer.
  ASSERT(val.size() > 0, "Cannot copy an empty vector to the device");
  size_t const bytes = val.size() * sizeof(ScalarType);
  ScalarType *device_ptr = nullptr;
  cudaError_t error_code = cudaMalloc(&device_ptr, bytes);
  ASSERT_CUDA(error_code);
  error_code = cudaMemcpy(device_ptr, val.data(), bytes, cudaMemcpyHostToDevice);
  ASSERT_CUDA(error_code);
  return device_ptr;
}
} // namespace internal
// Converts a serial deal.II SparseMatrix to a CSR SparseMatrixDevice
// (single-process: both index sets are complete).
template <typename ScalarType>
SparseMatrixDevice<ScalarType>
convert_matrix(dealii::SparseMatrix<ScalarType> const &sparse_matrix)
{
  unsigned int const nnz = sparse_matrix.n_nonzero_elements();
  int const n_rows = sparse_matrix.m();
  int const row_ptr_size = n_rows + 1;
  std::vector<ScalarType> val;
  val.reserve(nnz);
  std::vector<int> column_index;
  column_index.reserve(nnz);
  std::vector<int> row_ptr(row_ptr_size, 0);
  for (int row = 0; row < n_rows; ++row)
  {
    auto p_end = sparse_matrix.end(row);
    unsigned int counter = 0;
    for (auto p = sparse_matrix.begin(row); p != p_end; ++p)
    {
      val.emplace_back(p->value());
      column_index.emplace_back(p->column());
      ++counter;
    }
    row_ptr[row + 1] = row_ptr[row] + counter;

    // If the matrix is square deal.II stores the diagonal first in each row so
    // we need to do some reordering
    if (sparse_matrix.m() == sparse_matrix.n())
    {
      // Shift the off-diagonal entries with column < row one slot to the
      // left and re-insert the diagonal in its sorted position.
      unsigned int const offset = row_ptr[row];
      int const diag_index = column_index[offset];
      ScalarType diag_elem = sparse_matrix.diag_element(row);
      unsigned int pos = 1;
      // BUGFIX: check pos < counter *before* indexing.  The original
      // condition order read column_index[offset + counter] (one past the
      // row, and past the end of the vector on the last row) whenever
      // every off-diagonal entry of the row has column < row.
      while ((pos < counter) && (column_index[offset + pos] < row))
      {
        val[offset + pos - 1] = val[offset + pos];
        column_index[offset + pos - 1] = column_index[offset + pos];
        ++pos;
      }
      val[offset + pos - 1] = diag_elem;
      column_index[offset + pos - 1] = diag_index;
    }
  }

  return SparseMatrixDevice<ScalarType>(
      MPI_COMM_SELF, internal::copy_to_gpu(val),
      internal::copy_to_gpu(column_index), internal::copy_to_gpu(row_ptr), nnz,
      dealii::complete_index_set(n_rows),
      dealii::complete_index_set(sparse_matrix.n()));
}
// Converts the locally owned rows of a (possibly MPI-distributed)
// Trilinos-backed deal.II sparse matrix to a device CSR matrix.
SparseMatrixDevice<double>
convert_matrix(dealii::TrilinosWrappers::SparseMatrix const &sparse_matrix)
{
  unsigned int const n_local_rows = sparse_matrix.local_size();
  std::vector<double> val;
  std::vector<int> column_index;
  std::vector<int> row_ptr(n_local_rows + 1);
  unsigned int local_nnz = 0;
  for (unsigned int row = 0; row < n_local_rows; ++row)
  {
    int n_entries;
    double *values;
    int *indices;
    // View of the row's values and its *local* column indices.
    sparse_matrix.trilinos_matrix().ExtractMyRowView(row, n_entries, values,
                                                     indices);
    val.insert(val.end(), values, values + n_entries);
    row_ptr[row + 1] = row_ptr[row] + n_entries;

    // Trilinos does not store the column indices directly
    for (int i = 0; i < n_entries; ++i)
      column_index.push_back(dealii::TrilinosWrappers::global_column_index(
          sparse_matrix.trilinos_matrix(), indices[i]));
    local_nnz += n_entries;
  }

  return SparseMatrixDevice<double>(
      sparse_matrix.get_mpi_communicator(), internal::copy_to_gpu(val),
      internal::copy_to_gpu(column_index), internal::copy_to_gpu(row_ptr),
      local_nnz, sparse_matrix.locally_owned_range_indices(),
      sparse_matrix.locally_owned_domain_indices());
}
// Converts the locally owned part of an Epetra_CrsMatrix to a device CSR
// matrix, building deal.II index sets from the Epetra range/domain maps.
SparseMatrixDevice<double> convert_matrix(Epetra_CrsMatrix const &sparse_matrix)
{
  // Locally owned rows -> range index set.
  auto range_map = sparse_matrix.RangeMap();
  unsigned int const n_local_rows = range_map.NumMyElements();
  std::vector<int> row_gid(n_local_rows);
  range_map.MyGlobalElements(row_gid.data());
  dealii::IndexSet range_indexset(range_map.NumGlobalElements());
  range_indexset.add_indices(row_gid.begin(), row_gid.end());
  range_indexset.compress();

  // Locally owned columns -> domain index set.
  auto domain_map = sparse_matrix.DomainMap();
  std::vector<int> column_gid(domain_map.NumMyElements());
  domain_map.MyGlobalElements(column_gid.data());
  dealii::IndexSet domain_indexset(domain_map.NumGlobalElements());
  domain_indexset.add_indices(column_gid.begin(), column_gid.end());
  domain_indexset.compress();

  unsigned int const local_nnz = sparse_matrix.NumMyNonzeros();
  int *row_ptr_host = nullptr;
  int *local_column_index_host = nullptr;
  double *val_host = nullptr;
  // Direct views into the Epetra CSR storage (no copies).
  sparse_matrix.ExtractCrsDataPointers(row_ptr_host, local_column_index_host,
                                       val_host);

  // Epetra stores local column ids; translate them to global ids.
  std::vector<int> column_index(local_nnz);
  for (unsigned int i = 0; i < local_nnz; ++i)
    column_index[i] = sparse_matrix.GCID(local_column_index_host[i]);

  double *val_dev;
  cuda_malloc(val_dev, local_nnz);
  cudaError_t cuda_error;
  cuda_error = cudaMemcpy(val_dev, val_host, local_nnz * sizeof(double),
                          cudaMemcpyHostToDevice);
  ASSERT_CUDA(cuda_error);
  int *row_ptr_dev;
  unsigned int const row_ptr_size = n_local_rows + 1;
  cuda_malloc(row_ptr_dev, row_ptr_size);
  cuda_error = cudaMemcpy(row_ptr_dev, row_ptr_host, row_ptr_size * sizeof(int),
                          cudaMemcpyHostToDevice);
  ASSERT_CUDA(cuda_error);

  return SparseMatrixDevice<double>(
      dynamic_cast<Epetra_MpiComm const &>(sparse_matrix.Comm()).Comm(),
      val_dev, internal::copy_to_gpu(column_index), row_ptr_dev, local_nnz,
      range_indexset, domain_indexset);
}
// Copies a device CSR matrix back into a (distributed) Trilinos-backed
// deal.II sparse matrix.
dealii::TrilinosWrappers::SparseMatrix
convert_to_trilinos_matrix(SparseMatrixDevice<double> const &matrix_dev)
{
  unsigned int const local_nnz = matrix_dev.local_nnz();
  unsigned int const n_local_rows = matrix_dev.n_local_rows();
  std::vector<double> values(local_nnz);
  std::vector<int> column_index(local_nnz);
  std::vector<int> row_ptr(n_local_rows + 1);

  // Copy the data to the host
  cuda_mem_copy_to_host(matrix_dev.val_dev, values);
  cuda_mem_copy_to_host(matrix_dev.column_index_dev, column_index);
  cuda_mem_copy_to_host(matrix_dev.row_ptr_dev, row_ptr);

  // Create the sparse matrix on the host
  dealii::IndexSet locally_owned_rows =
      matrix_dev.locally_owned_range_indices();
  dealii::TrilinosWrappers::SparseMatrix sparse_matrix(
      locally_owned_rows, matrix_dev.locally_owned_domain_indices(),
      matrix_dev.get_mpi_communicator());
  // pos walks the local rows in the same order the owned global rows are
  // enumerated by the index set.
  unsigned int pos = 0;
  for (auto row : locally_owned_rows)
  {
    unsigned int const n_cols = row_ptr[pos + 1] - row_ptr[pos];
    sparse_matrix.set(
        row, n_cols,
        reinterpret_cast<unsigned int *>(column_index.data() + row_ptr[pos]),
        values.data() + row_ptr[pos]);
    ++pos;
  }
  sparse_matrix.compress(dealii::VectorOperation::insert);

  return sparse_matrix;
}
// Reorders a distributed device CSR matrix into the layout AmgX expects:
//  - halo (non-locally-owned) column ids are renumbered after the local ones,
//  - rows that are sent to other processors (rows_sent) are moved behind
//    the strictly owned rows,
//  - entries are sorted by (row, column).
// Returns the (halo_map, local_map) renumberings that were applied.
std::tuple<std::unordered_map<int, int>, std::unordered_map<int, int>>
csr_to_amgx(std::unordered_set<int> const &rows_sent,
            SparseMatrixDevice<double> &matrix_dev)
{
  unsigned int local_nnz = matrix_dev.local_nnz();
  int *row_index_coo_dev = nullptr;
  cuda_malloc(row_index_coo_dev, local_nnz);
  int n_local_rows = matrix_dev.n_local_rows();

  // Change to COO format. The only thing that needs to be change to go from CSR
  // to COO is to change row_ptr_dev with row_index_coo_dev.
  cusparseStatus_t cusparse_error_code = cusparseXcsr2coo(
      matrix_dev.cusparse_handle, matrix_dev.row_ptr_dev, local_nnz,
      n_local_rows, row_index_coo_dev, CUSPARSE_INDEX_BASE_ZERO);
  ASSERT_CUSPARSE(cusparse_error_code);

  // Move the values, the rows, and the columns to the host
  std::vector<double> value_host(local_nnz);
  cuda_mem_copy_to_host(matrix_dev.val_dev, value_host);
  std::vector<int> col_index_host(local_nnz);
  cuda_mem_copy_to_host(matrix_dev.column_index_dev, col_index_host);
  std::vector<int> row_index_host(local_nnz);
  cuda_mem_copy_to_host(row_index_coo_dev, row_index_host);

  // Renumber halo data behind the local data
  auto range_indexset = matrix_dev.locally_owned_range_indices();
  std::vector<unsigned int> global_rows;
  range_indexset.fill_index_vector(global_rows);
  std::unordered_map<int, int> halo_map;
  for (unsigned int i = 0; i < n_local_rows; ++i)
    halo_map[global_rows[i]] = i;

  // (The unused locals n_rows and rank of the original were removed.)
  int next_free_id = n_local_rows;
  dealii::IndexSet col_indexset(matrix_dev.n());
  col_indexset.add_indices(col_index_host.begin(), col_index_host.end());
  col_indexset.compress();
  for (auto index : col_indexset)
  {
    if (range_indexset.is_element(index) == false)
    {
      halo_map[index] = next_free_id;
      ++next_free_id;
    }
  }
  for (auto &col_index : col_index_host)
    col_index = halo_map[col_index];

  // Reorder rows and columns. We need to move to the top the rows that are
  // locally owned
  int strictly_owned_rows = n_local_rows - rows_sent.size();
  std::unordered_map<int, int> local_map;
  next_free_id = strictly_owned_rows;
  int next_free_local_id = 0;
  for (unsigned int i = 0; i < n_local_rows; ++i)
  {
    if (rows_sent.count(i) != 1)
    {
      local_map[i] = next_free_local_id;
      ++next_free_local_id;
    }
    else
    {
      local_map[i] = next_free_id;
      ++next_free_id;
    }
  }
  for (auto &col_index : col_index_host)
  {
    // Only locally owned columns (ids < n_local_rows) are affected by the
    // row reordering; halo columns keep their halo_map ids.
    if (col_index < n_local_rows)
      col_index = local_map[col_index];
  }
  for (auto &row_index : row_index_host)
    row_index = local_map[row_index];

  // Sort the vectors
  auto permutation = sort_permutation(row_index_host, col_index_host);
  apply_permutation_in_place(permutation, value_host);
  apply_permutation_in_place(permutation, col_index_host);
  apply_permutation_in_place(permutation, row_index_host);

  // Move the data back to the device
  cuda_mem_copy_to_dev(value_host, matrix_dev.val_dev);
  cuda_mem_copy_to_dev(col_index_host, matrix_dev.column_index_dev);
  cuda_mem_copy_to_dev(row_index_host, row_index_coo_dev);

  // Change to CSR format
  cusparse_error_code = cusparseXcoo2csr(
      matrix_dev.cusparse_handle, row_index_coo_dev, local_nnz, n_local_rows,
      matrix_dev.row_ptr_dev, CUSPARSE_INDEX_BASE_ZERO);
  // BUGFIX: the status of this conversion was not checked, unlike the
  // csr2coo call above.
  ASSERT_CUSPARSE(cusparse_error_code);

  // Free allocated memory
  cuda_free(row_index_coo_dev);

  return std::make_tuple(halo_map, local_map);
}
// Variable-length all-gather: every rank contributes send_count unsigned
// ints; recv_buffer on every rank receives the concatenation ordered by
// rank.  recv_count is the total size and is not used by this overload.
void all_gather(MPI_Comm communicator, unsigned int send_count,
                unsigned int *send_buffer, unsigned int recv_count,
                unsigned int *recv_buffer)
{
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);

  // First gather the number of elements each proc will send
  std::vector<int> n_elem_per_procs(comm_size);
  MPI_Allgather(&send_count, 1, MPI_INT, n_elem_per_procs.data(), 1, MPI_INT,
                communicator);

  // Gather the elements
  // (displacements are the exclusive prefix sum of the counts)
  std::vector<int> displs(comm_size);
  for (int i = 1; i < comm_size; ++i)
    displs[i] = displs[i - 1] + n_elem_per_procs[i - 1];
  MPI_Allgatherv(send_buffer, send_count, MPI_UNSIGNED, recv_buffer,
                 n_elem_per_procs.data(), displs.data(), MPI_UNSIGNED,
                 communicator);
}
// Variable-length all-gather for float buffers; see the unsigned int
// overload above for the protocol.  recv_count is unused here.
void all_gather(MPI_Comm communicator, unsigned int send_count,
                float *send_buffer, unsigned int recv_count, float *recv_buffer)
{
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);

  // First gather the number of elements each proc will send.
  std::vector<int> n_elem_per_procs(comm_size);
  MPI_Allgather(&send_count, 1, MPI_INT, n_elem_per_procs.data(), 1, MPI_INT,
                communicator);

  // Gather the elements
  std::vector<int> displs(comm_size);
  for (int i = 1; i < comm_size; ++i)
    displs[i] = displs[i - 1] + n_elem_per_procs[i - 1];
  MPI_Allgatherv(send_buffer, send_count, MPI_FLOAT, recv_buffer,
                 n_elem_per_procs.data(), displs.data(), MPI_FLOAT,
                 communicator);
}
// Variable-length all-gather for double buffers; see the unsigned int
// overload above for the protocol.  recv_count is unused here.
void all_gather(MPI_Comm communicator, unsigned int send_count,
                double *send_buffer, unsigned int recv_count,
                double *recv_buffer)
{
  // First gather the number of elements each proc will send
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  std::vector<int> n_elem_per_procs(comm_size);
  MPI_Allgather(&send_count, 1, MPI_INT, n_elem_per_procs.data(), 1, MPI_INT,
                communicator);

  // Gather the elements
  std::vector<int> displs(comm_size);
  for (int i = 1; i < comm_size; ++i)
    displs[i] = displs[i - 1] + n_elem_per_procs[i - 1];
  MPI_Allgatherv(send_buffer, send_count, MPI_DOUBLE, recv_buffer,
                 n_elem_per_procs.data(), displs.data(), MPI_DOUBLE,
                 communicator);
}
#ifdef MFMG_WITH_CUDA_MPI
// CUDA-aware-MPI variant: the device buffers are handed to MPI directly.
// With a single rank the gather degenerates to a device-to-device copy.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    float *send_buffer, unsigned int recv_count,
                    float *recv_buffer)
{
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    all_gather(communicator, send_count, send_buffer, recv_count, recv_buffer);
  }
  else
  {
    cudaError_t cuda_error_code;
    cuda_error_code =
        cudaMemcpy(recv_buffer, send_buffer, send_count * sizeof(float),
                   cudaMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
// CUDA-aware-MPI variant for double buffers; see the float overload above.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    double *send_buffer, unsigned int recv_count,
                    double *recv_buffer)
{
  // If there is only one proc, we just copy the value in the send_buffer to the
  // recv_buffer.
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    all_gather(communicator, send_count, send_buffer, recv_count, recv_buffer);
  }
  else
  {
    cudaError_t cuda_error_code;
    cuda_error_code =
        cudaMemcpy(recv_buffer, send_buffer, send_count * sizeof(double),
                   cudaMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
#else
// Device-buffer all-gather for floats when MPI is NOT CUDA-aware: the
// send buffer is staged to the host, gathered with regular MPI, and the
// result is copied back to the device. With a single rank the data is
// just copied device-to-device.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    float *send_buffer, unsigned int recv_count,
                    float *recv_buffer)
{
  // If there is only one proc, we just copy the value in the send_buffer to the
  // recv_buffer.
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    // We cannot call MPI directly, so first we copy the send_buffer to the host
    // and after the communication, we copy the result in the recv_buffer.
    std::vector<float> send_buffer_host(send_count);
    cudaError_t cuda_error_code;
    cuda_error_code =
        cudaMemcpy(&send_buffer_host[0], send_buffer,
                   send_count * sizeof(float), cudaMemcpyDeviceToHost);
    ASSERT_CUDA(cuda_error_code);
    std::vector<float> recv_buffer_host(recv_count);
    all_gather(communicator, send_count, &send_buffer_host[0], recv_count,
               &recv_buffer_host[0]);
    cuda_error_code =
        cudaMemcpy(recv_buffer, &recv_buffer_host[0],
                   recv_count * sizeof(float), cudaMemcpyHostToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
  else
  {
    // Single rank: no MPI traffic needed.
    cudaError_t cuda_error_code;
    cuda_error_code =
        cudaMemcpy(recv_buffer, send_buffer, send_count * sizeof(float),
                   cudaMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
// Device-buffer all-gather for doubles when MPI is NOT CUDA-aware: the
// send buffer is staged to the host, gathered with regular MPI, and the
// result is copied back to the device. With a single rank the data is
// just copied device-to-device.
void all_gather_dev(MPI_Comm communicator, unsigned int send_count,
                    double *send_buffer, unsigned int recv_count,
                    double *recv_buffer)
{
  // If there is only one proc, we just copy the value in the send_buffer to the
  // recv_buffer.
  int comm_size;
  MPI_Comm_size(communicator, &comm_size);
  if (comm_size > 1)
  {
    // We cannot call MPI directly, so first we copy the send_buffer to the host
    // and after the communication, we copy the result in the recv_buffer.
    std::vector<double> send_buffer_host(send_count);
    cudaError_t cuda_error_code;
    cuda_error_code =
        cudaMemcpy(&send_buffer_host[0], send_buffer,
                   send_count * sizeof(double), cudaMemcpyDeviceToHost);
    ASSERT_CUDA(cuda_error_code);
    std::vector<double> recv_buffer_host(recv_count);
    all_gather(communicator, send_count, &send_buffer_host[0], recv_count,
               &recv_buffer_host[0]);
    cuda_error_code =
        cudaMemcpy(recv_buffer, &recv_buffer_host[0],
                   recv_count * sizeof(double), cudaMemcpyHostToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
  else
  {
    // Single rank: no MPI traffic needed.
    cudaError_t cuda_error_code;
    cuda_error_code =
        cudaMemcpy(recv_buffer, send_buffer, send_count * sizeof(double),
                   cudaMemcpyDeviceToDevice);
    ASSERT_CUDA(cuda_error_code);
  }
}
#endif
template SparseMatrixDevice<double>
convert_matrix(dealii::SparseMatrix<double> const &sparse_matrix);
} // namespace mfmg
|
2a39aaa7e40437482a85805a951f32c14186796c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <random>
#include <vector>
#include <string>
#include <cmath>
#include <hip/hip_runtime.h>
#include <omp.h>
#include <hip/hip_vector_types.h>
#include <harppi.h>
#include "gpuerrchk.h"
// Set some values for easy reference and updating
#define PI 3.1415926535897932384626433832795
#define N_threads 256
__constant__ float d_L;
__constant__ float d_R;
__constant__ int d_Nshells;
__constant__ int d_Nparts;
// Declare arrays or variables that can be stored in the GPU's constant memory. These cannot be changed during
// the execution of the GPU kernel, but can be updated repeated from the host side code.
__constant__ int3 d_shifts[27];
// The __device__ decorator specifies a function that can be called from inside the __global__ decorated GPU
// kernel, or any other __device__ decorated function. The decorators lets nvcc know which parts of the code
// it needs to compile, while all other code is passed along to a standard compiler like the GNU Compiler
// collection's (GCC) g++. As the name of this function suggests, it simply calculates the 3D separation of two
// points.
// Euclidean distance between two 3-D points.
__device__ float get_separation(float3 &r1, float3 &r2) {
    float dx = r1.x - r2.x;
    float dy = r1.y - r2.y;
    float dz = r1.z - r2.z;
    return sqrtf(dx*dx + dy*dy + dz*dz);
}
// The code makes sure that the three lengths can in fact form a triangle. The __host__ decorator means that this
// can also be called from the CPU.
// True iff side lengths r1, r2, r3 can form a (possibly degenerate)
// triangle, i.e. the largest side is no longer than the sum of the other
// two. Argument order does not matter.
__device__ __host__ bool isTriangle(float r1, float r2, float r3) {
    // Order the sides so that r3 holds the largest value.
    if (r1 > r2) { float t = r1; r1 = r2; r2 = t; }
    if (r1 > r3) { float t = r1; r1 = r3; r3 = t; }
    if (r2 > r3) { float t = r2; r2 = r3; r3 = t; }
    return r3 <= r1 + r2;
}
// Determines which bin a triangle belongs in. By sorting the lengths first, we can make sure that all permutations
// of lengths are put in the same bin.
// Map three pair separations onto a flattened 3-D histogram bin.
//
// The lengths are sorted ascending (d1 <= d2 <= d3), each is binned into
// one of d_Nshells linear shells of width d_R/d_Nshells, and the shell
// indices are flattened as shell3 + N*(shell2 + N*shell1), so every
// permutation of the same triple lands in the same bin.
//
// NOTE(review): when the sorted lengths fail the triangle inequality the
// function returns 1.0 (implicitly converted to int 1), which aliases the
// valid bin (0,0,1); callers should reject non-triangles before using the
// result as an index.
__device__ int get_shell(float d1, float d2, float d3) {
    // Sort the three lengths ascending with explicit swaps.
    if (d1 > d2) {
        float temp = d1;
        d1 = d2;
        d2 = temp;
    }
    if (d1 > d3) {
        float temp = d1;
        d1 = d3;
        d3 = temp;
    }
    if (d2 > d3) {
        float temp = d2;
        d2 = d3;
        d3 = temp;
    }
    if (d1 <= d2 && d2 <= d3 && d3 <= d1 + d2) {
        int shell1 = d1*d_Nshells/d_R;
        int shell2 = d2*d_Nshells/d_R;
        int shell3 = d3*d_Nshells/d_R;
        return shell3 + d_Nshells*(shell2 + d_Nshells*shell1);
    } else {
        return 1.0;  // sentinel for non-triangles (see NOTE above)
    }
}
// The points are binned so that points that are spatially close are stored near each other. This function
// is used to cycle through all the bins that directly neighbor the bin containing the first point, including the
// bin that holds that first point, to find triangle. This way, even though the algorithm is still O(N^3), the N
// can be much smaller.
// Compute the cell reached from ngp by neighbour offset i on an
// n x n x n periodic grid.
//
// Applies d_shifts[i] to the 3-D cell coordinates, wraps any coordinate
// that leaves [0, n) back into range, and records in rShift the +/-d_L
// displacement that must be added to particle positions read from the
// wrapped cell so separations are measured across the periodic boundary.
// The flattened cell index is returned in the .w component.
__device__ int4 get_index(int4 ngp, int i, int n, float3 &rShift) {
    ngp.x += d_shifts[i].x;
    ngp.y += d_shifts[i].y;
    ngp.z += d_shifts[i].z;
    rShift.x = 0.0;
    rShift.y = 0.0;
    rShift.z = 0.0;
    // Wrap past the upper edge: neighbour particles appear shifted by +d_L.
    if (ngp.x >= n) {
        ngp.x -= n;
        rShift.x = d_L;
    }
    if (ngp.y >= n) {
        ngp.y -= n;
        rShift.y = d_L;
    }
    if (ngp.z >= n) {
        ngp.z -= n;
        rShift.z = d_L;
    }
    // Wrap past the lower edge: neighbour particles appear shifted by -d_L.
    if (ngp.x <= -1) {
        ngp.x += n;
        rShift.x = -d_L;
    }
    if (ngp.y <= -1) {
        ngp.y += n;
        rShift.y = -d_L;
    }
    if (ngp.z <= -1) {
        ngp.z += n;
        rShift.z = -d_L;
    }
    // Flatten (x, y, z) into a linear cell index.
    ngp.w = ngp.z + n*(ngp.y + n*ngp.x);
    return ngp;
}
// One thread per particle: scan the particle's cell and its 26 periodic
// neighbours and histogram every pair separation r in (0, d_R) into
// d_pairs (linear shells of width d_R/d_Nshells). r > 0 excludes the
// particle itself; each unordered pair is counted twice, once from each
// endpoint.
__global__ void countPairs(float3 *d_p, int2 *d_cells, int *d_pairs, int n) {
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid < d_Nparts) {
        float3 p1 = d_p[tid];
        // Cell of p1; clamp coordinates sitting exactly on the upper
        // domain boundary into the last cell.
        int4 ngp1 = {int(p1.x/d_R), int(p1.y/d_R), int(p1.z/d_R), 0};
        if (ngp1.x == n) ngp1.x--;
        if (ngp1.y == n) ngp1.y--;
        if (ngp1.z == n) ngp1.z--;
        for (int i = 0; i < 27; ++i) {
            float3 p2shift;
            int4 index2 = get_index(ngp1, i, n, p2shift);
            // Inclusive [first, last] particle range of the neighbour cell.
            int2 bounds = d_cells[index2.w];
            for (int part2 = bounds.x; part2 <= bounds.y; ++part2) {
                float3 p2 = d_p[part2];
                // Apply the periodic image displacement.
                p2.x += p2shift.x;
                p2.y += p2shift.y;
                p2.z += p2shift.z;
                float r1 = get_separation(p1, p2);
                if (r1 < d_R && r1 > 0) {
                    int shell = int(r1*d_Nshells/d_R);
                    atomicAdd(&d_pairs[shell], 1);
                }
            }
        }
    }
}
// One thread per particle: walk the 27 neighbouring cells twice to find
// all (p1, p2, p3) triples whose pairwise separations all lie in (0, d_R)
// and histogram the side lengths via get_shell(). Orderings of the same
// triangle are counted multiply by construction.
__global__ void countTriangles(float3 *d_p, int2 *d_cells, int *d_triangles, int n) {
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid < d_Nparts) {
        float3 p1 = d_p[tid];
        // Cell of p1; clamp coordinates sitting exactly on the upper
        // domain boundary into the last cell.
        int4 ngp1 = {int(p1.x/d_R), int(p1.y/d_R), int(p1.z/d_R)};
        if (ngp1.x == n) ngp1.x--;
        if (ngp1.y == n) ngp1.y--;
        if (ngp1.z == n) ngp1.z--;
        for (int i = 0; i < 27; ++i) {
            float3 p2shift;
            int4 index2 = get_index(ngp1, i, n, p2shift);
            int2 bounds2 = d_cells[index2.w];
            for (int part2 = bounds2.x; part2 <= bounds2.y; ++part2) {
                float3 p2 = d_p[part2];
                p2.x += p2shift.x;
                p2.y += p2shift.y;
                p2.z += p2shift.z;
                float r1 = get_separation(p1, p2);
                if (r1 < d_R && r1 > 0) {
                    for (int j = 0; j < 27; ++j) {
                        float3 p3shift;
                        int4 index3 = get_index(ngp1, j, n, p3shift);
                        int2 bounds3 = d_cells[index3.w];
                        for (int part3 = bounds3.x; part3 <= bounds3.y; ++part3) {
                            float3 p3 = d_p[part3];
                            p3.x += p3shift.x;
                            p3.y += p3shift.y;
                            p3.z += p3shift.z;
                            float r2 = get_separation(p2, p3);
                            float r3 = get_separation(p1, p3);
                            if (r2 < d_R && r2 > 0 && r3 < d_R && r3 > 0) {
                                // BUGFIX: triples failing the triangle
                                // inequality used to be funnelled into bin
                                // (0,0,1), because get_shell() returns the
                                // sentinel 1 for them; skip them instead.
                                if (r1 <= r2 + r3 && r2 <= r1 + r3 && r3 <= r1 + r2) {
                                    int shell = get_shell(r1, r2, r3);
                                    atomicAdd(&d_triangles[shell], 1);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Sort `parts` in place into an n x n x n grid of cells of side R
// (n = int(L/R)), so that all particles of a cell are contiguous.
// Returns, for each flattened cell index, the inclusive [first, last]
// particle range as an int2 (an empty cell yields last = first - 1).
std::vector<int2> getCells(std::vector<float3> &parts, double L, double R, int &n) {
    n = int(L/R);
    std::vector<std::vector<float3>> H(n*n*n);
    std::vector<int2> cells;
    std::cout << "Binning particles..." << std::endl;
    for (int i = 0; i < parts.size(); ++i) {
        int ix = parts[i].x/R;
        int iy = parts[i].y/R;
        int iz = parts[i].z/R;
        // BUGFIX: clamp coordinates sitting exactly on the upper domain
        // boundary (e.g. x == L) into the last cell. The kernels already
        // clamp this way, but here the unclamped index wrote out of
        // bounds of H.
        if (ix == n) ix--;
        if (iy == n) iy--;
        if (iz == n) iz--;
        int index = iz + n*(iy + n*ix);
        H[index].push_back(parts[i]);
    }
    // Flatten the per-cell lists back into `parts` and record each cell's
    // inclusive particle range.
    int part = 0;
    for (int i = 0; i < H.size(); ++i) {
        int2 cell = {part, int(part + H[i].size() - 1)};
        cells.push_back(cell);
        for (int j = 0; j < H[i].size(); ++j) {
            parts[part + j] = H[i][j];
        }
        part += H[i].size();
    }
    return cells;
}
// Write the pair-count histogram to `file`, one "r count" line per
// shell, where r is the midpoint of the shell of width R/N_shells.
void writePairs(std::string file, std::vector<int> &pairs, float R, int N_shells) {
    double const dr = R/N_shells;
    std::ofstream out(file);
    for (int shell = 0; shell < pairs.size(); ++shell) {
        out << (shell + 0.5)*dr << " " << pairs[shell] << "\n";
    }
    out.close();
}
// Write the triangle histogram to `file`: one "r1 r2 r3 count" line per
// bin whose sorted shell midpoints (r1 <= r2 <= r3, each bin written
// once) can actually form a triangle according to isTriangle().
// NOTE(review): the midpoints are doubles but isTriangle() takes floats,
// so the arguments are narrowed on the call — presumably acceptable here.
void writeTriangles(std::string file, std::vector<int> &triangles, float R, int N_shells) {
    double dr = R/N_shells;
    std::ofstream fout(file);
    for (int i = 0; i < N_shells; ++i) {
        double r1 = (i + 0.5)*dr;
        for (int j = i; j < N_shells; ++j) {
            double r2 = (j + 0.5)*dr;
            for (int k = j; k < N_shells; ++k) {
                double r3 = (k + 0.5)*dr;
                if (isTriangle(r1, r2, r3)) {
                    // Flattened bin index matching get_shell()'s layout.
                    int index = k + N_shells*(j + N_shells*i);
                    fout << r1 << " " << r2 << " " << r3 << " " << triangles[index] << "\n";
                }
            }
        }
    }
    fout.close();
}
// Driver: reads a binary particle snapshot, bins the particles into
// cells, counts pairs and triangles on the GPU, and writes both
// histograms. Usage: ./program <parameter_file>
int main(int argc, char *argv[]) {
    // BUGFIX: guard against a missing argument before dereferencing argv[1].
    if (argc < 2) {
        std::cout << "Usage: " << argv[0] << " <parameter_file>" << std::endl;
        return 1;
    }
    parameters p(argv[1]);
    p.print();
    // The 27 offsets addressing a cell and all of its neighbours
    // (including the {0,0,0} self offset).
    std::vector<int3> shifts;
    for (int i = -1; i <= 1; ++i) {
        for (int j = -1; j <= 1; ++j) {
            for (int k = -1; k <= 1; ++k) {
                int3 temp = {i, j, k};
                shifts.push_back(temp);
            }
        }
    }
    float L = float(p.getd("L"));
    float R = float(p.getd("R"));
    int N_shells = p.geti("N_shells");
    hipSetDevice(0);
    std::cout << "Writing values to constant memory..." << std::endl;
    gpuErrchk(hipMemcpyToSymbol(d_L, &L, sizeof(float)));
    gpuErrchk(hipMemcpyToSymbol(d_R, &R, sizeof(float)));
    gpuErrchk(hipMemcpyToSymbol(d_Nshells, &N_shells, sizeof(int)));
    gpuErrchk(hipMemcpyToSymbol(d_shifts, shifts.data(), shifts.size()*sizeof(int3)));
    // Binary layout: int particle count followed by that many float3s.
    std::cout << "Reading input file..." << std::endl;
    int N_parts;
    std::ifstream fin(p.gets("inFile"), std::ios::binary);
    fin.read((char *)&N_parts, sizeof(int));
    std::cout << N_parts << std::endl;
    std::vector<float3> parts(N_parts);
    fin.read((char *)parts.data(), parts.size()*sizeof(float3));
    fin.close();
    std::cout << "Writing number of particles to constant memory..." << std::endl;
    std::cout << "    Number of particles = " << N_parts << std::endl;
    // BUGFIX: this symbol copy was the only unchecked device call.
    gpuErrchk(hipMemcpyToSymbol(d_Nparts, &N_parts, sizeof(int)));
    std::cout << "Setting up cells..." << std::endl;
    int n;
    std::vector<int2> cells = getCells(parts, p.getd("L"), p.getd("R"), n);
    std::ofstream fout("cells.dat");
    for (int i = 0; i < cells.size(); ++i) {
        fout << cells[i].x << " " << cells[i].y << "\n";
    }
    fout.close();
    // Host histograms; zero-initialised, copied down to zero the device ones.
    std::vector<int> pairs(N_shells);
    std::vector<int> triangles(N_shells*N_shells*N_shells);
    std::cout << "Declaring device pointers..." << std::endl;
    int2 *d_cells;
    int *d_pairs, *d_triangles;
    float3 *d_parts;
    std::cout << "Allocating device pointers..." << std::endl;
    gpuErrchk(hipMalloc((void **)&d_cells, cells.size()*sizeof(int2)));
    gpuErrchk(hipMalloc((void **)&d_pairs, pairs.size()*sizeof(int)));
    gpuErrchk(hipMalloc((void **)&d_triangles, triangles.size()*sizeof(int)));
    gpuErrchk(hipMalloc((void **)&d_parts, parts.size()*sizeof(float3)));
    // BUGFIX: this stage copies host -> device; the old message said the
    // opposite.
    std::cout << "Copying from host to device..." << std::endl;
    gpuErrchk(hipMemcpy(d_cells, cells.data(), cells.size()*sizeof(int2), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_pairs, pairs.data(), pairs.size()*sizeof(int), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_triangles, triangles.data(), triangles.size()*sizeof(int), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_parts, parts.data(), parts.size()*sizeof(float3), hipMemcpyHostToDevice));
    std::cout << "Executing GPU kernels..." << std::endl;
    int N_blocks = N_parts/N_threads + 1;
    hipEvent_t begin, end;
    float elapsedTime;
    // BUGFIX: create the timing events once and reuse them for both
    // kernels (they were re-created before and never destroyed).
    hipEventCreate(&begin);
    hipEventCreate(&end);
    hipEventRecord(begin, 0);
    hipLaunchKernelGGL(( countPairs), dim3(N_blocks), dim3(N_threads), 0, 0, d_parts, d_cells, d_pairs, n);
    hipEventRecord(end, 0);
    hipEventSynchronize(end);
    hipEventElapsedTime(&elapsedTime, begin, end);
    std::cout << "Time to count pairs: " << elapsedTime << " ms" << std::endl;
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    hipEventRecord(begin, 0);
    hipLaunchKernelGGL(( countTriangles), dim3(N_blocks), dim3(N_threads), 0, 0, d_parts, d_cells, d_triangles, n);
    hipEventRecord(end, 0);
    hipEventSynchronize(end);
    hipEventElapsedTime(&elapsedTime, begin, end);
    std::cout << "Time to count triangles: " << elapsedTime << " ms" << std::endl;
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    std::cout << "Reading data back from GPU..." << std::endl;
    gpuErrchk(hipMemcpy(pairs.data(), d_pairs, pairs.size()*sizeof(int), hipMemcpyDeviceToHost));
    gpuErrchk(hipMemcpy(triangles.data(), d_triangles, triangles.size()*sizeof(int), hipMemcpyDeviceToHost));
    std::cout << "Writing output files..." << std::endl;
    writePairs(p.gets("pairsFile"), pairs, R, N_shells);
    writeTriangles(p.gets("triangleFile"), triangles, R, N_shells);
    std::cout << "Freeing GPU memory..." << std::endl;
    hipEventDestroy(begin);
    hipEventDestroy(end);
    gpuErrchk(hipFree(d_cells));
    gpuErrchk(hipFree(d_pairs));
    gpuErrchk(hipFree(d_triangles));
    gpuErrchk(hipFree(d_parts));
    std::cout << "Done!" << std::endl;
    return 0;
}
| 2a39aaa7e40437482a85805a951f32c14186796c.cu | #include <iostream>
#include <fstream>
#include <random>
#include <vector>
#include <string>
#include <cmath>
#include <cuda.h>
#include <omp.h>
#include <vector_types.h>
#include <harppi.h>
#include "gpuerrchk.h"
// Set some values for easy reference and updating
#define PI 3.1415926535897932384626433832795
#define N_threads 256
__constant__ float d_L;
__constant__ float d_R;
__constant__ int d_Nshells;
__constant__ int d_Nparts;
// Declare arrays or variables that can be stored in the GPU's constant memory. These cannot be changed during
// the execution of the GPU kernel, but can be updated repeated from the host side code.
__constant__ int3 d_shifts[27];
// The __device__ decorator specifies a function that can be called from inside the __global__ decorated GPU
// kernel, or any other __device__ decorated function. The decorators lets nvcc know which parts of the code
// it needs to compile, while all other code is passed along to a standard compiler like the GNU Compiler
// collection's (GCC) g++. As the name of this function suggests, it simply calculates the 3D separation of two
// points.
// Euclidean distance between two 3-D points.
__device__ float get_separation(float3 &r1, float3 &r2) {
    float dx = r1.x - r2.x;
    float dy = r1.y - r2.y;
    float dz = r1.z - r2.z;
    return sqrtf(dx*dx + dy*dy + dz*dz);
}
// The code makes sure that the three lengths can in fact form a triangle. The __host__ decorator means that this
// can also be called from the CPU.
// True iff side lengths r1, r2, r3 can form a (possibly degenerate)
// triangle, i.e. the largest side is no longer than the sum of the other
// two. Argument order does not matter.
__device__ __host__ bool isTriangle(float r1, float r2, float r3) {
    // Order the sides so that r3 holds the largest value.
    if (r1 > r2) { float t = r1; r1 = r2; r2 = t; }
    if (r1 > r3) { float t = r1; r1 = r3; r3 = t; }
    if (r2 > r3) { float t = r2; r2 = r3; r3 = t; }
    return r3 <= r1 + r2;
}
// Determines which bin a triangle belongs in. By sorting the lengths first, we can make sure that all permutations
// of lengths are put in the same bin.
// Map three pair separations onto a flattened 3-D histogram bin.
//
// The lengths are sorted ascending (d1 <= d2 <= d3), each is binned into
// one of d_Nshells linear shells of width d_R/d_Nshells, and the shell
// indices are flattened as shell3 + N*(shell2 + N*shell1), so every
// permutation of the same triple lands in the same bin.
//
// NOTE(review): when the sorted lengths fail the triangle inequality the
// function returns 1.0 (implicitly converted to int 1), which aliases the
// valid bin (0,0,1); callers should reject non-triangles before using the
// result as an index.
__device__ int get_shell(float d1, float d2, float d3) {
    // Sort the three lengths ascending with explicit swaps.
    if (d1 > d2) {
        float temp = d1;
        d1 = d2;
        d2 = temp;
    }
    if (d1 > d3) {
        float temp = d1;
        d1 = d3;
        d3 = temp;
    }
    if (d2 > d3) {
        float temp = d2;
        d2 = d3;
        d3 = temp;
    }
    if (d1 <= d2 && d2 <= d3 && d3 <= d1 + d2) {
        int shell1 = d1*d_Nshells/d_R;
        int shell2 = d2*d_Nshells/d_R;
        int shell3 = d3*d_Nshells/d_R;
        return shell3 + d_Nshells*(shell2 + d_Nshells*shell1);
    } else {
        return 1.0;  // sentinel for non-triangles (see NOTE above)
    }
}
// The points are binned so that points that are spatially close are stored near each other. This function
// is used to cycle through all the bins that directly neighbor the bin containing the first point, including the
// bin that holds that first point, to find triangle. This way, even though the algorithm is still O(N^3), the N
// can be much smaller.
// Compute the cell reached from ngp by neighbour offset i on an
// n x n x n periodic grid.
//
// Applies d_shifts[i] to the 3-D cell coordinates, wraps any coordinate
// that leaves [0, n) back into range, and records in rShift the +/-d_L
// displacement that must be added to particle positions read from the
// wrapped cell so separations are measured across the periodic boundary.
// The flattened cell index is returned in the .w component.
__device__ int4 get_index(int4 ngp, int i, int n, float3 &rShift) {
    ngp.x += d_shifts[i].x;
    ngp.y += d_shifts[i].y;
    ngp.z += d_shifts[i].z;
    rShift.x = 0.0;
    rShift.y = 0.0;
    rShift.z = 0.0;
    // Wrap past the upper edge: neighbour particles appear shifted by +d_L.
    if (ngp.x >= n) {
        ngp.x -= n;
        rShift.x = d_L;
    }
    if (ngp.y >= n) {
        ngp.y -= n;
        rShift.y = d_L;
    }
    if (ngp.z >= n) {
        ngp.z -= n;
        rShift.z = d_L;
    }
    // Wrap past the lower edge: neighbour particles appear shifted by -d_L.
    if (ngp.x <= -1) {
        ngp.x += n;
        rShift.x = -d_L;
    }
    if (ngp.y <= -1) {
        ngp.y += n;
        rShift.y = -d_L;
    }
    if (ngp.z <= -1) {
        ngp.z += n;
        rShift.z = -d_L;
    }
    // Flatten (x, y, z) into a linear cell index.
    ngp.w = ngp.z + n*(ngp.y + n*ngp.x);
    return ngp;
}
// One thread per particle: scan the particle's cell and its 26 periodic
// neighbours and histogram every pair separation r in (0, d_R) into
// d_pairs (linear shells of width d_R/d_Nshells). r > 0 excludes the
// particle itself; each unordered pair is counted twice, once from each
// endpoint.
__global__ void countPairs(float3 *d_p, int2 *d_cells, int *d_pairs, int n) {
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid < d_Nparts) {
        float3 p1 = d_p[tid];
        // Cell of p1; clamp coordinates sitting exactly on the upper
        // domain boundary into the last cell.
        int4 ngp1 = {int(p1.x/d_R), int(p1.y/d_R), int(p1.z/d_R), 0};
        if (ngp1.x == n) ngp1.x--;
        if (ngp1.y == n) ngp1.y--;
        if (ngp1.z == n) ngp1.z--;
        for (int i = 0; i < 27; ++i) {
            float3 p2shift;
            int4 index2 = get_index(ngp1, i, n, p2shift);
            // Inclusive [first, last] particle range of the neighbour cell.
            int2 bounds = d_cells[index2.w];
            for (int part2 = bounds.x; part2 <= bounds.y; ++part2) {
                float3 p2 = d_p[part2];
                // Apply the periodic image displacement.
                p2.x += p2shift.x;
                p2.y += p2shift.y;
                p2.z += p2shift.z;
                float r1 = get_separation(p1, p2);
                if (r1 < d_R && r1 > 0) {
                    int shell = int(r1*d_Nshells/d_R);
                    atomicAdd(&d_pairs[shell], 1);
                }
            }
        }
    }
}
// One thread per particle: walk the 27 neighbouring cells twice to find
// all (p1, p2, p3) triples whose pairwise separations all lie in (0, d_R)
// and histogram the side lengths via get_shell(). Orderings of the same
// triangle are counted multiply by construction.
__global__ void countTriangles(float3 *d_p, int2 *d_cells, int *d_triangles, int n) {
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid < d_Nparts) {
        float3 p1 = d_p[tid];
        // Cell of p1; clamp coordinates sitting exactly on the upper
        // domain boundary into the last cell.
        int4 ngp1 = {int(p1.x/d_R), int(p1.y/d_R), int(p1.z/d_R)};
        if (ngp1.x == n) ngp1.x--;
        if (ngp1.y == n) ngp1.y--;
        if (ngp1.z == n) ngp1.z--;
        for (int i = 0; i < 27; ++i) {
            float3 p2shift;
            int4 index2 = get_index(ngp1, i, n, p2shift);
            int2 bounds2 = d_cells[index2.w];
            for (int part2 = bounds2.x; part2 <= bounds2.y; ++part2) {
                float3 p2 = d_p[part2];
                p2.x += p2shift.x;
                p2.y += p2shift.y;
                p2.z += p2shift.z;
                float r1 = get_separation(p1, p2);
                if (r1 < d_R && r1 > 0) {
                    for (int j = 0; j < 27; ++j) {
                        float3 p3shift;
                        int4 index3 = get_index(ngp1, j, n, p3shift);
                        int2 bounds3 = d_cells[index3.w];
                        for (int part3 = bounds3.x; part3 <= bounds3.y; ++part3) {
                            float3 p3 = d_p[part3];
                            p3.x += p3shift.x;
                            p3.y += p3shift.y;
                            p3.z += p3shift.z;
                            float r2 = get_separation(p2, p3);
                            float r3 = get_separation(p1, p3);
                            if (r2 < d_R && r2 > 0 && r3 < d_R && r3 > 0) {
                                // BUGFIX: triples failing the triangle
                                // inequality used to be funnelled into bin
                                // (0,0,1), because get_shell() returns the
                                // sentinel 1 for them; skip them instead.
                                if (r1 <= r2 + r3 && r2 <= r1 + r3 && r3 <= r1 + r2) {
                                    int shell = get_shell(r1, r2, r3);
                                    atomicAdd(&d_triangles[shell], 1);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Sort `parts` in place into an n x n x n grid of cells of side R
// (n = int(L/R)), so that all particles of a cell are contiguous.
// Returns, for each flattened cell index, the inclusive [first, last]
// particle range as an int2 (an empty cell yields last = first - 1).
std::vector<int2> getCells(std::vector<float3> &parts, double L, double R, int &n) {
    n = int(L/R);
    std::vector<std::vector<float3>> H(n*n*n);
    std::vector<int2> cells;
    std::cout << "Binning particles..." << std::endl;
    for (int i = 0; i < parts.size(); ++i) {
        int ix = parts[i].x/R;
        int iy = parts[i].y/R;
        int iz = parts[i].z/R;
        // BUGFIX: clamp coordinates sitting exactly on the upper domain
        // boundary (e.g. x == L) into the last cell. The kernels already
        // clamp this way, but here the unclamped index wrote out of
        // bounds of H.
        if (ix == n) ix--;
        if (iy == n) iy--;
        if (iz == n) iz--;
        int index = iz + n*(iy + n*ix);
        H[index].push_back(parts[i]);
    }
    // Flatten the per-cell lists back into `parts` and record each cell's
    // inclusive particle range.
    int part = 0;
    for (int i = 0; i < H.size(); ++i) {
        int2 cell = {part, int(part + H[i].size() - 1)};
        cells.push_back(cell);
        for (int j = 0; j < H[i].size(); ++j) {
            parts[part + j] = H[i][j];
        }
        part += H[i].size();
    }
    return cells;
}
// Write the pair-count histogram to `file`, one "r count" line per
// shell, where r is the midpoint of the shell of width R/N_shells.
void writePairs(std::string file, std::vector<int> &pairs, float R, int N_shells) {
    double const dr = R/N_shells;
    std::ofstream out(file);
    for (int shell = 0; shell < pairs.size(); ++shell) {
        out << (shell + 0.5)*dr << " " << pairs[shell] << "\n";
    }
    out.close();
}
// Write the triangle histogram to `file`: one "r1 r2 r3 count" line per
// bin whose sorted shell midpoints (r1 <= r2 <= r3, each bin written
// once) can actually form a triangle according to isTriangle().
// NOTE(review): the midpoints are doubles but isTriangle() takes floats,
// so the arguments are narrowed on the call — presumably acceptable here.
void writeTriangles(std::string file, std::vector<int> &triangles, float R, int N_shells) {
    double dr = R/N_shells;
    std::ofstream fout(file);
    for (int i = 0; i < N_shells; ++i) {
        double r1 = (i + 0.5)*dr;
        for (int j = i; j < N_shells; ++j) {
            double r2 = (j + 0.5)*dr;
            for (int k = j; k < N_shells; ++k) {
                double r3 = (k + 0.5)*dr;
                if (isTriangle(r1, r2, r3)) {
                    // Flattened bin index matching get_shell()'s layout.
                    int index = k + N_shells*(j + N_shells*i);
                    fout << r1 << " " << r2 << " " << r3 << " " << triangles[index] << "\n";
                }
            }
        }
    }
    fout.close();
}
// Driver: reads a binary particle snapshot, bins the particles into
// cells, counts pairs and triangles on the GPU, and writes both
// histograms. Usage: ./program <parameter_file>
int main(int argc, char *argv[]) {
    // BUGFIX: guard against a missing argument before dereferencing argv[1].
    if (argc < 2) {
        std::cout << "Usage: " << argv[0] << " <parameter_file>" << std::endl;
        return 1;
    }
    parameters p(argv[1]);
    p.print();
    // The 27 offsets addressing a cell and all of its neighbours
    // (including the {0,0,0} self offset).
    std::vector<int3> shifts;
    for (int i = -1; i <= 1; ++i) {
        for (int j = -1; j <= 1; ++j) {
            for (int k = -1; k <= 1; ++k) {
                int3 temp = {i, j, k};
                shifts.push_back(temp);
            }
        }
    }
    float L = float(p.getd("L"));
    float R = float(p.getd("R"));
    int N_shells = p.geti("N_shells");
    cudaSetDevice(0);
    std::cout << "Writing values to constant memory..." << std::endl;
    gpuErrchk(cudaMemcpyToSymbol(d_L, &L, sizeof(float)));
    gpuErrchk(cudaMemcpyToSymbol(d_R, &R, sizeof(float)));
    gpuErrchk(cudaMemcpyToSymbol(d_Nshells, &N_shells, sizeof(int)));
    gpuErrchk(cudaMemcpyToSymbol(d_shifts, shifts.data(), shifts.size()*sizeof(int3)));
    // Binary layout: int particle count followed by that many float3s.
    std::cout << "Reading input file..." << std::endl;
    int N_parts;
    std::ifstream fin(p.gets("inFile"), std::ios::binary);
    fin.read((char *)&N_parts, sizeof(int));
    std::cout << N_parts << std::endl;
    std::vector<float3> parts(N_parts);
    fin.read((char *)parts.data(), parts.size()*sizeof(float3));
    fin.close();
    std::cout << "Writing number of particles to constant memory..." << std::endl;
    std::cout << "    Number of particles = " << N_parts << std::endl;
    // BUGFIX: this symbol copy was the only unchecked device call.
    gpuErrchk(cudaMemcpyToSymbol(d_Nparts, &N_parts, sizeof(int)));
    std::cout << "Setting up cells..." << std::endl;
    int n;
    std::vector<int2> cells = getCells(parts, p.getd("L"), p.getd("R"), n);
    std::ofstream fout("cells.dat");
    for (int i = 0; i < cells.size(); ++i) {
        fout << cells[i].x << " " << cells[i].y << "\n";
    }
    fout.close();
    // Host histograms; zero-initialised, copied down to zero the device ones.
    std::vector<int> pairs(N_shells);
    std::vector<int> triangles(N_shells*N_shells*N_shells);
    std::cout << "Declaring device pointers..." << std::endl;
    int2 *d_cells;
    int *d_pairs, *d_triangles;
    float3 *d_parts;
    std::cout << "Allocating device pointers..." << std::endl;
    gpuErrchk(cudaMalloc((void **)&d_cells, cells.size()*sizeof(int2)));
    gpuErrchk(cudaMalloc((void **)&d_pairs, pairs.size()*sizeof(int)));
    gpuErrchk(cudaMalloc((void **)&d_triangles, triangles.size()*sizeof(int)));
    gpuErrchk(cudaMalloc((void **)&d_parts, parts.size()*sizeof(float3)));
    // BUGFIX: this stage copies host -> device; the old message said the
    // opposite.
    std::cout << "Copying from host to device..." << std::endl;
    gpuErrchk(cudaMemcpy(d_cells, cells.data(), cells.size()*sizeof(int2), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_pairs, pairs.data(), pairs.size()*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_triangles, triangles.data(), triangles.size()*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_parts, parts.data(), parts.size()*sizeof(float3), cudaMemcpyHostToDevice));
    std::cout << "Executing GPU kernels..." << std::endl;
    int N_blocks = N_parts/N_threads + 1;
    cudaEvent_t begin, end;
    float elapsedTime;
    // BUGFIX: create the timing events once and reuse them for both
    // kernels (they were re-created before and never destroyed).
    cudaEventCreate(&begin);
    cudaEventCreate(&end);
    cudaEventRecord(begin, 0);
    countPairs<<<N_blocks, N_threads>>>(d_parts, d_cells, d_pairs, n);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsedTime, begin, end);
    std::cout << "Time to count pairs: " << elapsedTime << " ms" << std::endl;
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    cudaEventRecord(begin, 0);
    countTriangles<<<N_blocks, N_threads>>>(d_parts, d_cells, d_triangles, n);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsedTime, begin, end);
    std::cout << "Time to count triangles: " << elapsedTime << " ms" << std::endl;
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    std::cout << "Reading data back from GPU..." << std::endl;
    gpuErrchk(cudaMemcpy(pairs.data(), d_pairs, pairs.size()*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(triangles.data(), d_triangles, triangles.size()*sizeof(int), cudaMemcpyDeviceToHost));
    std::cout << "Writing output files..." << std::endl;
    writePairs(p.gets("pairsFile"), pairs, R, N_shells);
    writeTriangles(p.gets("triangleFile"), triangles, R, N_shells);
    std::cout << "Freeing GPU memory..." << std::endl;
    cudaEventDestroy(begin);
    cudaEventDestroy(end);
    gpuErrchk(cudaFree(d_cells));
    gpuErrchk(cudaFree(d_pairs));
    gpuErrchk(cudaFree(d_triangles));
    gpuErrchk(cudaFree(d_parts));
    std::cout << "Done!" << std::endl;
    return 0;
}
|
03b37c6faa68eec9baae28e477c4bba57c926455.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<functional>
#include<memory>
#include<cstdlib>
#include<ctime>
#include<vector>
#include"hip/hip_fp16.h"
#include"helper_cuda.h"
using std::cout;
using std::endl;
using std::unique_ptr;
using std::vector;
using std::function;
using fp = void(*)(int*);
// Fill src with `size` pseudo-random half2 values.
//
// Each 32-bit pattern keeps the random sign and mantissa bits of both
// halves (mask 0x83FF83FF) and forces the exponent field of each half to
// 0x3C00 (biased exponent 15), so every half has magnitude in [1, 2).
// NOTE(review): rand() typically yields at most 31 random bits, so on
// common platforms the high half's sign bit is always 0 — confirm if
// negative values are required in the upper half.
void genInput(half2 * src, size_t size){
    for(size_t i=0;i <size;i++){
        unsigned int temp = rand();
        temp &= 0x83FF83FF;
        temp |= 0X3C003C00;
        // Reinterpret the raw bit pattern as a pair of halves.
        src[i] = *reinterpret_cast<half2*>(&temp);
        //src[i] = *(half2*)&temp;
    }
}
// In-place tree reduction of 128 half2 values in shared memory; the sum
// ends up in v[0]. Assumes blockDim.x == 128 and that every thread of
// the block calls this function: each __syncthreads() barrier sits
// outside the guarded add, so all threads reach it.
__forceinline__ __device__ void
reduceInShared(half2 * const v){
    int tid = threadIdx.x;
    // fold the upper 64 values onto the lower 64 (two warps active)
    if(tid<64)
        v[tid] = __hadd2(v[tid], v[tid+64]);
    __syncthreads();
    // remaining steps involve at most one warp
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+32]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+16]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+8]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+4]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+2]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+1]);
    __syncthreads();
}
// Grid-stride dot product of two half2 vectors a and b; each block
// reduces its partial sums in shared memory (via reduceInShared, which
// assumes blockDim.x == 128) and writes one float partial result to
// results[blockIdx.x]. The host must sum the per-block values.
__global__ void
scalarProductKernel(half2 const * const a,
                    half2 const * const b,
                    float * const results,
                    size_t const size
                    ){
    int const stride = gridDim.x*blockDim.x;
    __shared__ half2 shArray[128];

    shArray[threadIdx.x] = __float2half2_rn(0.0f);
    half2 value = __float2half2_rn(0.0f);

    // BUGFIX: the loop used to start at threadIdx.x + blockDim.x +
    // blockIdx.x, which makes every block scan (almost) the same elements
    // and skips the first blockDim.x entries. The canonical grid-stride
    // base is threadIdx.x + blockDim.x * blockIdx.x.
    for(size_t i = threadIdx.x + (size_t)blockDim.x*blockIdx.x;
            i<size; i += stride){
        value = __hfma2(a[i], b[i], value);
    }
    // Publish each thread's partial sum, then reduce the block.
    shArray[threadIdx.x] = value;
    __syncthreads();
    reduceInShared(shArray);
    // Thread 0 converts the half2 pair to float and stores the block sum.
    if(threadIdx.x == 0){
        half2 res = shArray[0];
        float f_res = __low2float(res) + __high2float(res);
        results[blockIdx.x] = f_res;
    }
}
// fp16 scalar-product demo: generates two random half2 vectors, runs the
// GPU dot-product kernel, and reduces the per-block partials on the host.
int
main(int argc, char *argv[]){
    int devID = 0;
    hipDeviceProp_t prop;
    checkCudaErrors(hipGetDeviceProperties(&prop,devID));
    // Half-precision intrinsics need compute capability 5.3 or higher.
    if(prop.major<5 || (prop.major==5 && prop.minor<3)){
        cout<<"ERROR: fp16 requires SM 5.3 or higher"<<endl;
        exit(EXIT_FAILURE);
    }
    //-----------------------------
    srand(time(NULL));
    int const blocks = 128;
    int const threads = 128;
    size_t size = blocks*threads;  // number of half2 elements per operand
    // Deleters so the unique_ptrs release pinned/device memory correctly.
    auto lambdaHost = [](half2*p){hipHostFree(p);};
    auto lambdaDev = [](half2*p){hipFree(p);};
    // Input buffers (pinned host + device), one pair per operand.
    vector<unique_ptr<half2,void(*)(half2*)>> vec;
    vector<unique_ptr<half2,void(*)(half2*)>> devVec;
    for(int i=0;i<2;i++){
        half2* tmp;
        checkCudaErrors(hipHostMalloc((void**)&tmp,size*sizeof(half2)));
        vec.emplace_back(tmp,lambdaHost);
        half2* tmp1;
        checkCudaErrors(hipMalloc((void**)&tmp1, size*sizeof(half2) ));
        devVec.emplace_back(tmp1,lambdaDev);
    }
    // Output buffers: one float partial sum per block.
    unique_ptr<float,void(*)(float*)> results{nullptr,[](float*p){hipHostFree(p);}};
    unique_ptr<float,void(*)(float*)> devResults{nullptr,[](float*p){hipFree(p);}};
    float* _results;
    float* _devResults;
    checkCudaErrors(hipHostMalloc((void**)&_results,blocks*sizeof(float)));
    checkCudaErrors(hipMalloc((void**)&_devResults,blocks*sizeof(float)));
    results.reset(_results);
    devResults.reset(_devResults);
    // BUGFIX: generate and upload BOTH operands before launching; the old
    // code launched the kernel inside this loop, so the first launch read
    // uninitialised device memory for the second operand.
    for(int i=0; i<2;i++){
        genInput(vec[i].get(),size);
        checkCudaErrors(hipMemcpy(devVec[i].get(),vec[i].get(),
                    size*sizeof(half2),
                    hipMemcpyHostToDevice));
    }
    hipLaunchKernelGGL(( scalarProductKernel), dim3(blocks), dim3(threads), 0, 0, devVec[0].get(),
            devVec[1].get(),
            devResults.get(), size);
    // BUGFIX: the result array holds floats, not half2 (the two types
    // happen to be the same size, but the old sizeof was misleading).
    checkCudaErrors(hipMemcpy(results.get(), devResults.get(), blocks*sizeof(float),
                hipMemcpyDeviceToHost));
    // Final reduction of the per-block partial sums on the host.
    float res = 0;
    for(int i=0; i<blocks; i++){
        res += *(results.get()+i);
    }
    printf("Result: %f \n", res);
    exit(EXIT_SUCCESS);
}
| 03b37c6faa68eec9baae28e477c4bba57c926455.cu | #include<iostream>
#include<functional>
#include<memory>
#include<cstdlib>
#include<ctime>
#include<vector>
#include"cuda_fp16.h"
#include"helper_cuda.h"
using std::cout;
using std::endl;
using std::unique_ptr;
using std::vector;
using std::function;
using fp = void(*)(int*);
// Fill src with `size` pseudo-random half2 values.
//
// Each 32-bit pattern keeps the random sign and mantissa bits of both
// halves (mask 0x83FF83FF) and forces the exponent field of each half to
// 0x3C00 (biased exponent 15), so every half has magnitude in [1, 2).
// NOTE(review): rand() typically yields at most 31 random bits, so on
// common platforms the high half's sign bit is always 0 — confirm if
// negative values are required in the upper half.
void genInput(half2 * src, size_t size){
    for(size_t i=0;i <size;i++){
        unsigned int temp = rand();
        temp &= 0x83FF83FF;
        temp |= 0X3C003C00;
        // Reinterpret the raw bit pattern as a pair of halves.
        src[i] = *reinterpret_cast<half2*>(&temp);
        //src[i] = *(half2*)&temp;
    }
}
// In-place tree reduction of 128 half2 values in shared memory; the sum
// ends up in v[0]. Assumes blockDim.x == 128 and that every thread of
// the block calls this function: each __syncthreads() barrier sits
// outside the guarded add, so all threads reach it.
__forceinline__ __device__ void
reduceInShared(half2 * const v){
    int tid = threadIdx.x;
    // fold the upper 64 values onto the lower 64 (two warps active)
    if(tid<64)
        v[tid] = __hadd2(v[tid], v[tid+64]);
    __syncthreads();
    // remaining steps involve at most one warp
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+32]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+16]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+8]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+4]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+2]);
    __syncthreads();
    if(tid<32)
        v[tid] = __hadd2(v[tid], v[tid+1]);
    __syncthreads();
}
// Grid-stride dot product of two half2 vectors a and b; each block
// reduces its partial sums in shared memory (via reduceInShared, which
// assumes blockDim.x == 128) and writes one float partial result to
// results[blockIdx.x]. The host must sum the per-block values.
__global__ void
scalarProductKernel(half2 const * const a,
                    half2 const * const b,
                    float * const results,
                    size_t const size
                    ){
    int const stride = gridDim.x*blockDim.x;
    __shared__ half2 shArray[128];

    shArray[threadIdx.x] = __float2half2_rn(0.0f);
    half2 value = __float2half2_rn(0.0f);

    // BUGFIX: the original loop started at threadIdx.x + blockDim.x +
    // blockIdx.x (its own comments acknowledged this was wrong), which
    // makes every block scan (almost) the same elements and skips the
    // first blockDim.x entries. The canonical grid-stride base is
    // threadIdx.x + blockDim.x * blockIdx.x.
    for(size_t i = threadIdx.x + (size_t)blockDim.x*blockIdx.x;
            i<size; i += stride){
        value = __hfma2(a[i], b[i], value);
    }
    // Publish each thread's partial sum, then reduce the block.
    shArray[threadIdx.x] = value;
    __syncthreads();
    reduceInShared(shArray);
    // Thread 0 converts the half2 pair to float and stores the block sum.
    if(threadIdx.x == 0){
        half2 res = shArray[0];
        float f_res = __low2float(res) + __high2float(res);
        results[blockIdx.x] = f_res;
    }
}
// fp16 scalar-product demo: generates two random half2 vectors, runs the
// GPU dot-product kernel, and reduces the per-block partials on the host.
int
main(int argc, char *argv[]){
    int devID = 0;
    cudaDeviceProp prop;
    checkCudaErrors(cudaGetDeviceProperties(&prop,devID));
    // Half-precision intrinsics need compute capability 5.3 or higher.
    if(prop.major<5 || (prop.major==5 && prop.minor<3)){
        cout<<"ERROR: fp16 requires SM 5.3 or higher"<<endl;
        exit(EXIT_FAILURE);
    }
    //-----------------------------
    srand(time(NULL));
    int const blocks = 128;
    int const threads = 128;
    size_t size = blocks*threads;  // number of half2 elements per operand
    // Deleters so the unique_ptrs release pinned/device memory correctly.
    auto lambdaHost = [](half2*p){cudaFreeHost(p);};
    auto lambdaDev = [](half2*p){cudaFree(p);};
    // Input buffers (pinned host + device), one pair per operand.
    vector<unique_ptr<half2,void(*)(half2*)>> vec;
    vector<unique_ptr<half2,void(*)(half2*)>> devVec;
    for(int i=0;i<2;i++){
        half2* tmp;
        checkCudaErrors(cudaMallocHost((void**)&tmp,size*sizeof(half2)));
        vec.emplace_back(tmp,lambdaHost);
        half2* tmp1;
        checkCudaErrors(cudaMalloc((void**)&tmp1, size*sizeof(half2) ));
        devVec.emplace_back(tmp1,lambdaDev);
    }
    // Output buffers: one float partial sum per block.
    unique_ptr<float,void(*)(float*)> results{nullptr,[](float*p){cudaFreeHost(p);}};
    unique_ptr<float,void(*)(float*)> devResults{nullptr,[](float*p){cudaFree(p);}};
    float* _results;
    float* _devResults;
    checkCudaErrors(cudaMallocHost((void**)&_results,blocks*sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&_devResults,blocks*sizeof(float)));
    results.reset(_results);
    devResults.reset(_devResults);
    // BUGFIX: generate and upload BOTH operands before launching; the old
    // code launched the kernel inside this loop, so the first launch read
    // uninitialised device memory for the second operand.
    for(int i=0; i<2;i++){
        genInput(vec[i].get(),size);
        checkCudaErrors(cudaMemcpy(devVec[i].get(),vec[i].get(),
                    size*sizeof(half2),
                    cudaMemcpyHostToDevice));
    }
    scalarProductKernel<<<blocks, threads>>>(devVec[0].get(),
            devVec[1].get(),
            devResults.get(), size);
    // BUGFIX: the result array holds floats, not half2 (the two types
    // happen to be the same size, but the old sizeof was misleading).
    checkCudaErrors(cudaMemcpy(results.get(), devResults.get(), blocks*sizeof(float),
                cudaMemcpyDeviceToHost));
    // Final reduction of the per-block partial sums on the host.
    float res = 0;
    for(int i=0; i<blocks; i++){
        res += *(results.get()+i);
    }
    printf("Result: %f \n", res);
    exit(EXIT_SUCCESS);
}
|
9c6347f3bd53ffc7fea4b5e299f6b0d81821cdfd.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <chrono>
#include "CUDA/ga/CudaPopulation.h"
#include "CUDA/problems/salesman/CudaSalesmanUtils.h"
// Runs the full genetic algorithm for num_gens generations: evaluate the
// current population, breed the next one, record the scores, then swap
// the current/next population pointers so the new generation becomes
// current. Each phase is followed by a device synchronisation so it
// completes before the next begins.
template <typename T>
void CudaPopulation<T>::solveProblem(int num_gens) {
  for (int i = 0; i < num_gens; ++i) {
    // evaluate all individuals
    evaluate();
    checkCudaErrors(hipDeviceSynchronize());
    // Since we only use the top 2 to breed, don't normalise the probabilities
    // breed next generation
    breed();
    checkCudaErrors(hipDeviceSynchronize());
    // save scores.
    saveScores();
    checkCudaErrors(hipDeviceSynchronize());
    // swap pointers to current and next pops.
    std::swap(dev_pop, dev_next_pop);
  }
}
// Allocates every host/device buffer the GA needs and seeds the initial
// random population. Must be called once before solveProblem().
template <typename T>
void CudaPopulation<T>::init() {
    // Host-side staging buffers for the two population generations.
    mem_next_pop = (T *)malloc(sizeof(T) * population_size * genome_size);
    mem_pop = (T *)malloc(sizeof(T) * population_size * genome_size);
    // Device buffers: per-individual fitness plus both population generations.
    checkCudaErrors(hipMalloc(&dev_probabilities, sizeof(float) * population_size));
    checkCudaErrors(hipMalloc(&dev_mem_pop, sizeof(T) * population_size * genome_size));
    checkCudaErrors(hipMalloc(&dev_mem_next_pop, sizeof(T) * population_size * genome_size));
    checkCudaErrors(hipMalloc(&device_total, sizeof(float)));
    checkCudaErrors(hipMalloc(&dev_total_max, sizeof(float)));
    pop = mem_pop;
    next_pop = mem_next_pop;
    dev_pop = dev_mem_pop;
    dev_next_pop = dev_mem_next_pop;
    // Per-thread RNG states used by the breeding/mutation kernels.
    dev_random = get_init_cuda_rand(population_size);
    // History of best scores, read back later by get_all_measured_scores().
    checkCudaErrors(hipMalloc(&dev_global_all_scores, sizeof(float) * NUM_SCORE_POINTS));
    // Device-side generation counter, initialised to zero.
    checkCudaErrors(hipMalloc(&dev_current_count_of_steps, sizeof(int)));
    int zero = 0;
    checkCudaErrors(hipMemcpy(dev_current_count_of_steps, &zero, sizeof(int), hipMemcpyHostToDevice));
    checkCudaErrors(hipDeviceSynchronize());
    // Fill the initial population with random genomes.
    init_data_randomly();
    checkCudaErrors(hipDeviceSynchronize());
}
// Releases every allocation made in init(), host and device side.
template <typename T>
CudaPopulation<T>::~CudaPopulation() {
    // Destroy the per-thread RNG state allocated by get_init_cuda_rand().
    cuda_rand_destroy(dev_random);
    free(mem_next_pop);
    free(mem_pop);
    checkCudaErrors(hipFree(dev_probabilities));
    checkCudaErrors(hipFree(dev_mem_pop));
    checkCudaErrors(hipFree(dev_mem_next_pop));
    checkCudaErrors(hipFree(device_total));
    checkCudaErrors(hipFree(dev_total_max));
    checkCudaErrors(hipFree(dev_global_all_scores));
    checkCudaErrors(hipFree(dev_current_count_of_steps))
;
}
// Launches main_cuda_breed over half the grid (each thread produces two
// children). The dynamic shared memory passed to the kernel holds, in order:
//   1. two packed-bit scratch masks per thread (crossover bookkeeping),
//   2. the genomes of the top two parents,
//   3. population_size ints used as an index array by the best-of reduction.
// NOTE(review): the scratch size is computed with dimBlock.x but the launch
// uses dimBlockHalf threads -- harmless over-allocation if dimBlock >=
// dimBlockHalf, but confirm that is the intent.
template <typename T>
void CudaPopulation<T>::breed() {
    // Packed-bit words needed to cover genome_size booleans (ceil division).
    int num = (genome_size - 1) / (sizeof(mybool) * 8) + 1;
    const int number_of_bytes = 2 * num * sizeof(mybool) * dimBlock.x + genome_size * sizeof(T) * 2 + population_size * sizeof(int);
    hipLaunchKernelGGL(( main_cuda_breed), dim3(dimGridHalf), dim3(dimBlockHalf), number_of_bytes, 0, dev_pop, dev_next_pop, dev_probabilities, population_size, genome_size, dev_random,
                       get_crossover_func(),
                       get_mutation_func(),
                       dev_total_max);
}
/**
 * @brief Breeds dev_pop into dev_next_pop: block 0 first copies the two
 * fittest individuals unchanged into slots 0 and 1 (elitism), then every
 * thread breeds one pair of children from the top two parents via crossover
 * and mutation.
 *
 * Launch: half-sized grid (one thread per pair of children), with dynamic
 * shared memory laid out as [per-thread bit masks | top-two parent genomes |
 * population_size ints] -- see CudaPopulation::breed() for the sizing.
 *
 * NOTE(review): threads with index >= population_size / 2 return before the
 * __syncthreads() calls inside the blockIdx.x == 0 branch; if block 0 can
 * contain such threads this is a divergent barrier -- confirm launch sizes
 * always avoid that.
 */
template <typename T>
__global__ void main_cuda_breed(T *dev_pop, T *dev_next_pop, float *dev_probabilites_normalised, int population_size, int genome_size,
                                hiprandState_t *dev_random,
                                crossover_func<T> function_to_do_crossover,
                                mutation_func<T> function_to_do_mutation,
                                float *dev_total_max) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    const int tId = threadIdx.x;
    // This shared memory has the following:
    // 2 bool arrays (each of size genome_size) for each thread in this block, as well as the genomes of the top two parents.
    // Since bools are stored in chars, which wastes 7 bits, we use mybool and bit packing to
    // in effect store a bool array as a collection of integers, with much less wastage.
    // The bool arrays are mainly an optimisation for the crossover of the TSP, which needs a way to keep track of
    // which children contain which cities. Using malloc inside that function is much, much slower than doing it this way.
    // For example, if the genome size is 1000, we need 32 integers to store one boolean array
    // (each integer has 32 bits, and 32 * 32 = 1024). We do in effect waste those last 24 bits,
    // but it's more of a headache to use them than not.
    extern __shared__ mybool tempArrays[];
    // Number of packed words needed to store at least genome_size bools,
    // doubled because each thread owns two arrays. ceil(genome_size / word bits) * 2.
    int number_of_bits_each = ((genome_size - 1) / (sizeof(mybool) * 8) + 1) * 2;
    // total number of packed words for the boolean arrays across the block.
    int number_of_bits = number_of_bits_each * blockDim.x;
    // The rest of the shared array caches the top two parents from the previous
    // generation (only those are bred from), which is much faster than re-reading
    // them from global memory in every crossover.
    // copy parents in, striding the whole block across the 2 * genome_size elements.
    int num_elems_to_copy = genome_size * 2;
    int num_passes = 1 + ((num_elems_to_copy - 1) / blockDim.x);
    T *topTwoParents = (T *)&tempArrays[number_of_bits];
    for (int i = 0; i < num_passes; ++i) {
        int index = tId + i * blockDim.x;
        if (index < num_elems_to_copy) {
            topTwoParents[index] = dev_pop[index];
        }
    }
    // zero-initialise the packed boolean arrays (shared memory starts undefined).
    num_passes = 1 + ((number_of_bits - 1) / blockDim.x);
    for (int i = 0; i < num_passes; ++i) {
        int index = tId + i * blockDim.x;
        if (index < number_of_bits) {
            tempArrays[index] = 0;
        }
    }
    __syncthreads();
    // If we have an invalid thread index, then return
    if (index >= population_size / 2)
        return;
    // Block 0 finds the best two individuals and copies them unchanged into the
    // next generation (elitism). A whole-block reduction is faster than one thread.
    // Note: the second-best score is found fuzzily -- it is taken from an
    // intermediate stage of the max reduction and is not guaranteed to be the
    // exact runner-up; empirically this does not hurt results.
    if (blockIdx.x == 0) {
        // The third part in this shared array is a list of indices, so we can keep track of which genomes are the best.
        int *indices = (int *)&topTwoParents[num_elems_to_copy];
        float *probs = dev_probabilites_normalised;
        // first set up: indices[i] = i
        num_passes = 1 + ((population_size - 1) / blockDim.x);
        for (int i = 0; i < num_passes; ++i) {
            int index = tId + i * blockDim.x;
            if (index < population_size) {
                indices[index] = index;
            }
        }
        __shared__ int second_max;
        if (index == 0)
            second_max = -1;
        __syncthreads();
        // Fold the whole population down into blockDim.x candidates, keeping the
        // index alongside each score.
        // NOTE(review): this pass both reads probs[index] and writes probs[tId]
        // without an intervening barrier between iterations -- relies on each
        // thread only writing its own slot; confirm no cross-thread overlap.
        for (int i = 0; i < num_passes; ++i) {
            int index = tId + i * blockDim.x;
            if (index < population_size) {
                if (probs[index] > probs[tId]) {
                    probs[tId] = probs[index];
                    indices[tId] = indices[index];
                }
            }
        }
        // now reduce the blockDim.x candidates down to one (classic tree reduction).
        for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
            __syncthreads();
            if (tId < stride) {
                if (probs[tId + stride] > probs[tId]) {
                    probs[tId] = probs[tId + stride];
                    indices[tId] = indices[tId + stride];
                }
            }
        }
        __syncthreads();
        // make second_max equal to indices[1]. This sometimes results in max == second_max,
        // but it doesn't affect the final score significantly.
        int max = indices[0];
        if (index == 0 && second_max == -1)
            second_max = indices[1];
        // broadcast the winner's index to the whole block via shared memory.
        __shared__ int all_max;
        if (index == 0) {
            all_max = max;
        }
        __syncthreads();
        // Now simply copy over the parents into the new population (slots 0 and 1).
        auto parent1 = getIndividual<T>(all_max, dev_pop, genome_size);
        auto parent2 = getIndividual<T>(second_max, dev_pop, genome_size);
        auto child1 = getIndividual<T>(0, dev_next_pop, genome_size);
        auto child2 = getIndividual<T>(1, dev_next_pop, genome_size);
        int num_passes = 1 + (genome_size - 1) / blockDim.x;
        for (int i = 0; i < num_passes; ++i) {
            int k = i * blockDim.x;
            if (k + tId < genome_size) {
                child1[k + tId] = parent1[k + tId];
                child2[k + tId] = parent2[k + tId];
            }
        }
        __syncthreads();
    }
    // Thread 0 skips breeding (its two output slots already hold the elite
    // parents) and instead records the generation's best score.
    if (index == 0)
        *dev_total_max = dev_probabilites_normalised[0]; // update best score.
    else
        // Single-iteration loop: i == index * 2 only. Each thread writes children
        // at slots 2*index and 2*index + 1.
        for (int i = index * 2; i < (index * 2) + 1; i += 2) {
            // Perform the actual breeding: two parents => two offspring.
            // This form of delayed elitism (breeding only from the cached top two)
            // empirically beat fitness-proportional selection over the whole pop.
            // pick each parent uniformly from the two cached elites.
            int parent1Index = cuda_rand_0n(index, dev_random, 2);
            int parent2Index = cuda_rand_0n(index, dev_random, 2);
            Individual<T> child1 = getIndividual<T>((i), dev_next_pop, genome_size);
            Individual<T> child2 = getIndividual<T>((i + 1), dev_next_pop, genome_size);
            Individual<T> parent1 = &topTwoParents[parent1Index * genome_size];
            Individual<T> parent2 = &topTwoParents[parent2Index * genome_size];
            if (parent1Index == parent2Index) {
                // optimisation: identical parents -> crossover is a no-op, just copy.
                memcpy(child1, parent1, sizeof(T) * genome_size);
                memcpy(child2, parent2, sizeof(T) * genome_size);
            } else {
                // perform crossover parent1 + parent2 = child1, child2, handing the
                // crossover its pre-zeroed packed-bit scratch space.
                function_to_do_crossover(
                    parent1, parent2,
                    child1, child2,
                    genome_size,
                    dev_random,
                    &tempArrays[threadIdx.x * number_of_bits_each]);
            }
            // Mutate the children.
            // empirically it was found that cuda performed better with a higher mutation chance.
            function_to_do_mutation(child1, 1, genome_size, dev_random, index);
            function_to_do_mutation(child2, 1, genome_size, dev_random, index);
        }
}
// Two-stage sum of dev_probs[0..N): each block accumulates its elements into
// a shared partial via atomicAdd, then thread 0 folds the partial into
// *total with one global atomicAdd per block. *total accumulates, so it must
// be zeroed before launch (CudaPopulation::divide() does this).
__global__ void get_total(float *dev_probs, int N, float *total) {
    __shared__ float sharedTotal;
    if (threadIdx.x == 0) {
        sharedTotal = 0;
    }
    __syncthreads();
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < N) {
        atomicAdd(&sharedTotal, dev_probs[index]);
    }
    __syncthreads();
    // One global atomic per block keeps contention on *total low.
    if (threadIdx.x == 0) {
        atomicAdd(total, sharedTotal);
    }
}
// Normalises every fitness value in dev_probs[0..N) by the device-resident
// total computed by get_total(). One thread per element, guarded for ragged
// grids.
__global__ void divideAll(float *dev_probs, int N, float *dev_total) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;
    dev_probs[i] /= *dev_total;
}
// Normalises dev_probabilities so they sum to 1: zero the device-side
// accumulator, reduce all fitness values into it with get_total, then divide
// every entry by the total with divideAll. Synchronises between phases
// because the kernels depend on each other's output.
template <typename T>
void CudaPopulation<T>::divide() {
    float my0 = 0;
    // Make sure any in-flight kernels writing dev_probabilities are done.
    checkCudaErrors(hipDeviceSynchronize());
    // Reset the accumulator before the reduction (get_total accumulates).
    checkCudaErrors(hipMemcpy(device_total, &my0, sizeof(float), hipMemcpyHostToDevice));
    // BUG FIX: this synchronize was previously unchecked; a sticky error
    // here would have surfaced confusingly at a later call site.
    checkCudaErrors(hipDeviceSynchronize());
    // Sum all probabilities into device_total, using multiple blocks.
    hipLaunchKernelGGL(( get_total), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_probabilities, population_size, device_total);
    checkCudaErrors(hipDeviceSynchronize());
    // Divide each probability by the total.
    hipLaunchKernelGGL(( divideAll), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_probabilities, population_size, device_total);
    checkCudaErrors(hipDeviceSynchronize());
}
// Appends this generation's score (1 / best fitness) to the device-side
// score history and bumps the generation counter. Launched <<<1, 1>>> --
// a single thread does the bookkeeping.
__global__ void get_total_and_add(const float *dev_max_prob, const float *dev_total_prob, float *dev_all_scores, int *current_score_count) {
    // dev_total_prob is currently unused; the denormalisation by the total
    // (commented out below) was deliberately disabled.
    float total_denormalised = *dev_max_prob; // * *dev_total_prob;
    // Fitness is a reciprocal score, so record its inverse as the "cost".
    dev_all_scores[*current_score_count] = 1 / total_denormalised;
    ++(*current_score_count);
}
// Records the current generation's best score into the device-side history
// buffer. Runs as a single-thread kernel so the counter update is race-free.
template <typename T>
void CudaPopulation<T>::saveScores() {
    // Only one thread does this.
    hipLaunchKernelGGL(( get_total_and_add), dim3(1), dim3(1), 0, 0, dev_total_max, device_total, dev_global_all_scores, dev_current_count_of_steps);
}
// Copies the per-generation best scores recorded on the device back to the
// host. Returns one float per completed generation (1 / best fitness, as
// written by get_total_and_add).
// BUG FIX: the destination of the first memcpy was mojibake ("¤t_steps",
// an HTML-entity corruption of "&current_steps"); also replaced the raw
// new[]/delete[] staging buffer with std::vector so nothing leaks if a
// checkCudaErrors call aborts mid-function.
template <typename T>
std::vector<float> CudaPopulation<T>::get_all_measured_scores() {
    int current_steps = 0;
    checkCudaErrors(hipMemcpy(&current_steps, dev_current_count_of_steps, sizeof(int), hipMemcpyDeviceToHost));
    checkCudaErrors(hipDeviceSynchronize());
    // Copy straight into the vector's storage -- no intermediate buffer.
    std::vector<float> scores_vec(current_steps > 0 ? current_steps : 0);
    if (!scores_vec.empty()) {
        checkCudaErrors(hipMemcpy(scores_vec.data(), dev_global_all_scores, sizeof(float) * scores_vec.size(), hipMemcpyDeviceToHost));
        checkCudaErrors(hipDeviceSynchronize());
    }
    return scores_vec;
}
// Returns the best genome found. After the final solveProblem() swap,
// dev_pop[0] holds the elite individual that main_cuda_breed copied into
// slot 0 of the last generation.
template <typename T>
std::vector<T> CudaPopulation<T>::get_final_best_solution() {
    // The best agent from previous generation is in dev_pop[0]
    return getVectorFromIndividual(dev_pop, genome_size);
}
} | 9c6347f3bd53ffc7fea4b5e299f6b0d81821cdfd.cu | #include <assert.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <chrono>
#include "CUDA/ga/CudaPopulation.h"
#include "CUDA/problems/salesman/CudaSalesmanUtils.h"
// Performs the whole process.
template <typename T>
void CudaPopulation<T>::solveProblem(int num_gens) {
for (int i = 0; i < num_gens; ++i) {
// evaluate all individuals
evaluate();
checkCudaErrors(cudaDeviceSynchronize());
// Since we only use the top 2 to breed, don't normalise the probabilities
// breed next generation
breed();
checkCudaErrors(cudaDeviceSynchronize());
// save scores.
saveScores();
checkCudaErrors(cudaDeviceSynchronize());
// swap pointers to current and next pops.
std::swap(dev_pop, dev_next_pop);
}
}
template <typename T>
void CudaPopulation<T>::init() {
// malloc on host
mem_next_pop = (T *)malloc(sizeof(T) * population_size * genome_size);
mem_pop = (T *)malloc(sizeof(T) * population_size * genome_size);
// probabilities and population memory on device.
checkCudaErrors(cudaMalloc(&dev_probabilities, sizeof(float) * population_size));
checkCudaErrors(cudaMalloc(&dev_mem_pop, sizeof(T) * population_size * genome_size));
checkCudaErrors(cudaMalloc(&dev_mem_next_pop, sizeof(T) * population_size * genome_size));
checkCudaErrors(cudaMalloc(&device_total, sizeof(float)));
checkCudaErrors(cudaMalloc(&dev_total_max, sizeof(float)));
pop = mem_pop;
next_pop = mem_next_pop;
dev_pop = dev_mem_pop;
dev_next_pop = dev_mem_next_pop;
// initialise cuda_random.
dev_random = get_init_cuda_rand(population_size);
// all scores
checkCudaErrors(cudaMalloc(&dev_global_all_scores, sizeof(float) * NUM_SCORE_POINTS));
// number of steps so far.
checkCudaErrors(cudaMalloc(&dev_current_count_of_steps, sizeof(int)));
int zero = 0;
checkCudaErrors(cudaMemcpy(dev_current_count_of_steps, &zero, sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
init_data_randomly();
checkCudaErrors(cudaDeviceSynchronize());
}
template <typename T>
CudaPopulation<T>::~CudaPopulation() {
// Free everything
cuda_rand_destroy(dev_random);
free(mem_next_pop);
free(mem_pop);
checkCudaErrors(cudaFree(dev_probabilities));
checkCudaErrors(cudaFree(dev_mem_pop));
checkCudaErrors(cudaFree(dev_mem_next_pop));
checkCudaErrors(cudaFree(device_total));
checkCudaErrors(cudaFree(dev_total_max));
checkCudaErrors(cudaFree(dev_global_all_scores));
checkCudaErrors(cudaFree(dev_current_count_of_steps));
}
// Simply calls main_cuda_breed
template <typename T>
void CudaPopulation<T>::breed() {
// Number of bytes for the bool arrays, as well as for the two top parents.
int num = (genome_size - 1) / (sizeof(mybool) * 8) + 1;
const int number_of_bytes = 2 * num * sizeof(mybool) * dimBlock.x + genome_size * sizeof(T) * 2 + population_size * sizeof(int);
main_cuda_breed<<<dimGridHalf, dimBlockHalf, number_of_bytes>>>(dev_pop, dev_next_pop, dev_probabilities, population_size, genome_size, dev_random,
get_crossover_func(),
get_mutation_func(),
dev_total_max);
}
/**
* @brief Main function to breed dev_pop into dev_next_pop.
*
*/
template <typename T>
__global__ void main_cuda_breed(T *dev_pop, T *dev_next_pop, float *dev_probabilites_normalised, int population_size, int genome_size,
curandState *dev_random,
crossover_func<T> function_to_do_crossover,
mutation_func<T> function_to_do_mutation,
float *dev_total_max) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int tId = threadIdx.x;
// This shared memory has the following:
// 2 bool arrays (each of size genome_size) for each thread in this block, as well as the genomes of the top two parents.
// Since bools are stored in chars, which wastes 7 bits, we use mybool and bit packing to
// in effect store a bool array as a collection of integers, with much less wastage.
// The bool arrays are mainly an optimisation for the crossover of the TSP, which needs a way to keep track of
// which children contain which cities. Using malloc inside that function is much, much slower than doing it this way.
// It is a bit less general, as Rosenbrock doesn't need the memory, but in general, I think methods could make use of this.
// For example, if the genome size is 1000, we need 32 integers to store one boolean array
// (each integer has 32 bits, and 32 * 32 = 1024). We do in effect waste those last 24 bits,
// but it's more of a headache to use them than not.
extern __shared__ mybool tempArrays[];
// This calculates the number of integers we need to store at least `genome_size` bools. Basically ceil(genome_size/(32 bits))
// We multiply this by two as each thread has access to two arrays.
int number_of_bits_each = ((genome_size - 1) / (sizeof(mybool) * 8) + 1) * 2;
// total number of bits for the boolean arrays = blockDim.x * (bits per thread).
int number_of_bits = number_of_bits_each * blockDim.x;
// The rest of this array will contain the top two parents from the previous generation, because we only use
// those for crossover. Again, this results in a massive speedup over using the global memory.
// copy parents in
int num_elems_to_copy = genome_size * 2;
int num_passes = 1 + ((num_elems_to_copy - 1) / blockDim.x);
T *topTwoParents = (T *)&tempArrays[number_of_bits];
for (int i = 0; i < num_passes; ++i) {
int index = tId + i * blockDim.x;
if (index < num_elems_to_copy) {
topTwoParents[index] = dev_pop[index];
}
}
// set initial boolean arrays to 0.
num_passes = 1 + ((number_of_bits - 1) / blockDim.x);
for (int i = 0; i < num_passes; ++i) {
int index = tId + i * blockDim.x;
if (index < number_of_bits) {
// copy to global mem starting point.
tempArrays[index] = 0;
}
}
__syncthreads();
// If we have an invalid thread index, then return
if (index >= population_size / 2)
return;
// This copies the best two individuals into the first two spots for the next generation.
// doing this reduction over an entire block is faster than only using one thread.
// First we find the best two individuals, and then copy them over.
// Note, we find the second best score somewhat fuzzily, and it is not guaranteed to
// always be exactly the second best score. This is because the main reduction can find the max
// quite easily, but the second max not so much.
// We basically take the second max to be one of the intermediate maxes in the reduction. It's not worth it
// to sort the array first, and it doesn't hamper results that much.
if (blockIdx.x == 0) {
// The third part in this shared array is a list of indices, so we can keep track of which genomes are the best.
int *indices = (int *)&topTwoParents[num_elems_to_copy];
float *probs = dev_probabilites_normalised;
// first set up
num_passes = 1 + ((population_size - 1) / blockDim.x);
for (int i = 0; i < num_passes; ++i) {
int index = tId + i * blockDim.x;
if (index < population_size) {
// put index in
indices[index] = index;
}
}
__shared__ int second_max;
if (index == 0)
second_max = -1;
__syncthreads();
// do a reduction, while keeping the index too. This takes the entire array and
// puts it into an array of size blockDim.x, which is reduced in the following steps.
for (int i = 0; i < num_passes; ++i) {
int index = tId + i * blockDim.x;
if (index < population_size) {
if (probs[index] > probs[tId]) {
probs[tId] = probs[index];
indices[tId] = indices[index];
}
}
}
// now reduce the above
for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
__syncthreads();
if (tId < stride) {
if (probs[tId + stride] > probs[tId]) {
probs[tId] = probs[tId + stride];
indices[tId] = indices[tId + stride];
}
}
}
__syncthreads();
// make second_max equal to indices[1]. This sometimes results in max == second_max,
// but it doesn't affect the final score significantly.
int max = indices[0];
if (index == 0 && second_max == -1)
second_max = indices[1];
// store into shared.
__shared__ int all_max;
if (index == 0) {
all_max = max;
}
__syncthreads();
// Now simply copy over the parents into the new population.
auto parent1 = getIndividual<T>(all_max, dev_pop, genome_size);
auto parent2 = getIndividual<T>(second_max, dev_pop, genome_size);
auto child1 = getIndividual<T>(0, dev_next_pop, genome_size);
auto child2 = getIndividual<T>(1, dev_next_pop, genome_size);
int num_passes = 1 + (genome_size - 1) / blockDim.x;
for (int i = 0; i < num_passes; ++i) {
int k = i * blockDim.x;
if (k + tId < genome_size) {
child1[k + tId] = parent1[k + tId];
child2[k + tId] = parent2[k + tId];
}
}
__syncthreads();
}
// Now, index 0 (only one thread out of all of them), doesn't breed any two indivs, because
// the top 2 parents were stored in new_pop[0] and new_pop[1].
if (index == 0)
*dev_total_max = dev_probabilites_normalised[0]; // update best score.
else
for (int i = index * 2; i < (index * 2) + 1; i += 2) {
// Perform the actual breeding
// two parents => two offspring.
// Found that this form of delayed elitism performed the best, much better than
// choosing from the entire pop in proportion to their fitness.
// get the two parents
int parent1Index = cuda_rand_0n(index, dev_random, 2);
int parent2Index = cuda_rand_0n(index, dev_random, 2);
Individual<T> child1 = getIndividual<T>((i), dev_next_pop, genome_size);
Individual<T> child2 = getIndividual<T>((i + 1), dev_next_pop, genome_size);
Individual<T> parent1 = &topTwoParents[parent1Index * genome_size];
Individual<T> parent2 = &topTwoParents[parent2Index * genome_size];
if (parent1Index == parent2Index) {
// optimisation, memcpy if parents are the same.
memcpy(child1, parent1, sizeof(T) * genome_size);
memcpy(child2, parent2, sizeof(T) * genome_size);
} else {
// perform crossover parent1 + parent2 = child1, child2.
// We also pass in the array of booleans to facilitate faster crossover
function_to_do_crossover(
parent1, parent2,
child1, child2,
genome_size,
dev_random,
&tempArrays[threadIdx.x * number_of_bits_each]);
}
// Mutate the children.
// empirically it was found that cuda performed better with a higher mutation chance.
function_to_do_mutation(child1, 1, genome_size, dev_random, index);
function_to_do_mutation(child2, 1, genome_size, dev_random, index);
}
}
// Sums up the dev_probs in one block.
__global__ void get_total(float *dev_probs, int N, float *total) {
__shared__ float sharedTotal;
if (threadIdx.x == 0) {
sharedTotal = 0;
}
__syncthreads();
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < N) {
atomicAdd(&sharedTotal, dev_probs[index]);
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(total, sharedTotal);
}
}
// normalises all fitness values by total.
__global__ void divideAll(float *dev_probs, int N, float *dev_total) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < N) {
dev_probs[index] /= *dev_total;
}
}
// Normalises dev_probabilities so they sum to 1: zero the device-side
// accumulator, reduce all fitness values into it with get_total, then divide
// every entry by the total with divideAll. Synchronises between phases
// because the kernels depend on each other's output.
template <typename T>
void CudaPopulation<T>::divide() {
    float my0 = 0;
    // Make sure any in-flight kernels writing dev_probabilities are done.
    checkCudaErrors(cudaDeviceSynchronize());
    // Reset the accumulator before the reduction (get_total accumulates).
    checkCudaErrors(cudaMemcpy(device_total, &my0, sizeof(float), cudaMemcpyHostToDevice));
    // BUG FIX: this synchronize was previously unchecked; a sticky error
    // here would have surfaced confusingly at a later call site.
    checkCudaErrors(cudaDeviceSynchronize());
    // Sum all probabilities into device_total, using multiple blocks.
    get_total<<<dimGrid, dimBlock, 0>>>(dev_probabilities, population_size, device_total);
    checkCudaErrors(cudaDeviceSynchronize());
    // Divide each probability by the total.
    divideAll<<<dimGrid, dimBlock, 0>>>(dev_probabilities, population_size, device_total);
    checkCudaErrors(cudaDeviceSynchronize());
}
// Stores the Maximum score into the dev_all_scores array.
__global__ void get_total_and_add(const float *dev_max_prob, const float *dev_total_prob, float *dev_all_scores, int *current_score_count) {
// updates the intermediate scores.
float total_denormalised = *dev_max_prob; // * *dev_total_prob;
dev_all_scores[*current_score_count] = 1 / total_denormalised;
++(*current_score_count);
}
template <typename T>
void CudaPopulation<T>::saveScores() {
// Only one thread does this.
get_total_and_add<<<1, 1, 0>>>(dev_total_max, device_total, dev_global_all_scores, dev_current_count_of_steps);
}
// Copies the per-generation best scores recorded on the device back to the
// host. Returns one float per completed generation (1 / best fitness, as
// written by get_total_and_add).
// BUG FIX: the destination of the first memcpy was mojibake ("¤t_steps",
// an HTML-entity corruption of "&current_steps"); also replaced the raw
// new[]/delete[] staging buffer with std::vector so nothing leaks if a
// checkCudaErrors call aborts mid-function.
template <typename T>
std::vector<float> CudaPopulation<T>::get_all_measured_scores() {
    int current_steps = 0;
    checkCudaErrors(cudaMemcpy(&current_steps, dev_current_count_of_steps, sizeof(int), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaDeviceSynchronize());
    // Copy straight into the vector's storage -- no intermediate buffer.
    std::vector<float> scores_vec(current_steps > 0 ? current_steps : 0);
    if (!scores_vec.empty()) {
        checkCudaErrors(cudaMemcpy(scores_vec.data(), dev_global_all_scores, sizeof(float) * scores_vec.size(), cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaDeviceSynchronize());
    }
    return scores_vec;
}
template <typename T>
std::vector<T> CudaPopulation<T>::get_final_best_solution() {
// The best agent from previous generation is in dev_pop[0]
return getVectorFromIndividual(dev_pop, genome_size);
} |
d29b3d6f6c7969bfdb68b2a663727178c4c43a1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "include/helper_cuda.h"
#include "include/utils.h"
namespace
{
    // Row-major device matrix allocated with hipMallocPitch: consecutive
    // rows of `data` are `pitch` bytes apart, so a row pointer is
    // (double*)((uint8_t*)data + r * pitch).
    struct Matrix {
        void* data;    // device pointer to element (0, 0)
        size_t pitch;  // row stride in bytes (>= cols * sizeof(element))
        size_t rows;   // number of rows
        size_t cols;   // number of logical columns per row
    };
}
// Logistic sigmoid 1 / (1 + e^-z) in full double precision.
// BUG FIX: the original called expf(), the single-precision exponential,
// which silently truncated the double argument and result to float; exp()
// keeps the computation in double throughout.
__device__ double sigmoid(double z)
{
    return 1.0 / (1.0 + exp(-z));
}
// Derivative of the logistic sigmoid: s(z) * (1 - s(z)).
__device__ double sigmoidDeriv(double z)
{
    const double s = sigmoid(z);
    return s * (1.0 - s);
}
// Derivative of the quadratic (MSE) cost 0.5 * (a - y)^2 with respect to the
// activation `a` of one output neuron, for target value `y`.
__device__ double quadraticCostDeriv(double a, double y)
{
    return a - y;
}
// Dot product of two device vectors via a shared-memory tree reduction.
// Launch contract: <<<1, n, n * sizeof(double)>>> where n is the vector
// length -- one thread per element, dynamic shared memory for the partials.
// The second `if` folds the dangling element when the current reduction
// width is odd.
// NOTE(review): when blockDim.x itself is odd (e.g. 5), the element at
// index blockDim.x - 1 appears never to be accumulated -- confirm callers
// always launch even sizes. Also, the odd-width fold writes a slot another
// thread updated in the first `if` with no barrier in between; this relies
// on intra-warp ordering -- verify for widths spanning warp boundaries.
__global__ void dot(const double* vec1, const double* vec2, double* out)
{
    extern __shared__ double cache[];
    // Element-wise product into shared memory.
    cache[threadIdx.x] = vec1[threadIdx.x] * vec2[threadIdx.x];
    __syncthreads();
    int reducedSize = blockDim.x / 2;
    while (reducedSize > 0)
    {
        // Standard halving step: lower half accumulates the upper half.
        if (threadIdx.x < reducedSize)
        {
            cache[threadIdx.x] += cache[threadIdx.x + reducedSize];
        }
        // Odd-width fix-up: the last surviving element is folded into its
        // neighbour so nothing is lost when reducedSize is odd.
        if ((reducedSize > 1) && (reducedSize % 2) && (threadIdx.x == (reducedSize - 1)))
        {
            cache[threadIdx.x - 1] += cache[threadIdx.x];
        }
        reducedSize /= 2;
        __syncthreads();
    }
    // Thread 0 publishes the final sum.
    if (!threadIdx.x) *out = cache[0];
}
// One gradient-descent step on the weight matrix: each thread updates one
// weight by the mini-batch-averaged partial derivative. 2D launch, one
// thread per (row, col) entry of `w`; ragged grids are guarded.
__global__ void gradientDescentStepW(Matrix w, Matrix partialDerivs, double learningRate, size_t subsetSize)
{
    const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w.cols || row >= w.rows) return;
    // Pitched row pointers: rows are `pitch` bytes apart.
    double* weightsRow = (double*)(((uint8_t*)w.data) + row * w.pitch);
    double* gradRow = (double*)(((uint8_t*)partialDerivs.data) + row * partialDerivs.pitch);
    weightsRow[col] -= (learningRate / subsetSize) * gradRow[col];
}
// One gradient-descent step on the bias vector `b`: one thread per bias.
// partialDerivs is laid out (subsetSize x neuronCount) -- each pitched row
// holds one training sample's dC/db vector -- so entry `row` is summed over
// all samples and the averaged step is applied.
__global__ void gradientDescentStepB(double* b, Matrix partialDerivs, double learningRate, size_t subsetSize)
{
    size_t row = blockIdx.x * blockDim.x + threadIdx.x;
    // ROBUSTNESS FIX: guard against launches whose thread count is not an
    // exact multiple of the bias count; gradientDescentStepW already carries
    // the equivalent bounds check. partialDerivs.cols is the neuron count.
    if (row >= partialDerivs.cols) return;
    double pdSum = 0;
    for (size_t sample = 0; sample < partialDerivs.rows; sample++)
    {
        const double* pdRow = (const double*)(((const uint8_t*)partialDerivs.data) + sample * partialDerivs.pitch);
        pdSum += pdRow[row];
    }
    b[row] = b[row] - (learningRate / (double)subsetSize) * pdSum;
}
// Layer activation via dynamic parallelism: each parent thread (one per
// output neuron, blockDim.x == w.rows) launches a child `dot` kernel to
// compute its weight row's dot product with `a`, waits, then adds the bias
// and applies the sigmoid. zOut receives the pre-activation z = W.a + b.
// NOTE(review): device-side hipDeviceSynchronize() inside kernels is
// deprecated/removed on recent toolkits -- confirm the target toolkit
// still supports it (the non-CDP layerActivation below is the alternative).
__global__ void layerActivationCDP(Matrix w, const double* a, const double* b, double* zOut, double* out)
{
    size_t idx = threadIdx.x;
    // Pitched row pointer into the weight matrix.
    double* row = (double*)(((uint8_t*)w.data) + idx * w.pitch);
    // Child kernel writes the dot product straight into out[idx].
    hipLaunchKernelGGL(( dot), dim3(1), dim3(w.cols), w.cols * sizeof(double), 0, row, a, &out[idx]);
    hipDeviceSynchronize();
    zOut[idx] = out[idx] + b[idx];
    out[idx] = sigmoid(zOut[idx]);
}
// Computes one layer's pre-activation z = W.a + b and activation sigmoid(z)
// for a single input vector. One thread per output neuron; launch contract
// is blockDim.x == w.rows.
__global__ void layerActivation(Matrix w, const double* a, const double* b, double* zOut, double* out)
{
    const size_t neuron = threadIdx.x;
    // Pitched row pointer: rows of `w` are `pitch` bytes apart.
    const double* wRow = (const double*)(((const uint8_t*)w.data) + neuron * w.pitch);
    double acc = 0;
    for (int col = 0; col < w.cols; col++)
    {
        acc += wRow[col] * a[col];
    }
    acc += b[neuron];
    zOut[neuron] = acc;
    out[neuron] = sigmoid(acc);
}
// Batched layer activation: z = W.a + b and sigmoid(z) for a whole
// mini-batch at once. Launch layout: gridDim.x = number of samples
// (pitched rows of `a`), blockDim.x = number of output neurons (rows of
// `w`). Each pitched row of a/zOut/out holds one sample's column vector.
__global__ void layerActivation(Matrix w, const Matrix a, const double* b, Matrix zOut, Matrix out)
{
    size_t rowIdx = threadIdx.x;   // output neuron within the layer
    size_t colIdx = blockIdx.x;    // mini-batch sample
    double* wRow = (double*)(((uint8_t*)w.data) + rowIdx * w.pitch);
    double* aCol = (double*)(((uint8_t*)a.data) + colIdx * a.pitch);
    double* zCol = (double*)(((uint8_t*)zOut.data) + colIdx * zOut.pitch);
    double* outCol = (double*)(((uint8_t*)out.data) + colIdx * out.pitch);
    double result = 0;
    // Dot product of this neuron's weight row with the sample's activations.
    for (int element = 0; element < w.cols; element++)
    {
        result += wRow[element] * aCol[element];
    }
    result += b[rowIdx];
    zCol[rowIdx] = result;
    outCol[rowIdx] = sigmoid(result);
}
// Accumulates dC/dW for one layer over the whole mini-batch:
//   out[row][col] = sum over samples of bPartialDerivs[sample][row] * a[sample][col]
// i.e. each sample's bias-gradient outer-multiplied with its input
// activation, summed across the batch. 2D launch, one thread per weight,
// guarded for ragged grids. (The "PartiaDerivs" spelling is kept because
// callers reference this name.)
__global__ void calcWPartiaDerivs(Matrix bPartialDerivs, Matrix a, Matrix out)
{
    size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    size_t row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= out.cols || row >= out.rows) return;
    double result = 0;
    // `element` walks the mini-batch samples (pitched rows of both inputs).
    for (int element = 0; element < bPartialDerivs.rows; element++)
    {
        double* bpdCol = (double*)(((uint8_t*)bPartialDerivs.data) + element * bPartialDerivs.pitch);
        double* aRow = (double*)(((uint8_t*)a.data) + element * a.pitch);
        result += bpdCol[row] * aRow[col];
    }
    double* outRow = (double*)(((uint8_t*)out.data) + row * out.pitch);
    outRow[col] = result;
}
// Output-layer error for each mini-batch sample: delta = (a - y) * sigma'(z),
// element-wise. Launch layout: gridDim.x = samples, blockDim.x = output
// neurons; each pitched row of the Matrix arguments is one sample's vector.
__global__ void calcOutputError(Matrix a, Matrix y, Matrix z, Matrix out)
{
    const size_t neuron = threadIdx.x;   // output neuron
    const size_t sample = blockIdx.x;    // mini-batch sample
    double* aCol = (double*)(((uint8_t*)a.data) + sample * a.pitch);
    double* yCol = (double*)(((uint8_t*)y.data) + sample * y.pitch);
    double* zCol = (double*)(((uint8_t*)z.data) + sample * z.pitch);
    double* outCol = (double*)(((uint8_t*)out.data) + sample * out.pitch);
    outCol[neuron] = quadraticCostDeriv(aCol[neuron], yCol[neuron]) * sigmoidDeriv(zCol[neuron]);
}
// Backpropagates one layer's error using dynamic parallelism:
// out = (W^T . nextLayerError) * sigma'(z), with the dot product per neuron
// delegated to a child `dot` kernel. Expects `wTrans` already transposed so
// each pitched row pairs with nextLayerError.
// NOTE(review): device-side hipDeviceSynchronize() inside kernels is
// deprecated on recent toolkits -- see layerActivationCDP.
__global__ void calcLayerErrorCDP(Matrix wTrans, const double* nextLayerError, const double* z, double* out)
{
    size_t idx = threadIdx.x;
    double* row = (double*)(((uint8_t*)wTrans.data) + idx * wTrans.pitch);
    hipLaunchKernelGGL(( dot), dim3(1), dim3(wTrans.cols), wTrans.cols * sizeof(double), 0, row, nextLayerError, &out[idx]);
    hipDeviceSynchronize();
    out[idx] *= sigmoidDeriv(z[idx]);
}
// Backpropagates the error one layer for every mini-batch sample:
//   out = (W^T . bPartialDerivs) * sigma'(z), element-wise per sample.
// `col` indexes the sample (pitched rows of bPartialDerivs/z/out), `row`
// the neuron in this layer. W is read transposed by walking its rows and
// fixing the column, so no explicit transpose buffer is needed.
__global__ void calcLayerError(Matrix w, Matrix bPartialDerivs, Matrix z, Matrix out)
{
    size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    size_t row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= out.cols || row >= out.rows) return;
    double* bpdCol = (double*)(((uint8_t*)bPartialDerivs.data) + col * bPartialDerivs.pitch);
    double* zCol = (double*)(((uint8_t*)z.data) + col * z.pitch);
    double* outCol = (double*)(((uint8_t*)out.data) + col * out.pitch);
    double result = 0;
    // Walk W's rows, always reading column `row` -- i.e. W^T's row `row`.
    for (int element = 0; element < w.rows; element++)
    {
        double* wTransRow = (double*)(((uint8_t*)w.data) + element * w.pitch);
        result += wTransRow[row] * bpdCol[element];
    }
    outCol[row] = result * sigmoidDeriv(zCol[row]);
}
// Fully connected feed-forward network trained with mini-batch stochastic
// gradient descent. All weights, biases, activations and cost-function
// partial derivatives live in GPU global memory (pitched where 2-D).
//
// Fixes vs. the previous revision:
//  * destructor freed only m_w.size()-1 layers and none of the derivative /
//    activation buffers (GPU memory leak),
//  * the pinned bias staging buffer (bHost) was never freed,
//  * recognizeDigit leaked dummyZ on every call and sized it by the input
//    layer only (OOB write if a hidden layer is wider than the input),
//  * learn() read past the end of xs/ys on the last partial subset and
//    leaked the xSubset/ySubset staging matrices.
class NeuralNetwork {
public:
    // pNeuronCounts: neurons per layer, input layer first, output layer last.
    // subsetSize:    mini-batch size used by learn().
    NeuralNetwork(const std::vector<uint32_t>& pNeuronCounts, uint32_t subsetSize) :
        m_subsetSize(subsetSize)
    {
        m_neuronCounts = pNeuronCounts;
        m_layerCount = m_neuronCounts.size();
        std::default_random_engine generator;
        std::normal_distribution<double> distribution(0.0, 1.0);
        // Placeholder for the input activations; backpropagate() aliases this
        // slot to the caller's input batch, so it is never freed here.
        m_as.push_back(Matrix{0});
        // Allocate GPU mem for weights and biases and cost function partial derivatives with respect to them.
        for (size_t layer = 0; layer < m_layerCount - 1; layer++)
        {
            size_t rows = m_neuronCounts[layer + 1];
            size_t cols = m_neuronCounts[layer];
            // Initialize weights with N(0,1) samples and copy to GPU.
            double* wHostData;
            checkCudaErrors(hipHostMalloc(&wHostData, rows * cols * sizeof(double)));
            for (size_t i = 0; i < rows * cols; i++)
            {
                wHostData[i] = distribution(generator);
            }
            Matrix w = {nullptr, 0, rows, cols};
            checkCudaErrors(hipMallocPitch(&w.data, &w.pitch, cols * sizeof(double), rows));
            m_w.push_back(w);
            checkCudaErrors(hipMemcpy2D(w.data, w.pitch, wHostData, cols * sizeof(double), cols * sizeof(double), rows, hipMemcpyHostToDevice));
            checkCudaErrors(hipHostFree(wHostData));
            // Allocate cost function partial derivatives with respect to weights.
            checkCudaErrors(hipMallocPitch(&w.data, &w.pitch, cols * sizeof(double), rows));
            m_wPartDerivs.push_back(w);
            // Initialize biases with N(0,1) samples and copy to GPU.
            double* bHost;
            checkCudaErrors(hipHostMalloc(&bHost, rows * sizeof(double)));
            for (int i = 0; i < rows; i++)
            {
                bHost[i] = distribution(generator);
            }
            double* bDevice;
            checkCudaErrors(hipMalloc(&bDevice, rows * sizeof(double)));
            m_b.push_back(bDevice);
            checkCudaErrors(hipMemcpy(bDevice, bHost, rows * sizeof(double), hipMemcpyHostToDevice));
            checkCudaErrors(hipHostFree(bHost)); // bug fix: pinned buffer was leaked
            // Allocate cost function partial derivatives with respect to biases
            // (one pitched row per sample of the mini-batch).
            Matrix bPartDerivs = { nullptr, 0, m_subsetSize, rows };
            checkCudaErrors(hipMallocPitch(&bPartDerivs.data, &bPartDerivs.pitch, rows * sizeof(double), m_subsetSize));
            m_bPartDerivs.push_back(bPartDerivs);
            // Weighted inputs z and activations a for the whole mini-batch.
            Matrix z = { nullptr, 0, m_subsetSize, rows };
            checkCudaErrors(hipMallocPitch(&z.data, &z.pitch, rows * sizeof(double), m_subsetSize));
            m_zs.push_back(z);
            Matrix a = { nullptr, 0, m_subsetSize, rows };
            checkCudaErrors(hipMallocPitch(&a.data, &a.pitch, rows * sizeof(double), m_subsetSize));
            m_as.push_back(a);
        }
    }
    ~NeuralNetwork()
    {
        // Bug fix: the loop previously stopped at m_w.size() - 1 (leaking the
        // last layer) and never released the derivative / z / activation
        // buffers. Free everything the constructor allocated.
        for (size_t layer = 0; layer < m_w.size(); layer++)
        {
            checkCudaErrors(hipFree(m_w[layer].data));
            checkCudaErrors(hipFree(m_b[layer]));
            checkCudaErrors(hipFree(m_wPartDerivs[layer].data));
            checkCudaErrors(hipFree(m_bPartDerivs[layer].data));
            checkCudaErrors(hipFree(m_zs[layer].data));
        }
        // m_as[0] aliases caller-owned input (see backpropagate); skip it.
        for (size_t layer = 1; layer < m_as.size(); layer++)
        {
            checkCudaErrors(hipFree(m_as[layer].data));
        }
    }
    // Runs a forward pass on one input vector x (host pointer holding
    // m_neuronCounts[0] doubles) and returns the index (0-9) of the most
    // active output neuron.
    uint8_t recognizeDigit(double* x)
    {
        std::vector<double*> as;
        for (size_t layer = 0; layer < m_layerCount; layer++)
        {
            double* a;
            checkCudaErrors(hipMalloc(&a, m_neuronCounts[layer] * sizeof(double)));
            if (!layer)
            {
                checkCudaErrors(hipMemcpy(a, x, m_neuronCounts[layer] * sizeof(double), hipMemcpyHostToDevice));
            }
            else
            {
                checkCudaErrors(hipMemset(a, 0, m_neuronCounts[layer] * sizeof(double)));
            }
            as.push_back(a);
        }
        // Scratch z-buffer: layerActivation always writes z, but only the
        // activations are needed here. Bug fix: size it by the widest layer
        // (was sized by the input layer only) and free it (was leaked).
        uint32_t maxNeurons = *std::max_element(m_neuronCounts.begin(), m_neuronCounts.end());
        double* dummyZ;
        checkCudaErrors(hipMalloc(&dummyZ, maxNeurons * sizeof(double)));
        for (size_t layer = 0; layer < m_layerCount - 1; layer++)
        {
            int32_t rows = m_neuronCounts[layer + 1];
            hipLaunchKernelGGL(( layerActivation), dim3(1), dim3(rows), 0, 0, m_w[layer], as[layer], m_b[layer], dummyZ, as[layer + 1]);
        }
        std::vector<double> result(m_neuronCounts[m_layerCount - 1]);
        checkCudaErrors(hipMemcpy(&result[0], as[as.size() - 1], result.size() * sizeof(double), hipMemcpyDeviceToHost));
        checkCudaErrors(hipFree(dummyZ));
        for (auto a : as) checkCudaErrors(hipFree(a));
        return static_cast<uint8_t>(std::max_element(result.begin(), result.begin() + 10) - result.begin());
    }
    // Trains the network with stochastic gradient descent: epochCount passes
    // over xs/ys, shuffled each epoch and processed in mini-batches of
    // m_subsetSize samples.
    void learn(std::vector<double*>& xs, std::vector<double*>& ys, uint32_t epochCount, double learningRate)
    {
        uint32_t subsetCount = static_cast<uint32_t>(::ceil(xs.size() / (double)m_subsetSize));
        Matrix xSubset = { nullptr, 0, m_subsetSize, m_neuronCounts[0] };
        Matrix ySubset = { nullptr, 0, m_subsetSize, m_neuronCounts[m_neuronCounts.size() - 1] };
        checkCudaErrors(hipMallocPitch(&xSubset.data, &xSubset.pitch, m_neuronCounts[0] * sizeof(double), m_subsetSize));
        checkCudaErrors(hipMallocPitch(&ySubset.data, &ySubset.pitch, m_neuronCounts[m_neuronCounts.size() - 1] * sizeof(double), m_subsetSize));
        // Identically seeded engines keep the xs/ys shuffles in lockstep.
        auto rngX = std::default_random_engine{};
        auto rngY = rngX;
        for (uint32_t epoch = 0; epoch < epochCount; epoch++)
        {
            printf("Start epoch %d\n", epoch);
            // Rearrange the training data for each epoch.
            std::shuffle(std::begin(xs), std::end(xs), rngX);
            std::shuffle(std::begin(ys), std::end(ys), rngY);
            // Teach the network with a subset of the learning data.
            for (uint32_t subset = 0; subset < subsetCount; subset++)
            {
                // Copy the subset into GPU memory, one sample per pitched row.
                for (uint32_t i = 0; i < m_subsetSize; i++)
                {
                    // Bug fix: clamp the last (possibly partial) subset
                    // instead of indexing past the end of xs/ys. Rows beyond
                    // the end retain the previous subset's samples.
                    size_t sample = (size_t)subset * m_subsetSize + i;
                    if (sample >= xs.size()) break;
                    checkCudaErrors(hipMemcpy(((uint8_t*)xSubset.data) + i * xSubset.pitch,
                        xs[sample], m_neuronCounts[0] * sizeof(double), hipMemcpyHostToDevice));
                    checkCudaErrors(hipMemcpy(((uint8_t*)ySubset.data) + i * ySubset.pitch,
                        ys[sample], m_neuronCounts[m_neuronCounts.size() - 1] * sizeof(double), hipMemcpyHostToDevice));
                }
                updateSubset(xSubset, ySubset, learningRate);
            }
        }
        // Bug fix: the staging matrices were leaked.
        checkCudaErrors(hipFree(xSubset.data));
        checkCudaErrors(hipFree(ySubset.data));
    }
    // One SGD step: backpropagate over the current mini-batch, then move each
    // weight/bias against its averaged gradient.
    void updateSubset(const Matrix& xs, const Matrix& ys, double learningRate)
    {
        // Calculate partial derivatives.
        backpropagate(xs, ys);
        // Update weights and biases.
        for (size_t layer = 0; layer < m_layerCount - 1; layer++)
        {
            int32_t rows = m_neuronCounts[layer + 1];
            int32_t cols = m_neuronCounts[layer];
            dim3 gridSize(static_cast<uint32_t>(::ceil((double)cols / (double)rows)));
            dim3 blockSize(rows, rows);
            hipLaunchKernelGGL(( gradientDescentStepW), dim3(gridSize), dim3(blockSize), 0, 0, m_w[layer], m_wPartDerivs[layer], learningRate, m_subsetSize);
            hipLaunchKernelGGL(( gradientDescentStepB), dim3(1), dim3(rows), 0, 0, m_b[layer], m_bPartDerivs[layer], learningRate, m_subsetSize);
        }
    }
    // Forward pass over the whole mini-batch followed by backward error
    // propagation; fills m_bPartDerivs / m_wPartDerivs for updateSubset().
    void backpropagate(const Matrix& x, const Matrix& y)
    {
        // Alias the input batch as layer-0 activations (not owned by us).
        m_as[0] = x;
        for (size_t layer = 0; layer < m_layerCount - 1; layer++)
        {
            uint32_t rows = m_neuronCounts[layer + 1];
            Matrix a = m_as[layer];
            Matrix aNext = m_as[layer + 1];
            Matrix z = m_zs[layer];
            uint32_t gridCount = static_cast<uint32_t>(a.rows); // one block per sample
            hipLaunchKernelGGL(( layerActivation), dim3(gridCount), dim3(rows), 0, 0, m_w[layer], a, m_b[layer], z, aNext);
        }
        // Output-layer error delta.
        uint32_t lastLayerSize = m_neuronCounts[m_neuronCounts.size() - 1];
        uint32_t gridCount = static_cast<uint32_t>(y.rows);
        hipLaunchKernelGGL(( calcOutputError), dim3(gridCount), dim3(lastLayerSize), 0, 0, m_as[m_as.size() - 1], y, m_zs[m_zs.size() - 1], m_bPartDerivs[m_bPartDerivs.size() - 1]);
        dim3 blockSize(static_cast<uint32_t>(m_wPartDerivs[m_wPartDerivs.size() - 1].cols),
            static_cast<uint32_t>(m_wPartDerivs[m_wPartDerivs.size() - 1].rows));
        hipLaunchKernelGGL(( calcWPartiaDerivs), dim3(1), dim3(blockSize), 0, 0, m_bPartDerivs[m_bPartDerivs.size() - 1], m_as[m_as.size() - 2], m_wPartDerivs[m_wPartDerivs.size() - 1]);
        // Propagate the error backwards through the hidden layers.
        for (size_t layer = 2; layer < m_layerCount; layer++)
        {
            Matrix z = m_zs[m_zs.size() - layer];
            dim3 blockSize(static_cast<uint32_t>(m_bPartDerivs[m_bPartDerivs.size() - layer].cols),
                static_cast<uint32_t>(m_bPartDerivs[m_bPartDerivs.size() - layer].rows));
            hipLaunchKernelGGL(( calcLayerError), dim3(1), dim3(blockSize), 0, 0, m_w[m_w.size() - layer + 1], m_bPartDerivs[m_bPartDerivs.size() - layer + 1], z, m_bPartDerivs[m_bPartDerivs.size() - layer]);
            size_t rows = m_wPartDerivs[m_wPartDerivs.size() - layer].rows;
            size_t cols = m_wPartDerivs[m_wPartDerivs.size() - layer].cols;
            dim3 gridSize(static_cast<uint32_t>(::ceil((double)cols / (double)rows)));
            blockSize = dim3(static_cast<uint32_t>(rows), static_cast<uint32_t>(rows));
            hipLaunchKernelGGL(( calcWPartiaDerivs), dim3(gridSize), dim3(blockSize), 0, 0, m_bPartDerivs[m_bPartDerivs.size() - layer], m_as[m_as.size() - layer - 1], m_wPartDerivs[m_wPartDerivs.size() - layer]);
        }
    }
private:
    size_t m_layerCount;                 // number of layers including input
    std::vector<uint32_t> m_neuronCounts;
    uint32_t m_subsetSize;               // mini-batch size
    std::vector<Matrix> m_w;             // per layer: weights (next x prev)
    std::vector<double*> m_b;            // per layer: biases
    std::vector<Matrix> m_wPartDerivs;   // dC/dW accumulated over a batch
    std::vector<Matrix> m_bPartDerivs;   // per-sample error deltas dC/db
    std::vector<Matrix> m_as;            // activations; m_as[0] aliases input
    std::vector<Matrix> m_zs;            // weighted inputs z = Wa + b
};
// Trains a 4-layer MNIST classifier (input-30-30-10) on the training set,
// then reports recognition accuracy on the 10k-image test set.
// xs/ys buffers are allocated by loadData() and owned by this function.
int main()
{
checkCudaErrors(hipSetDevice(0));
std::vector<double*> xs;
std::vector<double*> ys;
uint32_t elementSize = 0;
// loadData fills xs with per-image pixel vectors and ys with one-hot labels;
// elementSize receives the pixel count per image (NOTE(review): presumably
// 784 for MNIST — confirm against loadData).
loadData(L"MNIST/train-images.idx3-ubyte", xs, L"MNIST/train-labels.idx1-ubyte", ys, elementSize);
std::vector<uint32_t> neuronCounts = { elementSize, 30, 30, 10 };
// Mini-batch size 25, 10 epochs, learning rate 3.0.
NeuralNetwork network(neuronCounts, 25);
network.learn(xs, ys, 10, 3.0);
// Training data is no longer needed once learning finishes.
for (int i = 0; i < xs.size(); i++)
{
delete[] xs[i];
delete[] ys[i];
}
printf("Finished learning. Now checking test images...\n");
std::vector<double*> testImages;
std::vector<double*> testLabels;
loadData(L"MNIST/t10k-images.idx3-ubyte", testImages, L"MNIST/t10k-labels.idx1-ubyte", testLabels, elementSize);
int corrects = 0;
for (int i = 0; i < testImages.size(); i++)
{
/*for (int y = 0; y < 28; y++)
{
for (int x = 0; x < 28; x++)
{
if (testImages[0][y * 28 + x] > 0) printf("0");
else printf(" ");
}
printf("\n");
}*/
int image = i;
// The label vector is one-hot; the arg-max is the true digit.
uint8_t label = static_cast<uint8_t>(std::max_element(testLabels[image], testLabels[image] + 10) - testLabels[image]);
uint8_t digit = network.recognizeDigit(testImages[image]);
if (label == digit) corrects++;
//printf("Real value is %d, network thinks it's %d\n", label, digit);
}
printf("%.2f%% were correct", (double)corrects / (double)testImages.size() * 100.0f);
for (int i = 0; i < testImages.size(); i++)
{
delete[] testImages[i];
delete[] testLabels[i];
}
/*int cols = 784;
int rows = 30;
double* hostPtr;
checkCudaErrors(hipHostMalloc(&hostPtr, rows * cols * sizeof(double)));
for (int i = 0; i < rows * cols; i++) hostPtr[i] = ((i % (matSize + 1)) == 0) ? 1.0 : 0.0;
hipPitchedPtr deviceMatrix;
hipExtent extent = make_hipExtent(cols * sizeof(double), rows, 1);
checkCudaErrors(hipMalloc3D(&deviceMatrix, extent));
hipPitchedPtr hostMatrix;
hostMatrix.ptr = hostPtr;
hostMatrix.pitch = matSize * sizeof(double);
hostMatrix.xsize = deviceMatrix.xsize;
hostMatrix.ysize = deviceMatrix.ysize;
hipMemcpy3DParms cpyParms = { 0 };
cpyParms.srcPtr = hostMatrix;
cpyParms.dstPtr = deviceMatrix;
cpyParms.extent = extent;
cpyParms.kind = hipMemcpyHostToDevice;
checkCudaErrors(hipMemcpy3D(&cpyParms));
double* vector;
checkCudaErrors(hipMallocManaged(&vector, matSize * sizeof(double)));
for (int i = 0; i < matSize; i++) vector[i] = (i + 1.0) * - 0.1;
double* result;
checkCudaErrors(hipMallocManaged(&result, matSize * sizeof(double)));
layerActivation<<<1, matSize>>>(deviceMatrix.ptr, deviceMatrix.pitch, matSize, vector, vector, result);
for (int i = 0; i < matSize; i++)
output("%f, ", result[i]);
output("\n");*/
return 0;
}
| d29b3d6f6c7969bfdb68b2a663727178c4c43a1d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "include/helper_cuda.h"
#include "include/utils.h"
namespace
{
// Pitched 2-D matrix in GPU global memory. One logical row per pitched row;
// `pitch` is the row stride in bytes as returned by cudaMallocPitch, so
// row r starts at (double*)((uint8_t*)data + r * pitch).
struct Matrix {
void* data;   // device pointer to element (0, 0)
size_t pitch; // row stride in bytes
size_t rows;  // logical row count
size_t cols;  // elements per row
};
}
// Logistic sigmoid 1 / (1 + e^-z).
// Bug fix: the previous revision called the single-precision expf() on a
// double argument, silently truncating the exponent to float precision;
// use the double-precision exp() instead.
__device__ double sigmoid(double z)
{
    return 1.0 / (1.0 + exp(-z));
}
// First derivative of the logistic sigmoid: s(z) * (1 - s(z)).
__device__ double sigmoidDeriv(double z)
{
    const double s = sigmoid(z);
    return s * (1.0 - s);
}
// Derivative of the quadratic cost 1/2*(a - y)^2 with respect to the
// activation a (y is the target value).
__device__ double quadraticCostDeriv(double a, double y)
{
return a - y;
}
// Dot product of two vectors of length blockDim.x, computed by one block via
// a shared-memory tree reduction. Requires dynamic shared memory of
// blockDim.x * sizeof(double); result is written to *out by thread 0.
__global__ void dot(const double* vec1, const double* vec2, double* out)
{
extern __shared__ double cache[];
cache[threadIdx.x] = vec1[threadIdx.x] * vec2[threadIdx.x];
__syncthreads();
int reducedSize = blockDim.x / 2;
while (reducedSize > 0)
{
if (threadIdx.x < reducedSize)
{
cache[threadIdx.x] += cache[threadIdx.x + reducedSize];
}
// Odd-size fix-up: fold the dangling element into its neighbour.
// NOTE(review): thread reducedSize-1 writes cache[reducedSize-2] here while
// thread reducedSize-2 also updates that slot in the branch above, with no
// barrier in between — this looks like a data race; verify with
// compute-sanitizer --tool racecheck.
if ((reducedSize > 1) && (reducedSize % 2) && (threadIdx.x == (reducedSize - 1)))
{
cache[threadIdx.x - 1] += cache[threadIdx.x];
}
reducedSize /= 2;
__syncthreads();
}
if (!threadIdx.x) *out = cache[0];
}
// One gradient-descent step on the weight matrix:
// w -= (learningRate / subsetSize) * dC/dw, element-wise.
// 2-D thread indexing over (row, col) of w; out-of-range threads exit early.
__global__ void gradientDescentStepW(Matrix w, Matrix partialDerivs, double learningRate, size_t subsetSize)
{
    const size_t c = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t r = blockIdx.y * blockDim.y + threadIdx.y;
    if (c >= w.cols || r >= w.rows) return;
    double* weights = (double*)(((uint8_t*)w.data) + r * w.pitch);
    double* derivs = (double*)(((uint8_t*)partialDerivs.data) + r * partialDerivs.pitch);
    weights[c] = weights[c] - (learningRate / subsetSize) * derivs[c];
}
// One gradient-descent step on a bias vector. Each thread owns one neuron:
// it sums that neuron's per-sample error deltas (one pitched row per sample
// in partialDerivs) and subtracts the averaged, scaled gradient.
__global__ void gradientDescentStepB(double* b, Matrix partialDerivs, double learningRate, size_t subsetSize)
{
    const size_t neuron = blockIdx.x * blockDim.x + threadIdx.x;
    double deltaSum = 0;
    for (int sample = 0; sample < partialDerivs.rows; sample++)
    {
        const double* sampleRow = (double*)(((uint8_t*)partialDerivs.data) + sample * partialDerivs.pitch);
        deltaSum += sampleRow[neuron];
    }
    b[neuron] = b[neuron] - (learningRate / (double)subsetSize) * deltaSum;
}
// Dynamic-parallelism variant of layerActivation: each thread launches a
// child `dot` kernel for its weight row. Requires -rdc=true.
// NOTE(review): device-side cudaDeviceSynchronize() is deprecated since
// CUDA 11.6 and removed in CUDA 12 — this kernel will not compile on newer
// toolkits; prefer the plain layerActivation below.
__global__ void layerActivationCDP(Matrix w, const double* a, const double* b, double* zOut, double* out)
{
size_t idx = threadIdx.x;
double* row = (double*)(((uint8_t*)w.data) + idx * w.pitch);
dot<<<1, w.cols, w.cols * sizeof(double)>>>(row, a, &out[idx]);
cudaDeviceSynchronize();
// z = w.a + b, activation = sigmoid(z)
zOut[idx] = out[idx] + b[idx];
out[idx] = sigmoid(zOut[idx]);
}
// Single-sample forward step: thread t computes neuron t of the next layer.
// zOut[t] = w(row t) . a + b[t], out[t] = sigmoid(zOut[t]).
// Must be launched with blockDim.x == w.rows; no bounds guard.
__global__ void layerActivation(Matrix w, const double* a, const double* b, double* zOut, double* out)
{
    const size_t neuron = threadIdx.x;
    const double* wRow = (double*)(((uint8_t*)w.data) + neuron * w.pitch);
    double acc = 0;
    for (int c = 0; c < w.cols; c++)
    {
        acc += wRow[c] * a[c];
    }
    acc += b[neuron];
    zOut[neuron] = acc;
    out[neuron] = sigmoid(acc);
}
// Batched forward step: one block per sample (blockIdx.x selects the pitched
// row of a/zOut/out), one thread per output neuron (threadIdx.x).
// Must be launched with blockDim.x == w.rows and gridDim.x == a.rows.
__global__ void layerActivation(Matrix w, const Matrix a, const double* b, Matrix zOut, Matrix out)
{
size_t rowIdx = threadIdx.x;
size_t colIdx = blockIdx.x;
double* wRow = (double*)(((uint8_t*)w.data) + rowIdx * w.pitch);
double* aCol = (double*)(((uint8_t*)a.data) + colIdx * a.pitch);
double* zCol = (double*)(((uint8_t*)zOut.data) + colIdx * zOut.pitch);
double* outCol = (double*)(((uint8_t*)out.data) + colIdx * out.pitch);
// z = w(row) . a(sample) + b, activation = sigmoid(z)
double result = 0;
for (int element = 0; element < w.cols; element++)
{
result += wRow[element] * aCol[element];
}
result += b[rowIdx];
zCol[rowIdx] = result;
outCol[rowIdx] = sigmoid(result);
}
// Weight-gradient accumulation: out(row, col) = sum over samples of
// delta(sample, row) * a(sample, col), i.e. dC/dW summed over the batch
// (averaging happens later in gradientDescentStepW).
// 2-D thread indexing over out; out-of-range threads exit early.
// (Name keeps the historical "Partia" typo; call sites depend on it.)
__global__ void calcWPartiaDerivs(Matrix bPartialDerivs, Matrix a, Matrix out)
{
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
if (col >= out.cols || row >= out.rows) return;
double result = 0;
// One pitched row of bPartialDerivs / a per sample.
for (int element = 0; element < bPartialDerivs.rows; element++)
{
double* bpdCol = (double*)(((uint8_t*)bPartialDerivs.data) + element * bPartialDerivs.pitch);
double* aRow = (double*)(((uint8_t*)a.data) + element * a.pitch);
result += bpdCol[row] * aRow[col];
}
double* outRow = (double*)(((uint8_t*)out.data) + row * out.pitch);
outRow[col] = result;
}
// Output-layer error delta (backprop step 1):
// out(sample, neuron) = dC/da * sigma'(z) for the quadratic cost.
// One block per sample (blockIdx.x), one thread per output neuron.
__global__ void calcOutputError(Matrix a, Matrix y, Matrix z, Matrix out)
{
    const size_t neuron = threadIdx.x;
    const size_t sample = blockIdx.x;
    double* aRow = (double*)(((uint8_t*)a.data) + sample * a.pitch);
    double* yRow = (double*)(((uint8_t*)y.data) + sample * y.pitch);
    double* zRow = (double*)(((uint8_t*)z.data) + sample * z.pitch);
    double* outRow = (double*)(((uint8_t*)out.data) + sample * out.pitch);
    const double costDeriv = quadraticCostDeriv(aRow[neuron], yRow[neuron]);
    const double sigDeriv = sigmoidDeriv(zRow[neuron]);
    outRow[neuron] = costDeriv * sigDeriv;
}
// Dynamic-parallelism variant of the hidden-layer error step:
// out = (wTrans . nextLayerError) * sigma'(z), one child `dot` per thread.
// NOTE(review): device-side cudaDeviceSynchronize() is deprecated since
// CUDA 11.6 and removed in CUDA 12; prefer calcLayerError below.
__global__ void calcLayerErrorCDP(Matrix wTrans, const double* nextLayerError, const double* z, double* out)
{
size_t idx = threadIdx.x;
double* row = (double*)(((uint8_t*)wTrans.data) + idx * wTrans.pitch);
dot<<<1, wTrans.cols, wTrans.cols * sizeof(double)>>>(row, nextLayerError, &out[idx]);
cudaDeviceSynchronize();
out[idx] *= sigmoidDeriv(z[idx]);
}
// Hidden-layer error delta (backprop step 2):
// out(sample, neuron) = (w^T . delta_next)(neuron) * sigma'(z(sample, neuron)),
// where w is the NEXT layer's weight matrix (accessed column-wise, i.e. as
// its transpose) and bPartialDerivs holds the next layer's per-sample deltas.
// 2-D thread indexing: x -> neuron (out.cols), y -> sample (out.rows).
//
// Bug fix: the previous revision swapped the row/col roles — it used the
// x index as the pitched-row (sample) selector while guarding it against
// out.cols (the neuron count), reading past the allocated sample rows
// whenever a layer has more neurons than the mini-batch has samples, and
// leaving some neurons' deltas unwritten.
__global__ void calcLayerError(Matrix w, Matrix bPartialDerivs, Matrix z, Matrix out)
{
    size_t neuron = blockIdx.x * blockDim.x + threadIdx.x;
    size_t sample = blockIdx.y * blockDim.y + threadIdx.y;
    if (neuron >= out.cols || sample >= out.rows) return;
    double* bpdRow = (double*)(((uint8_t*)bPartialDerivs.data) + sample * bPartialDerivs.pitch);
    double* zRow = (double*)(((uint8_t*)z.data) + sample * z.pitch);
    double* outRow = (double*)(((uint8_t*)out.data) + sample * out.pitch);
    double result = 0;
    // (w^T . delta)(neuron) = sum over next-layer neurons j of w(j, neuron) * delta(j).
    for (int element = 0; element < w.rows; element++)
    {
        double* wRow = (double*)(((uint8_t*)w.data) + element * w.pitch);
        result += wRow[neuron] * bpdRow[element];
    }
    outRow[neuron] = result * sigmoidDeriv(zRow[neuron]);
}
// Fully connected feed-forward network trained with mini-batch SGD; all
// parameters and intermediate buffers live in GPU global memory.
class NeuralNetwork {
public:
// pNeuronCounts: neurons per layer (input first); subsetSize: mini-batch size.
NeuralNetwork(const std::vector<uint32_t>& pNeuronCounts, uint32_t subsetSize) :
m_subsetSize(subsetSize)
{
m_neuronCounts = pNeuronCounts;
m_layerCount = m_neuronCounts.size();
std::default_random_engine generator;
std::normal_distribution<double> distribution(0.0, 1.0);
// Placeholder: backpropagate() aliases m_as[0] to the input batch.
m_as.push_back(Matrix{0});
// Allocate GPU mem for weights and biases and cost function partial derivatives with respect to them.
for (size_t layer = 0; layer < m_layerCount - 1; layer++)
{
size_t rows = m_neuronCounts[layer + 1];
size_t cols = m_neuronCounts[layer];
// Initialize weights and copy to GPU
double* wHostData;
checkCudaErrors(cudaMallocHost(&wHostData, rows * cols * sizeof(double)));
for (size_t i = 0; i < rows * cols; i++)
{
wHostData[i] = distribution(generator);
}
Matrix w = {nullptr, 0, rows, cols};
checkCudaErrors(cudaMallocPitch(&w.data, &w.pitch, cols * sizeof(double), rows));
m_w.push_back(w);
checkCudaErrors(cudaMemcpy2D(w.data, w.pitch, wHostData, cols * sizeof(double), cols * sizeof(double), rows, cudaMemcpyHostToDevice));
checkCudaErrors(cudaFreeHost(wHostData));
// Allocate cost function partial derivatives with respect to weights
checkCudaErrors(cudaMallocPitch(&w.data, &w.pitch, cols * sizeof(double), rows));
m_wPartDerivs.push_back(w);
// Initialize biases and copy to GPU
double* bHost;
checkCudaErrors(cudaMallocHost(&bHost, rows * sizeof(double)));
for (int i = 0; i < rows; i++)
{
bHost[i] = distribution(generator);
}
double* bDevice;
checkCudaErrors(cudaMalloc(&bDevice, rows * sizeof(double)));
m_b.push_back(bDevice);
checkCudaErrors(cudaMemcpy(bDevice, bHost, rows * sizeof(double), cudaMemcpyHostToDevice));
// NOTE(review): bHost is pinned memory and never freed — leaks one
// buffer per layer; add cudaFreeHost(bHost) here.
// Allocate cost function partial derivatives with respect to biases
// (one pitched row per mini-batch sample).
Matrix bPartDerivs = { nullptr, 0, m_subsetSize, rows };
checkCudaErrors(cudaMallocPitch(&bPartDerivs.data, &bPartDerivs.pitch, rows * sizeof(double), m_subsetSize));
m_bPartDerivs.push_back(bPartDerivs);
Matrix z = { nullptr, 0, m_subsetSize, rows };
checkCudaErrors(cudaMallocPitch(&z.data, &z.pitch, rows * sizeof(double), m_subsetSize));
m_zs.push_back(z);
Matrix a = { nullptr, 0, m_subsetSize, rows };
checkCudaErrors(cudaMallocPitch(&a.data, &a.pitch, rows * sizeof(double), m_subsetSize));
m_as.push_back(a);
}
}
~NeuralNetwork()
{
// NOTE(review): the loop bound m_w.size() - 1 skips the last layer, and
// m_wPartDerivs / m_bPartDerivs / m_zs / m_as are never freed — GPU
// memory leaks on destruction; should run to m_w.size() and free all
// per-layer buffers (skipping m_as[0], which aliases caller-owned input).
for (size_t layer = 0; layer < m_w.size() - 1; layer++)
{
checkCudaErrors(cudaFree(m_w[layer].data));
checkCudaErrors(cudaFree(m_b[layer]));
}
}
// Forward pass for one host-side input vector; returns arg-max of the
// 10 output activations.
uint8_t recognizeDigit(double* x)
{
std::vector<double*> as;
for (size_t layer = 0; layer < m_layerCount; layer++)
{
double* a;
checkCudaErrors(cudaMalloc(&a, m_neuronCounts[layer] * sizeof(double)));
if (!layer)
{
checkCudaErrors(cudaMemcpy(a, x, m_neuronCounts[layer] * sizeof(double), cudaMemcpyHostToDevice));
}
else
{
checkCudaErrors(cudaMemset(a, 0, m_neuronCounts[layer] * sizeof(double)));
}
as.push_back(a);
}
// Scratch z-buffer: layerActivation always writes z but only `a` is used.
// NOTE(review): dummyZ is never freed (leaks per call) and is sized by the
// input layer only — OOB write if a hidden layer is wider than the input.
double* dummyZ;
checkCudaErrors(cudaMalloc(&dummyZ, m_neuronCounts[0] * sizeof(double)));
for (size_t layer = 0; layer < m_layerCount - 1; layer++)
{
int32_t rows = m_neuronCounts[layer + 1];
int32_t cols = m_neuronCounts[layer];
layerActivation<<<1, rows>>>(m_w[layer], as[layer], m_b[layer], dummyZ, as[layer + 1]);
}
std::vector<double> result(m_neuronCounts[m_layerCount - 1]);
checkCudaErrors(cudaMemcpy(&result[0], as[as.size() - 1], result.size() * sizeof(double), cudaMemcpyDeviceToHost));
for (auto a : as) checkCudaErrors(cudaFree(a));
return static_cast<uint8_t>(std::max_element(result.begin(), result.begin() + 10) - result.begin());
}
// Mini-batch SGD over epochCount passes of the (shuffled) training data.
void learn(std::vector<double*>& xs, std::vector<double*>& ys, uint32_t epochCount, double learningRate)
{
uint32_t subsetCount = static_cast<uint32_t>(std::ceil(xs.size() / (double)m_subsetSize));
Matrix xSubset = { nullptr, 0, m_subsetSize, m_neuronCounts[0] };
Matrix ySubset = { nullptr, 0, m_subsetSize, m_neuronCounts[m_neuronCounts.size() - 1] };
// NOTE(review): xSubset/ySubset are never freed after training.
checkCudaErrors(cudaMallocPitch(&xSubset.data, &xSubset.pitch, m_neuronCounts[0] * sizeof(double), m_subsetSize));
checkCudaErrors(cudaMallocPitch(&ySubset.data, &ySubset.pitch, m_neuronCounts[m_neuronCounts.size() - 1] * sizeof(double), m_subsetSize));
// Use stochastic gradient descent to learn the weights and biases.
// Identically seeded engines keep the xs/ys shuffles in lockstep.
auto rngX = std::default_random_engine{};
auto rngY = rngX;
for (uint32_t epoch = 0; epoch < epochCount; epoch++)
{
printf("Start epoch %d\n", epoch);
// Rearrange the training data for each epoch
std::shuffle(std::begin(xs), std::end(xs), rngX);
std::shuffle(std::begin(ys), std::end(ys), rngY);
uint32_t elementsLeft = static_cast<uint32_t>(xs.size());
// Teach the network with a subset of the learning data
for (uint32_t subset = 0; subset < subsetCount; subset++)
{
// Copy the subset into the GPU memory.
// NOTE(review): on the last (partial) subset, subset*m_subsetSize + i
// can index past the end of xs/ys — out-of-bounds read; clamp to
// xs.size().
for (uint32_t i = 0; i < m_subsetSize; i++)
{
checkCudaErrors(cudaMemcpy(((uint8_t*)xSubset.data) + i * xSubset.pitch,
xs[subset * m_subsetSize + i], m_neuronCounts[0] * sizeof(double), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(((uint8_t*)ySubset.data) + i * ySubset.pitch,
ys[subset * m_subsetSize + i], m_neuronCounts[m_neuronCounts.size() - 1] * sizeof(double), cudaMemcpyHostToDevice));
}
updateSubset(xSubset, ySubset, learningRate);
}
}
}
// One SGD step: backpropagate the batch, then apply the averaged gradients.
void updateSubset(const Matrix& xs, const Matrix& ys, double learningRate)
{
// Calculate partial derivatives
backpropagate(xs, ys);
// Update weights and biases
for (size_t layer = 0; layer < m_layerCount - 1; layer++)
{
int32_t rows = m_neuronCounts[layer + 1];
int32_t cols = m_neuronCounts[layer];
dim3 gridSize(static_cast<uint32_t>(std::ceil((double)cols / (double)rows)));
dim3 blockSize(rows, rows);
gradientDescentStepW<<<gridSize, blockSize>>>(m_w[layer], m_wPartDerivs[layer], learningRate, m_subsetSize);
gradientDescentStepB<<<1, rows>>>(m_b[layer], m_bPartDerivs[layer], learningRate, m_subsetSize);
}
}
// Forward pass over the batch, then backward error propagation; fills
// m_bPartDerivs and m_wPartDerivs for updateSubset().
void backpropagate(const Matrix& x, const Matrix& y)
{
// Alias the input batch as layer-0 activations (not owned by this class).
m_as[0] = x;
for (size_t layer = 0; layer < m_layerCount - 1; layer++)
{
uint32_t rows = m_neuronCounts[layer + 1];
uint32_t cols = m_neuronCounts[layer];
Matrix a = m_as[layer];
Matrix aNext = m_as[layer + 1];
Matrix z = m_zs[layer];
uint32_t gridCount = static_cast<uint32_t>(a.rows); // one block per sample
layerActivation<<<gridCount, rows>>>(m_w[layer], a, m_b[layer], z, aNext);
}
// Output-layer error delta.
uint32_t lastLayerSize = m_neuronCounts[m_neuronCounts.size() - 1];
uint32_t gridCount = static_cast<uint32_t>(y.rows);
calcOutputError<<<gridCount, lastLayerSize>>>(m_as[m_as.size() - 1], y, m_zs[m_zs.size() - 1], m_bPartDerivs[m_bPartDerivs.size() - 1]);
dim3 blockSize(static_cast<uint32_t>(m_wPartDerivs[m_wPartDerivs.size() - 1].cols),
static_cast<uint32_t>(m_wPartDerivs[m_wPartDerivs.size() - 1].rows));
calcWPartiaDerivs<<<1, blockSize>>>(m_bPartDerivs[m_bPartDerivs.size() - 1], m_as[m_as.size() - 2], m_wPartDerivs[m_wPartDerivs.size() - 1]);
// Propagate the error backwards through the hidden layers.
for (size_t layer = 2; layer < m_layerCount; layer++)
{
Matrix z = m_zs[m_zs.size() - layer];
dim3 blockSize(static_cast<uint32_t>(m_bPartDerivs[m_bPartDerivs.size() - layer].cols),
static_cast<uint32_t>(m_bPartDerivs[m_bPartDerivs.size() - layer].rows));
calcLayerError<<<1, blockSize>>>(m_w[m_w.size() - layer + 1], m_bPartDerivs[m_bPartDerivs.size() - layer + 1], z, m_bPartDerivs[m_bPartDerivs.size() - layer]);
size_t rows = m_wPartDerivs[m_wPartDerivs.size() - layer].rows;
size_t cols = m_wPartDerivs[m_wPartDerivs.size() - layer].cols;
dim3 gridSize(static_cast<uint32_t>(std::ceil((double)cols / (double)rows)));
blockSize = dim3(static_cast<uint32_t>(rows), static_cast<uint32_t>(rows));
calcWPartiaDerivs<<<gridSize, blockSize>>>(m_bPartDerivs[m_bPartDerivs.size() - layer], m_as[m_as.size() - layer - 1], m_wPartDerivs[m_wPartDerivs.size() - layer]);
}
}
private:
size_t m_layerCount;                 // layer count including the input layer
std::vector<uint32_t> m_neuronCounts;
uint32_t m_subsetSize;               // mini-batch size
std::vector<Matrix> m_w;             // per layer: weights (next x prev)
std::vector<double*> m_b;            // per layer: biases
std::vector<Matrix> m_wPartDerivs;   // dC/dW accumulated over a batch
std::vector<Matrix> m_bPartDerivs;   // per-sample error deltas dC/db
std::vector<Matrix> m_as;            // activations; m_as[0] aliases input
std::vector<Matrix> m_zs;            // weighted inputs z = Wa + b
};
// Trains a 4-layer MNIST classifier (input-30-30-10) on the training set,
// then reports recognition accuracy on the 10k-image test set.
// xs/ys buffers are allocated by loadData() and owned by this function.
int main()
{
checkCudaErrors(cudaSetDevice(0));
std::vector<double*> xs;
std::vector<double*> ys;
uint32_t elementSize = 0;
// loadData fills xs with per-image pixel vectors and ys with one-hot labels;
// elementSize receives the pixel count per image (NOTE(review): presumably
// 784 for MNIST — confirm against loadData).
loadData(L"MNIST/train-images.idx3-ubyte", xs, L"MNIST/train-labels.idx1-ubyte", ys, elementSize);
std::vector<uint32_t> neuronCounts = { elementSize, 30, 30, 10 };
// Mini-batch size 25, 10 epochs, learning rate 3.0.
NeuralNetwork network(neuronCounts, 25);
network.learn(xs, ys, 10, 3.0);
// Training data is no longer needed once learning finishes.
for (int i = 0; i < xs.size(); i++)
{
delete[] xs[i];
delete[] ys[i];
}
printf("Finished learning. Now checking test images...\n");
std::vector<double*> testImages;
std::vector<double*> testLabels;
loadData(L"MNIST/t10k-images.idx3-ubyte", testImages, L"MNIST/t10k-labels.idx1-ubyte", testLabels, elementSize);
int corrects = 0;
for (int i = 0; i < testImages.size(); i++)
{
/*for (int y = 0; y < 28; y++)
{
for (int x = 0; x < 28; x++)
{
if (testImages[0][y * 28 + x] > 0) printf("0");
else printf(" ");
}
printf("\n");
}*/
int image = i;
// The label vector is one-hot; the arg-max is the true digit.
uint8_t label = static_cast<uint8_t>(std::max_element(testLabels[image], testLabels[image] + 10) - testLabels[image]);
uint8_t digit = network.recognizeDigit(testImages[image]);
if (label == digit) corrects++;
//printf("Real value is %d, network thinks it's %d\n", label, digit);
}
printf("%.2f%% were correct", (double)corrects / (double)testImages.size() * 100.0f);
for (int i = 0; i < testImages.size(); i++)
{
delete[] testImages[i];
delete[] testLabels[i];
}
/*int cols = 784;
int rows = 30;
double* hostPtr;
checkCudaErrors(cudaMallocHost(&hostPtr, rows * cols * sizeof(double)));
for (int i = 0; i < rows * cols; i++) hostPtr[i] = ((i % (matSize + 1)) == 0) ? 1.0 : 0.0;
cudaPitchedPtr deviceMatrix;
cudaExtent extent = make_cudaExtent(cols * sizeof(double), rows, 1);
checkCudaErrors(cudaMalloc3D(&deviceMatrix, extent));
cudaPitchedPtr hostMatrix;
hostMatrix.ptr = hostPtr;
hostMatrix.pitch = matSize * sizeof(double);
hostMatrix.xsize = deviceMatrix.xsize;
hostMatrix.ysize = deviceMatrix.ysize;
cudaMemcpy3DParms cpyParms = { 0 };
cpyParms.srcPtr = hostMatrix;
cpyParms.dstPtr = deviceMatrix;
cpyParms.extent = extent;
cpyParms.kind = cudaMemcpyHostToDevice;
checkCudaErrors(cudaMemcpy3D(&cpyParms));
double* vector;
checkCudaErrors(cudaMallocManaged(&vector, matSize * sizeof(double)));
for (int i = 0; i < matSize; i++) vector[i] = (i + 1.0) * - 0.1;
double* result;
checkCudaErrors(cudaMallocManaged(&result, matSize * sizeof(double)));
layerActivation<<<1, matSize>>>(deviceMatrix.ptr, deviceMatrix.pitch, matSize, vector, vector, result);
for (int i = 0; i < matSize; i++)
output("%f, ", result[i]);
output("\n");*/
return 0;
}
|
5416d2a1464ff198f008833daae75d4885bbf7fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Populate a row-major N*N float matrix with mat[j] = j (0 .. N*N-1).
void fill_matrix(float *mat)
{
    for (int idx = 0; idx < N * N; ++idx)
        mat[idx] = (float)idx;
}
// Baseline: intended to run as <<<1,1>>>, so a single GPU thread transposes
// the whole N x N matrix serially.
__global__ void transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// One thread per source column (launched <<<1, N>>>); each thread walks the
// rows and writes out(row, col) = in(col, row).
__global__ void transpose_parallel_per_row(float in[], float out[])
{
    const int col = threadIdx.x;
    for (int row = 0; row < N; ++row)
        out[row + col*N] = in[col + row*N]; // out(row,col) = in(col,row)
}
// One thread per matrix element, tiled in K x K blocks over an (N/K, N/K)
// grid; no bounds guard, so N must be a multiple of K.
__global__ void transpose_parallel_per_element(float in[], float out[])
{
    const int col = blockIdx.x * K + threadIdx.x;
    const int row = blockIdx.y * K + threadIdx.y;
    out[row + col*N] = in[col + row*N]; // out(row,col) = in(col,row)
}
// Tiled transpose using a 16x16 shared-memory staging tile.
// Requires blockDim = (16,16) and grid = (N/16, N/16); N a multiple of 16.
__global__ void transpose_parallel_per_element_tiled16(float in[], float out[])
{
// Input tile origin and (swapped) output tile origin.
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// Coalesced read from global mem; straight (untransposed) copy into shared mem.
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads(); // whole tile written before any thread reads it back
// Transposed read from shared mem, coalesced write to global mem.
// NOTE(review): tile[x][y] walks a column of the tile; padding to [16][17]
// would presumably avoid shared-memory bank conflicts — verify with Nsight.
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// Benchmarks four transpose variants on an N x N float matrix and prints
// the elapsed time of each. Fix: the previous revision leaked both host
// buffers and both device buffers.
int main(int argc, char **argv)
{
    int numbytes = N * N * sizeof(float);
    float *in = (float *) malloc(numbytes);
    float *out = (float *) malloc(numbytes);
    fill_matrix(in);
    float *d_in, *d_out;
    hipMalloc(&d_in, numbytes);
    hipMalloc(&d_out, numbytes);
    hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
    GpuTimer timer;
    // Single-thread baseline.
    timer.Start();
    hipLaunchKernelGGL(transpose_serial, dim3(1), dim3(1), 0, 0, d_in, d_out);
    timer.Stop();
    printf("Transpose_serial: %g ms.\n",timer.Elapsed());
    // One thread per row.
    timer.Start();
    hipLaunchKernelGGL(transpose_parallel_per_row, dim3(1), dim3(N), 0, 0, d_in, d_out);
    timer.Stop();
    printf("Transpose_per_row: %g ms.\n",timer.Elapsed());
    // One thread per element, K x K thread blocks.
    dim3 blocks(N/K,N/K); // blocks per grid
    dim3 threads(K,K); // threads per block
    timer.Start();
    hipLaunchKernelGGL(transpose_parallel_per_element, dim3(blocks), dim3(threads), 0, 0, d_in, d_out);
    timer.Stop();
    printf("Transpose_per_element: %g ms.\n",timer.Elapsed());
    // Tiled shared-memory version with fixed 16 x 16 tiles.
    dim3 blocks16x16(N/16,N/16); // blocks per grid
    dim3 threads16x16(16,16); // threads per block
    timer.Start();
    hipLaunchKernelGGL(transpose_parallel_per_element_tiled16, dim3(blocks16x16), dim3(threads16x16), 0, 0, d_in, d_out);
    timer.Stop();
    printf("Transpose_tiled: %g ms.\n",timer.Elapsed());
    // Bug fix: release device and host buffers (all were leaked).
    hipFree(d_in);
    hipFree(d_out);
    free(in);
    free(out);
    return 0;
}
| 5416d2a1464ff198f008833daae75d4885bbf7fd.cu | #include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Populate a row-major N*N float matrix with mat[j] = j (0 .. N*N-1).
void fill_matrix(float *mat)
{
    for (int idx = 0; idx < N * N; ++idx)
        mat[idx] = (float)idx;
}
// Baseline: intended to run as <<<1,1>>>, so a single GPU thread transposes
// the whole N x N matrix serially.
__global__ void transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// One thread per source column (launched <<<1, N>>>); each thread walks the
// rows and writes out(row, col) = in(col, row).
__global__ void transpose_parallel_per_row(float in[], float out[])
{
    const int col = threadIdx.x;
    for (int row = 0; row < N; ++row)
        out[row + col*N] = in[col + row*N]; // out(row,col) = in(col,row)
}
// One thread per matrix element, tiled in K x K blocks over an (N/K, N/K)
// grid; no bounds guard, so N must be a multiple of K.
__global__ void transpose_parallel_per_element(float in[], float out[])
{
    const int col = blockIdx.x * K + threadIdx.x;
    const int row = blockIdx.y * K + threadIdx.y;
    out[row + col*N] = in[col + row*N]; // out(row,col) = in(col,row)
}
// Tiled transpose using a 16x16 shared-memory staging tile.
// Requires blockDim = (16,16) and grid = (N/16, N/16); N a multiple of 16.
__global__ void transpose_parallel_per_element_tiled16(float in[], float out[])
{
// Input tile origin and (swapped) output tile origin.
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// Coalesced read from global mem; straight (untransposed) copy into shared mem.
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads(); // whole tile written before any thread reads it back
// Transposed read from shared mem, coalesced write to global mem.
// NOTE(review): tile[x][y] walks a column of the tile; padding to [16][17]
// would presumably avoid shared-memory bank conflicts — verify with Nsight.
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// Benchmarks four transpose variants on an N x N float matrix and prints
// the elapsed time of each. Fix: the previous revision leaked both host
// buffers and both device buffers.
int main(int argc, char **argv)
{
    int numbytes = N * N * sizeof(float);
    float *in = (float *) malloc(numbytes);
    float *out = (float *) malloc(numbytes);
    fill_matrix(in);
    float *d_in, *d_out;
    cudaMalloc(&d_in, numbytes);
    cudaMalloc(&d_out, numbytes);
    cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
    GpuTimer timer;
    // Single-thread baseline.
    timer.Start();
    transpose_serial<<<1,1>>>(d_in, d_out);
    timer.Stop();
    printf("Transpose_serial: %g ms.\n",timer.Elapsed());
    // One thread per row.
    timer.Start();
    transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
    timer.Stop();
    printf("Transpose_per_row: %g ms.\n",timer.Elapsed());
    // One thread per element, K x K thread blocks.
    dim3 blocks(N/K,N/K); // blocks per grid
    dim3 threads(K,K); // threads per block
    timer.Start();
    transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out);
    timer.Stop();
    printf("Transpose_per_element: %g ms.\n",timer.Elapsed());
    // Tiled shared-memory version with fixed 16 x 16 tiles.
    dim3 blocks16x16(N/16,N/16); // blocks per grid
    dim3 threads16x16(16,16); // threads per block
    timer.Start();
    transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out);
    timer.Stop();
    printf("Transpose_tiled: %g ms.\n",timer.Elapsed());
    // Bug fix: release device and host buffers (all were leaked).
    cudaFree(d_in);
    cudaFree(d_out);
    free(in);
    free(out);
    return 0;
}
|
0b48cf36d772e2f4f0d4949a574a05308c94a680.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d3r-32x32-1-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 37
#define BENCH_RAD 3
#include "common.h"
// Runs the star3d3r (star-shaped, radius-3, 3D) stencil for `timestep` steps.
// A1 is a double-buffered grid of 2 * dimsize^3 values, where
// dimsize = compsize + 2 * BENCH_RAD halo cells. scop==true selects the
// AN5D-generated GPU path (copy in, launch kernel0_1 once per time step,
// copy back); otherwise an OpenMP host loop computes the same stencil.
// Returns elapsed wall-clock seconds.
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
// Print and abort on any HIP runtime error; kernel launches are checked
// afterwards via cudaCheckKernel() -> hipGetLastError().
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
// Both time planes of the grid live in one device allocation.
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// AN5D tiling parameters: __cNLen/__cNPad describe the iteration domain
// per dimension, __haloN is the stencil radius, __sideNLen the tile sides.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
// One launch of kernel0_1 advances the stencil by __side0Len (= 1) step.
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
// Generated remainder-handling branches; with a single temporal tile size
// (__side0Len == 1) they are intentionally empty.
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
// Host reference path: 19-point star stencil (center plus 3 neighbors in
// each of the 6 axis directions), double buffered over t%2.
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.25000f * A[t%2][i][j][k]
+ 0.04276f * A[t%2][i][j][k-3] + 0.04176f * A[t%2][i][j][k-2]
+ 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1]
+ 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3]
+ 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k]
+ 0.04086f * A[t%2][i][j-1][k] + 0.04056f * A[t%2][i][j+1][k]
+ 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k]
+ 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k]
+ 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k]
+ 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 0b48cf36d772e2f4f0d4949a574a05308c94a680.cu | #include <assert.h>
#include <stdio.h>
#include "star3d3r-32x32-1-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 37
#define BENCH_RAD 3
#include "common.h"
// Runs the star3d3r (star-shaped, radius-3, 3D) stencil for `timestep` steps.
// A1 is a double-buffered grid of 2 * dimsize^3 values, where
// dimsize = compsize + 2 * BENCH_RAD halo cells. scop==true selects the
// AN5D-generated GPU path (copy in, launch kernel0_1 once per time step,
// copy back); otherwise an OpenMP host loop computes the same stencil.
// Returns elapsed wall-clock seconds.
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
// Print and abort on any CUDA runtime error; kernel launches are checked
// afterwards via cudaCheckKernel() -> cudaGetLastError().
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
// Both time planes of the grid live in one device allocation.
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// AN5D tiling parameters: __cNLen/__cNPad describe the iteration domain
// per dimension, __haloN is the stencil radius, __sideNLen the tile sides.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
// One launch of kernel0_1 advances the stencil by __side0Len (= 1) step.
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
// Generated remainder-handling branches; with a single temporal tile size
// (__side0Len == 1) they are intentionally empty.
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
// Host reference path: 19-point star stencil (center plus 3 neighbors in
// each of the 6 axis directions), double buffered over t%2.
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.25000f * A[t%2][i][j][k]
+ 0.04276f * A[t%2][i][j][k-3] + 0.04176f * A[t%2][i][j][k-2]
+ 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1]
+ 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3]
+ 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k]
+ 0.04086f * A[t%2][i][j-1][k] + 0.04056f * A[t%2][i][j+1][k]
+ 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k]
+ 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k]
+ 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k]
+ 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
a7426e7e257b95a71b39765448a03697120894e2.hip | // !!! This is a file automatically generated by hipify!!!
// Fills the member array weights[0..n_samples) with normalized Exponential(1)
// draws so the weights sum to n_samples (looks like a Bayesian-bootstrap
// style reweighting — TODO confirm against the class's usage).
// RNG is seeded from the thread_id / b_count members, so the draw is
// reproducible per (thread, bootstrap round).
__device__ void cavi_implementor::generate_weights(){
hiprandState_t state;
// seed = thread_id, sequence = b_count + 1, offset = 0
hiprand_init(thread_id, b_count + 1, 0, &state);
double sum_weights = 0;
for(int i = 0; i < n_samples; i++){
// -log(U), U ~ Uniform(0,1] is an Exponential(1) sample.
sum_weights += (
weights[i] = -log(hiprand_uniform_double(&state))
);
}
// Rescale so the weights average to 1 (i.e. sum to n_samples).
for(int i = 0; i < n_samples; i++){
weights[i] *= n_samples/sum_weights;
}
// by the way we can initialize estimates
}
| a7426e7e257b95a71b39765448a03697120894e2.cu | __device__ void cavi_implementor::generate_weights(){
curandState state;
curand_init(thread_id, b_count + 1, 0, &state);
double sum_weights = 0;
for(int i = 0; i < n_samples; i++){
sum_weights += (
weights[i] = -log(curand_uniform_double(&state))
);
}
for(int i = 0; i < n_samples; i++){
weights[i] *= n_samples/sum_weights;
}
// by the way we can initialize estimates
}
|
2961514ff143415a6b6d97dd93202ec38bbfb202.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char bessel_y0_name[] = "bessel_y0_forward";
// Elementwise forward of the Bessel function of the second kind, order 0
// (y0), over the elements described by `iterator`. Uses the jiterator
// (runtime-compiled kernel built from bessel_y0_string) when available,
// otherwise a precompiled gpu_kernel lambda calling bessel_y0_forward.
// Dispatches over floating dtypes only (AT_DISPATCH_FLOATING_TYPES).
void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() {
jitted_gpu_kernel<bessel_y0_name, scalar_t, scalar_t, 1>(iterator, bessel_y0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_cuda);
} // namespace at::native
| 2961514ff143415a6b6d97dd93202ec38bbfb202.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char bessel_y0_name[] = "bessel_y0_forward";
// Elementwise forward of the Bessel function of the second kind, order 0
// (y0), over the elements described by `iterator`. Uses the jiterator
// (runtime-compiled kernel built from bessel_y0_string) when available,
// otherwise a precompiled gpu_kernel lambda calling bessel_y0_forward.
// Dispatches over floating dtypes only (AT_DISPATCH_FLOATING_TYPES).
void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() {
jitted_gpu_kernel<bessel_y0_name, scalar_t, scalar_t, 1>(iterator, bessel_y0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_cuda);
} // namespace at::native
|
7b3e50534decd5efb6645732242f6216cc862b8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*
*
* Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies
* with kernel execution. This sample illustrates the usage of CUDA streams to
* achieve overlapping of kernel execution with copying data to and from the device.
*
* Additionally, this sample uses CUDA events to measure elapsed time for
* CUDA calls. Events are a part of CUDA API and provide a system independent
* way to measure execution times on CUDA devices with approximately 0.5
* microsecond precision.
*
* Elapsed times are averaged over nreps repetitions (10 by default).
*
*/
const char *sSDKname = "simpleMultiCopy";
#include <stdio.h>
#include <cutil_inline.h>
#include <shrUtils.h>
// Writes g_in[i] + 1 into g_out[i] for every i < N, one thread per element.
// The store is deliberately repeated inner_reps times (same value each time)
// so the kernel's runtime can be scaled against the memcopies it overlaps.
__global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps)
{
    const int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= N) {
        return;  // tail threads past the last element do nothing
    }
    for (int rep = 0; rep < inner_reps; ++rep) {
        g_out[element] = g_in[element] + 1;
    }
}
#define STREAM_COUNT 4
// Uncomment to simulate data source/sink IO times
//#define SIMULATE_IO
int *h_data_source;
int *h_data_sink;
int *h_data_in[STREAM_COUNT];
int *d_data_in[STREAM_COUNT];
int *h_data_out[STREAM_COUNT];
int *d_data_out[STREAM_COUNT];
hipEvent_t cycleDone[STREAM_COUNT];
hipStream_t stream[STREAM_COUNT];
hipEvent_t start, stop;
int N = 1 << 22;
int nreps = 10; // number of times each experiment is repeated
int inner_reps = 5;
int memsize;
dim3 block(512);
dim3 grid;
int thread_blocks;
float processWithStreams(int streams_used);
void init();
bool test();
// Benchmark driver: measures H2D copy, D2H copy, and incKernel times
// individually, prints the theoretical overlap limits for this device's
// copy-engine capability, then times nreps pipelined iterations fully
// serialized (1 stream) vs. overlapped (STREAM_COUNT streams) and verifies
// the results. Returns 0; all failures are reported via the CUDA_SAFE_CALL
// and cutil error paths.
int main(int argc, char *argv[])
{
int cuda_device = 0;
float scale_factor;
printf("[%s]\n", sSDKname);
if(shrCheckCmdLineFlag(argc, (const char**)argv, "device"))
{
cuda_device = cutilDeviceInit(argc, argv);
if (cuda_device < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
}
hipDeviceProp_t deviceProp;
cutilSafeCall( hipGetDeviceProperties(&deviceProp, cuda_device) );
// Anything that is less than 4 SM's will have scaled down workload
scale_factor = max((4.0f / (float)deviceProp.multiProcessorCount), 1.0f);
N = (int)( (float)N / scale_factor );
printf("> Device name: %s\n", deviceProp.name);
printf("> CUDA Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor,
deviceProp.multiProcessorCount);
printf("> scale_factor = %.2f\n", 1.0f/scale_factor);
printf("> array_size = %d\n\n", N);
memsize = N * sizeof(int);
// Split the 1D block count across grid.x/grid.y to stay under the 65535
// per-dimension grid limit.
thread_blocks = N / block.x;
grid.x = thread_blocks % 65535;
grid.y = (thread_blocks / 65535 + 1);
// Allocate resources: pinned host buffers (required for async copies) and
// device buffers, one in/out pair plus a stream and event per pipeline slot.
h_data_source = (int*) malloc(memsize);
h_data_sink = (int*) malloc(memsize);
for( int i =0; i<STREAM_COUNT; ++i ) {
CUDA_SAFE_CALL( hipHostMalloc(&h_data_in[i], memsize,
hipHostMallocDefault) );
CUDA_SAFE_CALL( hipMalloc(&d_data_in[i], memsize) );
CUDA_SAFE_CALL( hipHostMalloc(&h_data_out[i], memsize,
hipHostMallocDefault) );
CUDA_SAFE_CALL( hipMalloc(&d_data_out[i], memsize) );
CUDA_SAFE_CALL( hipStreamCreate(&stream[i]) );
CUDA_SAFE_CALL( hipEventCreate(&cycleDone[i]) );
hipEventRecord(cycleDone[i], stream[i]);
}
hipEventCreate(&start); hipEventCreate(&stop);
init();
// Kernel warmup (excludes one-time launch overhead from the timings below)
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, 0, d_data_out[0], d_data_in[0], N, inner_reps);
// Time copies and kernel
hipEventRecord(start,0);
CUDA_SAFE_CALL( hipMemcpyAsync(d_data_in[0], h_data_in[0], memsize,
hipMemcpyHostToDevice,0) );
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float memcpy_h2d_time;
hipEventElapsedTime(&memcpy_h2d_time, start, stop);
hipEventRecord(start,0);
CUDA_SAFE_CALL( hipMemcpyAsync(h_data_out[0], d_data_out[0], memsize,
hipMemcpyDeviceToHost, 0) );
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float memcpy_d2h_time;
hipEventElapsedTime(&memcpy_d2h_time, start, stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block),0,0, d_data_out[0], d_data_in[0], N, inner_reps);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float kernel_time;
hipEventElapsedTime(&kernel_time, start, stop);
printf("Measured timings (throughput):\n");
printf(" Memcpy host to device\t: %f ms (%f GB/s)\n",
memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time );
printf(" Memcpy device to host\t: %f ms (%f GB/s)\n",
memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time);
printf(" Kernel\t\t\t: %f ms (%f GB/s)\n",
kernel_time, (inner_reps * memsize * 2e-6)/ kernel_time);
// Classify the device's copy/compute overlap capability: 2 = dual copy
// engines, 1 = single copy engine, 0 = no overlap.
int type=0;
if( deviceProp.major == 2 && strstr(deviceProp.name, "GeForce") == 0 )
type = 2;
else if( deviceProp.deviceOverlap )
type = 1;
printf("\nTheoretical limits for overlaps (* capability of this device):\n");
printf(" %s c < 1.0 \t: %f ms (No overlap, fully serial)\n",
type == 0 ? "*" : " ", memcpy_h2d_time + memcpy_d2h_time + kernel_time);
printf(" %s 1.1 <= c < 2.0 \t: %f ms (Compute overlaps with one memcopy)\n",
type == 1 ? "*" : " ", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time));
printf(" %s c >= 2.0 \t: %f ms (Compute overlaps with two memcopies)\n",
type == 2 ? "*" : " ", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time));
// Process pipelined work: serialized (1 stream) vs. overlapped
float serial_time = processWithStreams(1);
float overlap_time = processWithStreams(STREAM_COUNT);
printf("\nAverage measured timings over %d repetitions:\n", nreps);
printf(" Avg. time when execution fully serialized\t: %f ms\n",
serial_time / nreps);
printf(" Avg. time when overlapped using %d streams\t: %f ms\n",
STREAM_COUNT, overlap_time / nreps);
printf(" Avg. latency hidden (serialized - overlapped)\t: %f ms\n",
(serial_time - overlap_time) / nreps);
printf("\nMeasured throughput:\n");
printf(" Fully serialized execution\t\t: %f GB/s\n",
(nreps * (memsize * 2e-6))/ serial_time);
printf(" Overlapped using %d streams\t\t: %f GB/s\n",
STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time);
// Test result
if(test())
printf("\nPASSED\n");
else
printf("\nFAILED\n");
// Free resources
free( h_data_source );
free( h_data_sink );
for( int i =0; i<STREAM_COUNT; ++i ) {
hipHostFree(h_data_in[i]);
hipFree(d_data_in[i]);
hipHostFree(h_data_out[i]);
hipFree(d_data_out[i]);
hipStreamDestroy(stream[i]);
hipEventDestroy(cycleDone[i]);
}
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceReset();
cutilExit(argc, argv);
return 0;
}
// Runs nreps pipelined cycles, rotating round-robin over `streams_used`
// streams: launch the kernel for the current slot, upload the next slot's
// input, download the current slot's output, and record an event that
// gates reuse of the slot. With streams_used == 1 everything serializes;
// with more streams copy and compute can overlap.
// Returns the total elapsed time in milliseconds (measured via the global
// start/stop events).
float processWithStreams(int streams_used) {
int current_stream = 0;
float time;
// Do processing in a loop
//
// Note: All memory commands are processed in the order they are issued,
// independent of the stream they are enqueued in. Hence the pattern by
// which the copy and kernel commands are enqueued in the stream
// has an influence on the achieved overlap.
hipEventRecord(start, 0);
for( int i=0; i<nreps; ++i ) {
int next_stream = (current_stream + 1 ) % streams_used;
#ifdef SIMULATE_IO
// Store the result
memcpy(h_data_sink, h_data_out[current_stream],memsize);
// Read new input
memcpy(h_data_in[next_stream], h_data_source, memsize);
#endif
// Ensure that processing and copying of the last cycle has finished
hipEventSynchronize(cycleDone[next_stream]);
// Process current frame
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, stream[current_stream],
d_data_out[current_stream],
d_data_in[current_stream],
N,
inner_reps);
// Upload next frame
CUDA_SAFE_CALL( hipMemcpyAsync(
d_data_in[next_stream],
h_data_in[next_stream],
memsize,
hipMemcpyHostToDevice,
stream[next_stream]) );
// Download current frame
CUDA_SAFE_CALL( hipMemcpyAsync(
h_data_out[current_stream],
d_data_out[current_stream],
memsize,
hipMemcpyDeviceToHost,
stream[current_stream]) );
CUDA_SAFE_CALL( hipEventRecord(
cycleDone[current_stream],
stream[current_stream]) );
current_stream = next_stream;
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&time, start, stop);
return time;
}
// Zero-fills the shared source buffer, then seeds every per-stream pinned
// input buffer with a copy of it.
void init() {
    for (int idx = 0; idx < N; ++idx) {
        h_data_source[idx] = 0;
    }
    for (int s = 0; s < STREAM_COUNT; ++s) {
        memcpy(h_data_in[s], h_data_source, memsize);
    }
}
// Returns true iff every element of every per-stream output buffer equals 1
// (the zero input incremented once by incKernel).
bool test() {
    bool ok = true;
    for (int s = 0; s < STREAM_COUNT; ++s) {
        for (int idx = 0; idx < N; ++idx) {
            ok &= (h_data_out[s][idx] == 1);
        }
    }
    return ok;
}
| 7b3e50534decd5efb6645732242f6216cc862b8a.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*
*
* Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies
* with kernel execution. This sample illustrates the usage of CUDA streams to
* achieve overlapping of kernel execution with copying data to and from the device.
*
* Additionally, this sample uses CUDA events to measure elapsed time for
* CUDA calls. Events are a part of CUDA API and provide a system independent
* way to measure execution times on CUDA devices with approximately 0.5
* microsecond precision.
*
* Elapsed times are averaged over nreps repetitions (10 by default).
*
*/
const char *sSDKname = "simpleMultiCopy";
#include <stdio.h>
#include <cutil_inline.h>
#include <shrUtils.h>
// Writes g_in[i] + 1 into g_out[i] for every i < N, one thread per element.
// The store is deliberately repeated inner_reps times (same value each time)
// so the kernel's runtime can be scaled against the memcopies it overlaps.
__global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps)
{
    const int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= N) {
        return;  // tail threads past the last element do nothing
    }
    for (int rep = 0; rep < inner_reps; ++rep) {
        g_out[element] = g_in[element] + 1;
    }
}
#define STREAM_COUNT 4
// Uncomment to simulate data source/sink IO times
//#define SIMULATE_IO
int *h_data_source;
int *h_data_sink;
int *h_data_in[STREAM_COUNT];
int *d_data_in[STREAM_COUNT];
int *h_data_out[STREAM_COUNT];
int *d_data_out[STREAM_COUNT];
cudaEvent_t cycleDone[STREAM_COUNT];
cudaStream_t stream[STREAM_COUNT];
cudaEvent_t start, stop;
int N = 1 << 22;
int nreps = 10; // number of times each experiment is repeated
int inner_reps = 5;
int memsize;
dim3 block(512);
dim3 grid;
int thread_blocks;
float processWithStreams(int streams_used);
void init();
bool test();
// Benchmark driver: measures H2D copy, D2H copy, and incKernel times
// individually, prints the theoretical overlap limits for this device's
// copy-engine capability, then times nreps pipelined iterations fully
// serialized (1 stream) vs. overlapped (STREAM_COUNT streams) and verifies
// the results. Returns 0; all failures are reported via the CUDA_SAFE_CALL
// and cutil error paths.
int main(int argc, char *argv[])
{
int cuda_device = 0;
float scale_factor;
printf("[%s]\n", sSDKname);
if(shrCheckCmdLineFlag(argc, (const char**)argv, "device"))
{
cuda_device = cutilDeviceInit(argc, argv);
if (cuda_device < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
}
cudaDeviceProp deviceProp;
cutilSafeCall( cudaGetDeviceProperties(&deviceProp, cuda_device) );
// Anything that is less than 4 SM's will have scaled down workload
scale_factor = max((4.0f / (float)deviceProp.multiProcessorCount), 1.0f);
N = (int)( (float)N / scale_factor );
printf("> Device name: %s\n", deviceProp.name);
printf("> CUDA Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor,
deviceProp.multiProcessorCount);
printf("> scale_factor = %.2f\n", 1.0f/scale_factor);
printf("> array_size = %d\n\n", N);
memsize = N * sizeof(int);
// Split the 1D block count across grid.x/grid.y to stay under the 65535
// per-dimension grid limit.
thread_blocks = N / block.x;
grid.x = thread_blocks % 65535;
grid.y = (thread_blocks / 65535 + 1);
// Allocate resources: pinned host buffers (required for async copies) and
// device buffers, one in/out pair plus a stream and event per pipeline slot.
h_data_source = (int*) malloc(memsize);
h_data_sink = (int*) malloc(memsize);
for( int i =0; i<STREAM_COUNT; ++i ) {
CUDA_SAFE_CALL( cudaHostAlloc(&h_data_in[i], memsize,
cudaHostAllocDefault) );
CUDA_SAFE_CALL( cudaMalloc(&d_data_in[i], memsize) );
CUDA_SAFE_CALL( cudaHostAlloc(&h_data_out[i], memsize,
cudaHostAllocDefault) );
CUDA_SAFE_CALL( cudaMalloc(&d_data_out[i], memsize) );
CUDA_SAFE_CALL( cudaStreamCreate(&stream[i]) );
CUDA_SAFE_CALL( cudaEventCreate(&cycleDone[i]) );
cudaEventRecord(cycleDone[i], stream[i]);
}
cudaEventCreate(&start); cudaEventCreate(&stop);
init();
// Kernel warmup (excludes one-time launch overhead from the timings below)
incKernel<<<grid, block>>>(d_data_out[0], d_data_in[0], N, inner_reps);
// Time copies and kernel
cudaEventRecord(start,0);
CUDA_SAFE_CALL( cudaMemcpyAsync(d_data_in[0], h_data_in[0], memsize,
cudaMemcpyHostToDevice,0) );
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float memcpy_h2d_time;
cudaEventElapsedTime(&memcpy_h2d_time, start, stop);
cudaEventRecord(start,0);
CUDA_SAFE_CALL( cudaMemcpyAsync(h_data_out[0], d_data_out[0], memsize,
cudaMemcpyDeviceToHost, 0) );
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float memcpy_d2h_time;
cudaEventElapsedTime(&memcpy_d2h_time, start, stop);
cudaEventRecord(start,0);
incKernel<<<grid, block,0,0>>>(d_data_out[0], d_data_in[0], N, inner_reps);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float kernel_time;
cudaEventElapsedTime(&kernel_time, start, stop);
printf("Measured timings (throughput):\n");
printf(" Memcpy host to device\t: %f ms (%f GB/s)\n",
memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time );
printf(" Memcpy device to host\t: %f ms (%f GB/s)\n",
memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time);
printf(" Kernel\t\t\t: %f ms (%f GB/s)\n",
kernel_time, (inner_reps * memsize * 2e-6)/ kernel_time);
// Classify the device's copy/compute overlap capability: 2 = dual copy
// engines, 1 = single copy engine, 0 = no overlap.
int type=0;
if( deviceProp.major == 2 && strstr(deviceProp.name, "GeForce") == 0 )
type = 2;
else if( deviceProp.deviceOverlap )
type = 1;
printf("\nTheoretical limits for overlaps (* capability of this device):\n");
printf(" %s c < 1.0 \t: %f ms (No overlap, fully serial)\n",
type == 0 ? "*" : " ", memcpy_h2d_time + memcpy_d2h_time + kernel_time);
printf(" %s 1.1 <= c < 2.0 \t: %f ms (Compute overlaps with one memcopy)\n",
type == 1 ? "*" : " ", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time));
printf(" %s c >= 2.0 \t: %f ms (Compute overlaps with two memcopies)\n",
type == 2 ? "*" : " ", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time));
// Process pipelined work: serialized (1 stream) vs. overlapped
float serial_time = processWithStreams(1);
float overlap_time = processWithStreams(STREAM_COUNT);
printf("\nAverage measured timings over %d repetitions:\n", nreps);
printf(" Avg. time when execution fully serialized\t: %f ms\n",
serial_time / nreps);
printf(" Avg. time when overlapped using %d streams\t: %f ms\n",
STREAM_COUNT, overlap_time / nreps);
printf(" Avg. latency hidden (serialized - overlapped)\t: %f ms\n",
(serial_time - overlap_time) / nreps);
printf("\nMeasured throughput:\n");
printf(" Fully serialized execution\t\t: %f GB/s\n",
(nreps * (memsize * 2e-6))/ serial_time);
printf(" Overlapped using %d streams\t\t: %f GB/s\n",
STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time);
// Test result
if(test())
printf("\nPASSED\n");
else
printf("\nFAILED\n");
// Free resources
free( h_data_source );
free( h_data_sink );
for( int i =0; i<STREAM_COUNT; ++i ) {
cudaFreeHost(h_data_in[i]);
cudaFree(d_data_in[i]);
cudaFreeHost(h_data_out[i]);
cudaFree(d_data_out[i]);
cudaStreamDestroy(stream[i]);
cudaEventDestroy(cycleDone[i]);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaThreadExit();
cutilExit(argc, argv);
return 0;
}
// Runs nreps pipelined cycles, rotating round-robin over `streams_used`
// streams: launch the kernel for the current slot, upload the next slot's
// input, download the current slot's output, and record an event that
// gates reuse of the slot. With streams_used == 1 everything serializes;
// with more streams copy and compute can overlap.
// Returns the total elapsed time in milliseconds (measured via the global
// start/stop events).
float processWithStreams(int streams_used) {
int current_stream = 0;
float time;
// Do processing in a loop
//
// Note: All memory commands are processed in the order they are issued,
// independent of the stream they are enqueued in. Hence the pattern by
// which the copy and kernel commands are enqueued in the stream
// has an influence on the achieved overlap.
cudaEventRecord(start, 0);
for( int i=0; i<nreps; ++i ) {
int next_stream = (current_stream + 1 ) % streams_used;
#ifdef SIMULATE_IO
// Store the result
memcpy(h_data_sink, h_data_out[current_stream],memsize);
// Read new input
memcpy(h_data_in[next_stream], h_data_source, memsize);
#endif
// Ensure that processing and copying of the last cycle has finished
cudaEventSynchronize(cycleDone[next_stream]);
// Process current frame
incKernel<<<grid, block, 0, stream[current_stream]>>>(
d_data_out[current_stream],
d_data_in[current_stream],
N,
inner_reps);
// Upload next frame
CUDA_SAFE_CALL( cudaMemcpyAsync(
d_data_in[next_stream],
h_data_in[next_stream],
memsize,
cudaMemcpyHostToDevice,
stream[next_stream]) );
// Download current frame
CUDA_SAFE_CALL( cudaMemcpyAsync(
h_data_out[current_stream],
d_data_out[current_stream],
memsize,
cudaMemcpyDeviceToHost,
stream[current_stream]) );
CUDA_SAFE_CALL( cudaEventRecord(
cycleDone[current_stream],
stream[current_stream]) );
current_stream = next_stream;
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&time, start, stop);
return time;
}
// Zero-fills the shared source buffer, then seeds every per-stream pinned
// input buffer with a copy of it.
void init() {
    for (int idx = 0; idx < N; ++idx) {
        h_data_source[idx] = 0;
    }
    for (int s = 0; s < STREAM_COUNT; ++s) {
        memcpy(h_data_in[s], h_data_source, memsize);
    }
}
// Returns true iff every element of every per-stream output buffer equals 1
// (the zero input incremented once by incKernel).
bool test() {
    bool ok = true;
    for (int s = 0; s < STREAM_COUNT; ++s) {
        for (int idx = 0; idx < N; ++idx) {
            ok &= (h_data_out[s][idx] == 1);
        }
    }
    return ok;
}
|
b3f7596a597cd157d002dd6332b1e2298f3034ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
// Validates an indexing tensor against [-dim_size, dim_size) and folds
// negative indices into the valid range via remainder. Range checking is
// skipped for empty index tensors, or when check_range is false (backward
// pass: the forward pass would already have errored on out-of-bounds
// indices).
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  if (check_range && index.numel() != 0) {
    const auto hi = index.max().item<int64_t>();
    const auto lo = index.min().item<int64_t>();
    TORCH_CHECK_INDEX(hi < dim_size, "index ", hi, " is out of bounds for dimension ", dim, " with size ", dim_size);
    TORCH_CHECK_INDEX(lo >= -dim_size, "index ", lo, " is out of bounds for dimension ", dim, " with size ", dim_size);
  }
  return index.remainder(dim_size);
}
// Returns the strides the tensor would have if it were contiguous: the last
// dimension gets stride 1 and each earlier stride is the product of all
// later sizes. Assumes tensor.dim() >= 1 (same precondition as the
// reverse partial-sum formulation it replaces).
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  const auto sizes = tensor.sizes();
  const int64_t ndim = tensor.dim();
  std::vector<int64_t> stride(ndim);
  stride[ndim - 1] = 1;
  for (int64_t d = ndim - 2; d >= 0; --d) {
    stride[d] = stride[d + 1] * sizes[d + 1];
  }
  return stride;
}
// Collapses the defined indexing tensors into one linear index tensor by
// scaling each with the corresponding contiguous stride of `src` and
// summing. Also returns: nElemBefore, the number of elements in unindexed
// dimensions preceding the first indexed one; strideBefore, the stride of
// the dimension immediately before the first indexed one (0 when indexing
// starts at dim 0); and nElemAfter, the number of elements in unindexed
// trailing dimensions.
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
// Unindexed dimension after the indexed block.
emptyAfter++;
nElemAfter *= src.size(i);
} else {
// Unindexed dimension before the first indexed one.
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes an advanced-indexing call into a single flattened index:
// expands bool/byte masks into long indices, broadcasts all index tensors
// together, pads with undefined tensors up to self.dim(), transposes so the
// indexed dims are adjacent at the front if necessary, and finally computes
// the linear index. Returns (linearIndex, possibly-transposed self,
// nElemBefore, strideBefore, nElemAfter, inversePerm) where inversePerm
// undoes the transpose (empty if none was needed).
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
  checkIndexTensorTypes(orig);
  // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
  auto indices = expandTensors(self, orig);
  // next broadcast all index tensors together
  indices = expand_outplace(indices);
  // add missing null Tensors so that it matches self.dim()
  while (indices.size() < (size_t)self.dim()) {
    indices.emplace_back();
  }
  // if the non-null indices are not all adjacent, transpose self and indices
  // together so that they're adjacent at the front
  std::vector<int64_t> inversePerm;
  if (!hasContiguousSubspace(indices)) {
    std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
  }
  int64_t nElemBefore, strideBefore, nElemAfter;
  Tensor linearIndex;
  std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
  return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
// Largest linear element offset addressable in `self`:
// sum over dims of (size_d - 1) * stride_d. Used to bound the number of
// bits the radix sort of linear indices actually needs.
int64_t largestIndex(const Tensor &self) {
  int64_t result = 0;
  for (const auto i: c10::irange(self.dim())) {
    result += (self.sizes()[i] - 1) * self.strides()[i];
  }
  return result;
}
// index_put_ implementation that sorts the flattened indices first so that
// duplicate destinations are handled by adjacent warps (deterministic
// accumulation order within a destination). `accumulate` adds into self;
// otherwise the last write (in sorted order) wins. `unsafe` skips the
// index range check.
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
  if (indices.size() > (size_t)self.dim()) {
    TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
  }
  if (!self.is_contiguous()) {
    self = self.contiguous();
  }
  Tensor linearIndex, src, expandedValue = value;
  int64_t nElemBefore, strideBefore, sliceSize;
  std::vector<int64_t> inversePerm;
  std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
  int64_t num_indices = linearIndex.numel();
  // Broadcast `value` up to the full scatter shape if it is smaller.
  if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
    auto expanded_size = at::DimVector(expandedValue.sizes());
    auto size1 = expandedValue.sizes();
    auto size2 = linearIndex.sizes();
    if (are_expandable(size1, size2)) {
      expanded_size = infer_size_dimvector(size1, size2);
    }
    if (nElemBefore > 1) {
      expanded_size.insert(expanded_size.begin(), nElemBefore);
    }
    expandedValue = expandedValue.expand(expanded_size);
  }
  expandedValue = expandedValue.contiguous();
  if (num_indices > 0 && sliceSize > 0) {
    const bool permuted = !src.is_contiguous();
    auto src_ = permuted ? src.contiguous() : src;
    linearIndex = linearIndex.reshape(-1);
    auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
    auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
    const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    // Convert element offsets into slice (row) indices.
    linearIndex.divide_(sliceSize, "trunc");
    // cub on CUDA <= 11.2 have a bug that for small sizes
    // cub's sort can be much slower than thrust's merge sort
    // this bug is fixed in CUDA 11.3
#if (defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11030) || defined(USE_ROCM)
    if (num_indices < 50000) {
      index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
    } else
#endif
    {
      // Sort the inputs into sorted with the corresponding indices
      auto range = at::arange(num_indices, linearIndex.options());
      // linearIndex can not be negative, and we take advantage of this
      // fact to sort on less bits for better performance.
      int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
      cuda::cub::radix_sort_pairs(
        linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
        range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
        num_indices, false, 0, nbits);
    }
    TORCH_INTERNAL_ASSERT(
        linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
        "number of flattened indices did not match number of elements in the value tensor: ",
        linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
    const int UNROLL = 4;
    const int indices_per_block = 4;
    // grid.x: one block-row per group of indices; grid.y: covers the slice
    // width; grid.z: covers the unindexed leading dims (clamped to HW limits).
    dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
         std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
         ::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
    dim3 block(C10_WARP_SIZE, indices_per_block);
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
      expandedValue.scalar_type(), "indexing_backward", [&] {
      hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
        sorted_indices.data_ptr<int64_t>(),
        orig_indices.data_ptr<int64_t>(),
        expandedValue.data_ptr<scalar_t>(),
        src_.data_ptr<scalar_t>(),
        num_indices,
        sliceSize,
        strideBefore,
        nElemBefore,
        accumulate);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    });
    if (permuted) {
      // Undo the transpose applied by makeLinearIndex.
      self.copy_(src_.permute(inversePerm));
    }
  }
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
// Validates tensor dimensions for index_add-style operations and returns the
// slice size (product of all dst sizes except `dim`).
// Checks: `index` is a vector or scalar, `dim` is in range for both tensors,
// index.numel() == src.size(dim), and dst/src slices have equal element
// counts. Warns once if slice shapes differ while sizes match (deprecated).
static ptrdiff_t getSliceSize(const Tensor & dst,
                              int dim,
                              const Tensor & index,
                              const Tensor & src)
{
  const auto dstDims = dst.dim();
  const auto srcDims = src.dim();
  TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
  ptrdiff_t dstSliceSize = 1;
  TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
  for (const auto d: c10::irange(dstDims)) {
    if (d != dim) {
      dstSliceSize *= dst.size(d);
    }
  }
  TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "length of src.size[dim] is not equal to length of indices");
  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;
  if (dstDims != srcDims) mismatch = true;
  for (const auto d: c10::irange(srcDims)) {
    if (d != dim) {
      srcSliceSize *= src.size(d);
      if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
    }
  }
  // Bug fix: TORCH_CHECK stream-concatenates its trailing arguments; the old
  // printf-style "(%ld vs %ld)" message printed the literal placeholders
  // followed by the numbers. Build the message stream-style instead.
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensor have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");
  if (mismatch) {
    TORCH_WARN_ONCE(
        "Warning: source/destination slices have same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
  }
  return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// index_add kernel for a SMALL number of indices: each index entry is loaded
// once and the whole grid sweeps its slice before moving to the next entry.
// Template dims select specialized TensorInfo offset computations
// (-1 = general, -2 = fully contiguous).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Destination row selected by this index entry.
    IndexType dstIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
          cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];
      IndexType srcOffset =
          cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];
      // Atomic: multiple index entries may target the same dst row.
      gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// index_add kernel for a LARGE number of indices: the grid strides over the
// full (numIndex x sliceSize) space, so parallelism is spread across index
// entries rather than only across one slice at a time. IndexIsMajor selects
// whether linearIndex enumerates index-major or element-major order (chosen
// on the host from the dst memory layout for better coalescing).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType totalSize,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }
    // Destination row selected by this index entry.
    IndexType dstIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];
    IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];
    // Atomic: multiple index entries may target the same dst row.
    gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
  }
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Decides the traversal order for the large-index kernels by comparing the
// stride between adjacent slices against the strides inside each slice.
// Returns true (index-major) when some in-slice dimension with more than one
// element has a smaller stride than the slice stride; false otherwise.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                        int sliceDim)
{
  // Stride between element #0 of consecutive slices along sliceDim.
  const unsigned int sliceStride = info.strides[sliceDim];
  bool innerDimIsFaster = false;
  for (const auto d: c10::irange(info.dims)) {
    const bool insideSlice = (d != sliceDim);
    if (insideSlice && info.sizes[d] > 1 && info.strides[d] < sliceStride) {
      innerDimIsFaster = true;
      break;
    }
  }
  return innerDimIsFaster;
}
// CUDA implementation of index_add: result[index[i], ...] += alpha * source[i, ...]
// along dimension `dim`. Falls back to a sorted index_put_ when deterministic
// algorithms are requested; otherwise dispatches to the small-/large-index
// kernels based on the number of indices, using 32-bit offset math when all
// tensors fit.
void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
  if (!result.is_same(self)) result.copy_(self);
  // Scalars are treated as 1-d tensor
  Tensor self_ = (result.dim() == 0) ? result.view(1) : result;
  Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
  TORCH_CHECK(result.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
  TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
  // Deterministic path: express index_add as an accumulating index_put_,
  // which sorts indices and thus accumulates in a fixed order.
  if (globalContext().deterministicAlgorithms()){
    torch::List<c10::optional<Tensor>> indices;
    indices.reserve(dim + 1);
    for (const auto i: c10::irange(dim)) {
      indices.emplace_back();
    }
    indices.emplace_back(index.to(at::kLong));
    result.index_put_(indices, source * alpha, true);
    return;
  }
  // The `source` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of index we are choosing, which is the total size
  // of the tensor `index`.
  ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
  ptrdiff_t sourceTotalSize = source.numel();
  int64_t selfAddDimSize = self_.size(dim);
  ptrdiff_t numIndex = index.numel();
  if (sliceSize == 0) {
    return;
  }
  const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  bool indContig = index.is_contiguous();
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // Launch helpers; the kernel-template dims come from the dispatch below.
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM)     \
  hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
    , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream,        \
      selfInfo, sourceInfo, indexInfo,                                    \
      selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value);  \
  C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                        \
                    SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR)            \
  hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                       \
                     SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>)           \
    , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream,        \
      selfInfo, sourceInfo, indexInfo,                                      \
      selfAddDim, sourceAddDim, sourceTotalSize,                            \
      (IDX_IS_MAJOR) ? sliceSize : numIndex,                                \
      selfAddDimSize, alpha_value);                                         \
  C10_HIP_KERNEL_LAUNCH_CHECK();
  dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
  if (cuda::detail::canUse32BitIndexMath(result) &&
      cuda::detail::canUse32BitIndexMath(source) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto alpha_value = alpha.to<scalar_t>();
      AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
        auto sourceInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
        int sourceAddDim = sourceInfo.collapseDims(dim);
        sourceInfo.reduceDim(sourceAddDim);
        auto indexInfo =
          cuda::detail::getTensorInfo<index_t, unsigned int>(index);
        indexInfo.collapseDims();
        // A reasonable choice for when to have each thread iterate over
        // index to choose
        if (numIndex <= 16) {
          if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
          } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
          } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
          } else {
            SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
          }
        } else {
          bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
          if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
          } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
            if (indexIsMajor) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
            }
          } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
            if (indexIsMajor) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
            }
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
          }
        }
      });
    });
  } else {
    // 64-bit fallback: always use the fully-general large-index kernel.
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto alpha_value = alpha.to<scalar_t>();
      cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
      int sourceAddDim = sourceInfo.collapseDims(dim);
      sourceInfo.reduceDim(sourceAddDim);
      AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
        cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
          cuda::detail::getTensorInfo<index_t, uint64_t>(index);
        indexInfo.collapseDims();
        LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
      });
    });
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}
// Structured-kernel entry point for index_add on CUDA; forwards to the impl.
TORCH_IMPL_FUNC(index_add_cuda_out)
(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
  index_add_cuda_impl(self, dim, index, source, alpha, result);
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
// index_select kernel for a SMALL number of indices: each index entry is
// loaded once and the grid copies its whole slice before moving on.
// Template dims select specialized TensorInfo offset computations
// (-1 = general, -2 = fully contiguous).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Source row selected by this index entry.
    IndexType srcIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
          cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstSelectDim];
      IndexType srcOffset =
          cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// index_select kernel for a LARGE number of indices: the grid strides over
// the full output (totalSize), spreading parallelism across index entries.
// IndexIsMajor selects whether linearIndex enumerates index-major or
// element-major order (chosen on the host from the dst memory layout).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }
    // Source row selected by this index entry.
    IndexType srcIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];
    IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
namespace {
// Legacy (THC) semantics for 0-dim scalar tensors: present the scalar to the
// indexing kernels as a one-element vector so offset math works uniformly.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
  const bool isScalar = (ti.dims == 0);
  if (isScalar) {
    // Promote to a 1-d view of a single contiguous element.
    ti.dims = 1;
    ti.sizes[0] = 1;
    ti.strides[0] = 1;
  }
  return ti;
}
}
// Shared CUDA implementation of index_select into `out`:
// out = self.index_select(dim, index). Resizes `out` (or allocates a
// quantized buffer), then dispatches to the small-/large-index copy kernels
// based on the number of indices, using 32-bit offset math when all tensors
// fit.
template <typename scalar_t>
void index_select_out_cuda_impl(
    Tensor& out,
    const Tensor& self,
    long dim,
    const Tensor& index) {
  ptrdiff_t numIndices = index.numel();
  int selfDims = self.dim() == 0 ? 1 : self.dim();
  const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  TORCH_CHECK(
      index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
  TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
  // Output has self's shape with size(dim) replaced by the index count.
  std::vector<int64_t> newSize = self.sizes().vec();
  if (self.dim() > 0) {
    newSize[dim] = numIndices;
  }
  if (self.is_quantized()){
    out = at::empty_quantized(newSize, out);
  } else {
    at::native::resize_output(out, newSize);
  }
  ptrdiff_t outTotalSize = out.numel();
  if (outTotalSize == 0) {
    return;
  }
  bool indContig = index.is_contiguous();
  // The `self` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
  ptrdiff_t sliceSize = outTotalSize / numIndices;
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // Launch helpers; the kernel-template dims come from the dispatch below.
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM)         \
  hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
    , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream,            \
      outInfo, selfInfo, indicesInfo,                                         \
      outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize),              \
      selfSelectDimSize);                                                     \
  C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                           \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR)                   \
  hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                       \
                        DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>)               \
    , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream,           \
      outInfo, selfInfo, indicesInfo,                                          \
      outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize),            \
      static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices),              \
      selfSelectDimSize);                                                      \
  C10_HIP_KERNEL_LAUNCH_CHECK();
  dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));
  if (cuda::detail::canUse32BitIndexMath(out) &&
      cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);
    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
      indicesInfo.collapseDims();
      // A reasonable choice for when to have each thread iterate over
      // indices to choose
      if (numIndices <= 16) {
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
        } else {
          SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
        }
      } else {
        bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
          }
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
          }
        } else {
          LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
        }
      }
    });
  } else {
    // 64-bit fallback: always use the fully-general large-index kernel.
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);
    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
      indicesInfo.collapseDims();
      LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
    });
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
// Public out-variant of index_select on CUDA. Validates devices, overlap,
// and dimensionality, wraps `dim`, then dispatches over dtype (quantized
// per-tensor-affine or regular) into index_select_out_cuda_impl.
Tensor& index_select_out_cuda(
    const Tensor& self,
    int64_t dim,
    const Tensor& index,
    Tensor& out) {
  static constexpr string_view DIM_WARNING =
      "Tensor too large or too many (> 25) dimensions";
  TORCH_CHECK(
      at::cuda::check_device({out, self, index}),
      "Input, output and indices must be on the current device");
  at::assert_no_internal_overlap(out);
  at::assert_no_overlap(out, self);
  at::assert_no_overlap(out, index);
  dim = at::maybe_wrap_dim(dim, self);
  TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  if (self.is_quantized()){
    // Fix: error message previously read "per_tensor quantized quantized
    // tensors" (duplicated word).
    TORCH_CHECK(
      self.qscheme() == kPerTensorAffine,
      "Only per_tensor quantized tensors are supported by index_select.")
    AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
      index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
        at::ScalarType::Half,
        at::ScalarType::Bool,
        at::ScalarType::BFloat16,
        out.scalar_type(),
        "index_select_cuda",
        [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
  }
  return out;
}
// Functional variant of index_select on CUDA: allocates an empty result
// with self's options and delegates to the out-variant.
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
  auto result = at::empty({0}, self.options());
  at::native::index_select_out_cuda(self, dim, index, result);
  return result;
}
// Functional variant of index_select for quantized CUDA tensors. Only
// per-tensor-affine quantization is supported; allocates a quantized result
// buffer and delegates to the out-variant.
Tensor index_select_quantized_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
  // Fix: error message previously read "per_tensor quantized quantized
  // tensors" (duplicated word).
  TORCH_CHECK(
      self.qscheme() == kPerTensorAffine,
      "Only per_tensor quantized tensors are supported by index_select.")
  Tensor out = at::empty_quantized({0}, self);
  at::native::index_select_out_cuda(self, dim, index, out);
  return out;
}
namespace {
// Elementwise masked fill over a pre-built TensorIterator: where mask is
// nonzero, output takes `value`; elsewhere it keeps self.
// mask_t is uint8_t (deprecated byte masks) or bool.
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
        // Convert the fill value once per dtype dispatch.
        const auto value_ = value.to<scalar_t>();
        gpu_kernel(
            iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
              if (mask) {
                return value_;
              }
              return self;
            });
      });
}
} // anonymous namespace
// In-place masked fill on CUDA: self[mask] = value. Accepts bool masks
// (preferred) or deprecated uint8 masks; broadcasts mask against self and
// propagates named-tensor names.
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
  TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
    mask.device(), " and self on ", self.device());
  TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
    "expected mask dtype to be Bool but got ", mask.scalar_type());
  auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
  // Writing through overlapping (expanded) memory is deprecated, not an error.
  if (at::has_internal_overlap(self) == MemOverlap::YES) {
    TORCH_WARN(
      "Use of masked_fill_ on expanded tensors is deprecated. "
      "Please clone() the tensor before performing this operation. "
      "This also applies to advanced indexing e.g. tensor[mask] = scalar");
  }
  at::assert_no_partial_overlap(self, mask);
  c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
  // self appears as both output and input so the kernel can pass it through
  // where the mask is false.
  auto iter = TensorIteratorConfig()
      .set_check_mem_overlap(false)
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .add_output(self)
      .add_input(self)
      .add_input(*b_mask)
      .build();
  if (b_mask->dtype() == at::ScalarType::Byte) {
    TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
            "please use a mask with dtype torch.bool instead.");
    masked_fill_kernel<uint8_t>(iter, value);
  } else {
    masked_fill_kernel<bool>(iter, value);
  }
  namedinference::propagate_names_if_nonempty(self, maybe_outnames);
  return self;
}
// Tensor-value overload: only a 0-dim value tensor is accepted; it is
// converted to a Scalar and funneled through the Scalar overload.
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
  const auto value_dims = value.dim();
  TORCH_CHECK(value_dims == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
      "with ", value_dims, " dimension(s).");
  return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
| b3f7596a597cd157d002dd6332b1e2298f3034ac.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
// Scatters rows of `grad_output` into `grad_weight` at the positions given by
// the (pre-sorted) index list, either accumulating (accumulate=true) or
// keeping only the last duplicate (accumulate=false).
//
// Launch layout (see index_put_with_sort_kernel below):
//   blockIdx.x / threadIdx.y : which index entry a warp handles
//   blockIdx.y / threadIdx.x : position within a slice, unrolled by SZ
//   blockIdx.z               : outer (pre-index) dimensions
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
  int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
  using accscalar_t = at::acc_type<scalar_t, true>;

  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same destination index as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>

  // Number of values processed by each thread (grain size)
  for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
    int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
    // Only the warp at the start of a run of equal sorted indices proceeds;
    // it then consumes the entire run in the do/while below, serializing all
    // writes to that destination row (no atomics needed).
    if (idx < numel
        && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
      do {
        int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
        // if not accumulate, we only keep the last duplicate index so skip those before it
        if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
          idx++;
          continue;
        }
        const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
        const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
        const accscalar_t scale = (accscalar_t)1.0;

        // Per-thread register tiles: SZ elements of gradient and (when
        // accumulating) SZ elements of the current weight values.
        accscalar_t gradient[SZ];
        accscalar_t weight[SZ];

        while (start_feature < stride) {
          // Load phase (guarded against the slice tail).
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
              if (accumulate) {
                weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
              }
            }
          }

          // Accumulate / overwrite in registers, in the accumulation dtype.
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            if (accumulate) {
              weight[ii] += gradient[ii] * scale;
            } else {
              weight[ii] = gradient[ii] * scale;
            }
          }

          // Store phase, converting back to the storage dtype.
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
            }
          }

          start_feature += gridDim.y * blockDim.x * SZ;
        }
        idx++;
      } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
    }
  }
}
}
namespace at { namespace native {
// Bounds-checks `index` against [-dim_size, dim_size) (unless check_range is
// false) and wraps negative entries into [0, dim_size) via remainder.
// check_range=false is used on the backward path, where forward has already
// validated the indices.
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  //we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
  if (index.numel() != 0 && check_range) {
    // min/max each require a device sync, hence the opt-out above.
    auto max_idx = index.max().item<int64_t>();
    auto min_idx = index.min().item<int64_t>();
    if (max_idx >= dim_size) {
      TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
    if (min_idx < -dim_size) {
      TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
  }
  return index.remainder(dim_size);
}
// Returns the strides `tensor` would have if it were contiguous (row-major),
// without touching the tensor itself: stride[d] = product of sizes[d+1..].
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  const auto sizes = tensor.sizes();
  const int64_t ndim = tensor.dim();
  std::vector<int64_t> stride(ndim);
  // Walk from the innermost dimension outwards, carrying the running
  // product of the sizes seen so far.
  int64_t running = 1;
  for (int64_t d = ndim - 1; d >= 0; --d) {
    stride[d] = running;
    running *= sizes[d];
  }
  return stride;
}
// Flattens the advanced-indexing tensors in `indices` into one linear index
// over `src` (using src's contiguous strides) and reports:
//   - the linear index tensor,
//   - nElemBefore:  number of elements in the unindexed leading dimensions,
//   - strideBefore: src stride of the dimension just before the first indexed
//                   one (0 when indexing starts at dimension 0),
//   - nElemAfter:   number of elements in the unindexed trailing dimensions.
// Preconditions (established by makeLinearIndex): all defined entries of
// `indices` are broadcast to a common shape and the indexed dims are adjacent.
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
  auto strides = computeLinearStride(src);
  const auto& device = src.options().device();

  // Compute the linear index by multiplying the indexing tensors by the
  // stride and summing them. All the indexing tensors have the same shape at
  // this point. We also compute the number of dimensions before and after that
  // are not being index.
  Tensor linearIndex;
  int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
  for (const auto i: c10::irange(src.dim())) {
    if (indices[i].defined()) {
      // Cast index to the longType matching src's device
      // This allows us to support ie indexing a cuda tensor with a cpu tensor
      Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
      if (linearIndex.defined()) {
        linearIndex += index;
      } else {
        linearIndex = index;
        if (i>0) {
           strideBefore = src.stride(i-1); // stride after undefined dimensions
        }
      }
    } else if (linearIndex.defined()) {
      emptyAfter++;
      nElemAfter *= src.size(i);
    } else {
      emptyBefore++;
      nElemBefore *= src.size(i);
    }
  }

  return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes the user-supplied optional index list for `self` and reduces it
// to a single linear index:
//   1. expand bool/byte masks into long index tensors,
//   2. broadcast all index tensors to a common shape,
//   3. pad with undefined tensors up to self.dim(),
//   4. if the indexed dims are not adjacent, transpose them to the front
//      (inversePerm records how to undo this),
//   5. collapse everything into one linear index (see computeLinearIndex).
// Returns (linearIndex, self', nElemBefore, strideBefore, nElemAfter,
// inversePerm), where self' is the possibly-transposed view of self.
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
  checkIndexTensorTypes(orig);
  // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
  auto indices = expandTensors(self, orig);
  // next broadcast all index tensors together
  indices = expand_outplace(indices);
  // add missing null Tensors so that it matches self.dim()
  while (indices.size() < (size_t)self.dim()) {
    indices.emplace_back();
  }
  // if the non-null indices are not all adjacent, transpose self and indices
  // together so that they're adjacent at the front
  std::vector<int64_t> inversePerm;
  if (!hasContiguousSubspace(indices)) {
    std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
  }
  int64_t nElemBefore, strideBefore, nElemAfter;
  Tensor linearIndex;
  std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
  return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
// Largest linear storage offset addressable in `self`: the offset of the
// element at position (size-1) along every dimension, i.e.
// sum over d of (sizes[d] - 1) * strides[d].
int64_t largestIndex(const Tensor &self) {
  const auto sizes = self.sizes();
  const auto strides = self.strides();
  int64_t offset = 0;
  for (const auto d: c10::irange(self.dim())) {
    offset += (sizes[d] - 1) * strides[d];
  }
  return offset;
}
// Deterministic-friendly index_put_ implementation: flattens the advanced
// indices into a single linear index, sorts (index, position) pairs so that
// duplicate destinations are adjacent, then launches indexing_backward_kernel,
// which handles each run of duplicates with a single warp (no atomics).
// `unsafe` skips the host-side bounds check; `accumulate` selects += vs. =.
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
  if (indices.size() > (size_t)self.dim()) {
    TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
  }
  if (!self.is_contiguous()) {
    self = self.contiguous();
  }
  Tensor linearIndex, src, expandedValue = value;
  int64_t nElemBefore, strideBefore, sliceSize;
  std::vector<int64_t> inversePerm;
  std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
  int64_t num_indices = linearIndex.numel();

  // Broadcast `value` up to one element per (outer, index, inner) triple when
  // it was given in a smaller, expandable shape.
  if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
    auto expanded_size = at::DimVector(expandedValue.sizes());
    auto size1 = expandedValue.sizes();
    auto size2 = linearIndex.sizes();
    if (are_expandable(size1, size2)) {
      expanded_size = infer_size_dimvector(size1, size2);
    }
    if (nElemBefore > 1) {
      expanded_size.insert(expanded_size.begin(), nElemBefore);
    }
    expandedValue = expandedValue.expand(expanded_size);
  }
  expandedValue = expandedValue.contiguous();

  if (num_indices > 0 && sliceSize > 0) {
      const bool permuted = !src.is_contiguous();
      auto src_ = permuted ? src.contiguous() : src;
      linearIndex = linearIndex.reshape(-1);
      auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

      // Convert element offsets into slice (row) numbers before sorting.
      linearIndex.divide_(sliceSize, "trunc");

      // cub on CUDA <= 11.2 have a bug that for small sizes
      // cub's sort can be much slower than thrust's merge sort
      // this bug is fixed in CUDA 11.3
#if (defined(CUDA_VERSION) && CUDA_VERSION < 11030) || defined(USE_ROCM)
      if (num_indices < 50000) {
        index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
      } else
#endif
      {
      // Sort the inputs into sorted with the corresponding indices
      auto range = at::arange(num_indices, linearIndex.options());
      // linearIndex can not be negative, and we take advantage of this
      // fact to sort on less bits for better performance.
      int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
      cuda::cub::radix_sort_pairs(
        linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
        range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
        num_indices, false, 0, nbits);
      }

      TORCH_INTERNAL_ASSERT(
          linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
          "number of flattened indices did not match number of elements in the value tensor: ",
          linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
      const int UNROLL = 4;
      const int indices_per_block = 4;
      // Grid: x = index runs, y = slice (capped by maxGridSize), z = outer dims.
      dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
           std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
           std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
      dim3 block(C10_WARP_SIZE, indices_per_block);

      AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
      expandedValue.scalar_type(), "indexing_backward", [&] {
        indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
          sorted_indices.data_ptr<int64_t>(),
          orig_indices.data_ptr<int64_t>(),
          expandedValue.data_ptr<scalar_t>(),
          src_.data_ptr<scalar_t>(),
          num_indices,
          sliceSize,
          strideBefore,
          nElemBefore,
          accumulate);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });

      // If self needed a contiguous copy, scatter the result back in the
      // original layout.
      if (permuted) {
        self.copy_(src_.permute(inversePerm));
      }
  }
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
// Validates the shapes involved in an index_add-style operation and returns
// the number of elements in each slice (the product of all `dst` dimensions
// except `dim`).
//
// Checks performed:
//   - `index` is 0-d or 1-d;
//   - `dim` is a valid dimension of both `dst` and `src`;
//   - index.numel() matches src.size(dim);
//   - dst and src have equal slice sizes (a warning only, not an error, when
//     the per-dimension slice shapes differ while the totals agree).
static ptrdiff_t getSliceSize(const Tensor & dst,
                              int dim,
                              const Tensor & index,
                              const Tensor & src)
{
  const auto dstDims = dst.dim();
  const auto srcDims = src.dim();

  TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");

  ptrdiff_t dstSliceSize = 1;
  TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
  for (const auto d: c10::irange(dstDims)) {
    if (d != dim) {
      dstSliceSize *= dst.size(d);
    }
  }

  TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "length of src.size[dim] is not equal to length of indices");

  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;

  if (dstDims != srcDims) mismatch = true;

  for (const auto d: c10::irange(srcDims)) {
    if (d != dim) {
      srcSliceSize *= src.size(d);
      if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
    }
  }

  // TORCH_CHECK builds its message by streaming the arguments, not via
  // printf-style formatting, so the values are embedded directly; the old
  // "%ld vs %ld" placeholders were printed literally.
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensor have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");

  if (mismatch) {
    TORCH_WARN_ONCE(
        "Warning: source/destination slices have same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
  }

  return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// index_add gather/scatter for a SMALL number of indices: the outer loop over
// index entries runs identically in every thread (each index value is loaded
// once per thread and reused for the whole slice), while a 1-D grid-stride
// loop covers the slice of `innerSize` elements. Additions into `dst` are
// atomic because several indices may target the same destination row.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Destination row for this source row; bounds-checked device-side.
    IndexType dstIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);

    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
          cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];

      IndexType srcOffset =
          cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];

      gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// index_add for a LARGE number of indices: a single grid-stride loop over all
// source elements; each thread derives its (index entry, element-in-slice)
// pair from the flat loop counter, so parallelism scales with the total
// element count rather than the slice size. IndexIsMajor chooses which of the
// two coordinates is fast-moving (see indexShouldBeMajor).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType totalSize,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }

    // Destination row for this source row; bounds-checked device-side.
    IndexType dstIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);

    IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];

    IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];

    // Atomic because several indices may target the same dst row.
    gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
  }
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Decides between "index-major" and "elementInSlice-major" kernel traversal
// (see the block comment above): returns true when some non-slice dimension
// with extent > 1 has a smaller stride than the slice dimension itself.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                    int sliceDim)
{
  // Stride between adjacent slices, e.g. between element #0 of slice #100
  // and element #0 of slice #101.
  const unsigned int sliceStride = info.strides[sliceDim];

  for (const auto d: c10::irange(info.dims)) {
    if (d == sliceDim) {
      continue;  // the slice dimension itself never counts
    }
    if (info.sizes[d] > 1 && info.strides[d] < sliceStride) {
      return true;
    }
  }

  return false;
}
// Core of index_add on CUDA: result[index[i]] += alpha * source[i] along
// `dim`. Picks between two kernels (small vs. large index counts) and between
// 32-bit and 64-bit offset math, after routing deterministic mode through
// index_put_ (which uses the sort-based, atomic-free path).
void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
  if (!result.is_same(self)) result.copy_(self);

  // Scalars are treated as 1-d tensor
  Tensor self_ = (result.dim() == 0) ? result.view(1) : result;
  Tensor source_ = (source.dim() == 0) ? source.view(1) : source;

  TORCH_CHECK(result.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
  TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");

  // Deterministic mode: express index_add as an accumulating index_put_,
  // whose sorted implementation avoids non-deterministic atomics.
  if (globalContext().deterministicAlgorithms()){
    torch::List<c10::optional<Tensor>> indices;
    indices.reserve(dim + 1);
    for (const auto i: c10::irange(dim)) {
      indices.emplace_back();
    }
    indices.emplace_back(index.to(at::kLong));
    result.index_put_(indices, source * alpha, true);
    return;
  }

  // The `source` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of index we are choosing, which is the total size
  // of the tensor `index`.
  ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
  ptrdiff_t sourceTotalSize = source.numel();
  int64_t selfAddDimSize = self_.size(dim);
  ptrdiff_t numIndex = index.numel();

  if (sliceSize == 0) {
    return;
  }
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  bool indContig = index.is_contiguous();

  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;

#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM)   \
  indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
    <<<smallIndexGrid, smallIndexBlock, 0, stream>>>(                                 \
      selfInfo, sourceInfo, indexInfo,                                                \
      selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value);              \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                      \
                    SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR)          \
  indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                     \
                     SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>         \
    <<<largeIndexGrid, largeIndexBlock, 0, stream>>>(                     \
      selfInfo, sourceInfo, indexInfo,                                    \
      selfAddDim, sourceAddDim, sourceTotalSize,                          \
      (IDX_IS_MAJOR) ? sliceSize : numIndex,                              \
      selfAddDimSize, alpha_value);                                       \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

  dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));

  dim3 largeIndexGrid(std::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));

  // 32-bit offsets are cheaper on device; fall back to 64-bit only when some
  // tensor is too large.
  if (cuda::detail::canUse32BitIndexMath(result) &&
      cuda::detail::canUse32BitIndexMath(source) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto alpha_value = alpha.to<scalar_t>();
      AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
        auto sourceInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
        int sourceAddDim = sourceInfo.collapseDims(dim);
        sourceInfo.reduceDim(sourceAddDim);

        auto indexInfo =
        cuda::detail::getTensorInfo<index_t, unsigned int>(index);
        indexInfo.collapseDims();

        // A reasonable choice for when to have each thread iterate over
        // index to choose
        if (numIndex <= 16) {
          if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
          } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
          } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
          } else {
            SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
          }
        } else {
          bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);

          if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
          } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
            if (indexIsMajor) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
            }
          } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
            if (indexIsMajor) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
            }
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
          }
        }
      });
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto alpha_value = alpha.to<scalar_t>();

      cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
      int sourceAddDim = sourceInfo.collapseDims(dim);
      sourceInfo.reduceDim(sourceAddDim);

      AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
        cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
          cuda::detail::getTensorInfo<index_t, uint64_t>(index);
        indexInfo.collapseDims();

        LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
      });
    });
  }

#undef SMALL_INDEX
#undef LARGE_INDEX
}
// Structured-kernel entry point for index_add.out on CUDA; shape and dtype
// checks are handled by the shared meta function, so this just forwards.
TORCH_IMPL_FUNC(index_add_cuda_out)
(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
  index_add_cuda_impl(self, dim, index, source, alpha, result);
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
// index_select gather for a SMALL number of indices: the outer loop over
// output rows runs identically in every thread (each index value is loaded
// once per thread and reused for the whole slice), while a 1-D grid-stride
// loop copies the srcIndex-th slice of `src` into the dstIndex-th slice of
// `dst`. Plain stores — every output row is written by exactly one pass.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Source row being selected; bounds-checked device-side.
    IndexType srcIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);

    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstSelectDim];

      IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];

      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// index_select gather for a LARGE number of indices: a single grid-stride
// loop over all output elements; each thread derives its (output row,
// element-in-slice) pair from the flat loop counter, so parallelism scales
// with the total output size. IndexIsMajor chooses which coordinate is the
// fast-moving one (see indexShouldBeMajor).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }

    // Source row being selected; bounds-checked device-side.
    IndexType srcIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);

    IndexType dstOffset =
      cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];

    IndexType srcOffset =
      cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];

    dst.data[dstOffset] = src.data[srcOffset];
  }
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
// Legacy (THC) view of a 0-dim scalar tensor: report it as a one-element
// vector so the index-select kernels can treat every input uniformly.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
  if (ti.dims != 0) {
    return ti;  // already at least 1-d; nothing to do
  }
  ti.dims = 1;
  ti.sizes[0] = 1;
  ti.strides[0] = 1;
  return ti;
}
}
// Core of index_select on CUDA for one scalar type: resizes `out` to self's
// shape with dimension `dim` replaced by index.numel(), then gathers slices
// with one of two kernels (small vs. large index counts) using 32-bit offset
// math when all tensors allow it, 64-bit otherwise.
template <typename scalar_t>
void index_select_out_cuda_impl(
    Tensor& out,
    const Tensor& self,
    long dim,
    const Tensor& index) {
  ptrdiff_t numIndices = index.numel();
  int selfDims = self.dim() == 0 ? 1 : self.dim();

  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  TORCH_CHECK(
      index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
  TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");

  std::vector<int64_t> newSize = self.sizes().vec();
  if (self.dim() > 0) {
    newSize[dim] = numIndices;
  }

  // Quantized outputs need their quantizer carried over; plain tensors just
  // get resized.
  if (self.is_quantized()){
      out = at::empty_quantized(newSize, out);
  } else {
    at::native::resize_output(out, newSize);
  }

  ptrdiff_t outTotalSize = out.numel();
  if (outTotalSize == 0) {
    return;
  }

  bool indContig = index.is_contiguous();

  // The `self` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
  ptrdiff_t sliceSize = outTotalSize / numIndices;

  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;

#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM)     \
  indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
    <<<smallIndexGrid, smallIndexBlock, 0, stream>>>(                               \
      outInfo, selfInfo, indicesInfo,                                               \
      outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize),                    \
      selfSelectDimSize);                                                           \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                        \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR)                \
  indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                    \
                        DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>            \
    <<<largeIndexGrid, largeIndexBlock, 0, stream>>>(                       \
      outInfo, selfInfo, indicesInfo,                                       \
      outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize),         \
      static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices),           \
      selfSelectDimSize);                                                   \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

  dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));

  dim3 largeIndexGrid(std::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128));

  if (cuda::detail::canUse32BitIndexMath(out) &&
      cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);

    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);

    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
      indicesInfo.collapseDims();

      // A reasonable choice for when to have each thread iterate over
      // indices to choose
      if (numIndices <= 16) {
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
        } else {
          SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
        }
      } else {
        bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);

        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
          }
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
          }
        } else {
          LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
        }
      }
    });
  } else {
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);

    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
      indicesInfo.collapseDims();

      LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
    });
  }

#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
// Public out= entry point for index_select on CUDA: validates devices,
// overlap, and dimension count, then dispatches over dtype (including
// per-tensor-quantized inputs) to index_select_out_cuda_impl.
Tensor& index_select_out_cuda(
    const Tensor& self,
    int64_t dim,
    const Tensor& index,
    Tensor& out) {
  static constexpr string_view DIM_WARNING =
      "Tensor too large or too many (> 25) dimensions";
  TORCH_CHECK(
      at::cuda::check_device({out, self, index}),
      "Input, output and indices must be on the current device");
  at::assert_no_internal_overlap(out);
  at::assert_no_overlap(out, self);
  at::assert_no_overlap(out, index);

  dim = at::maybe_wrap_dim(dim, self);
  TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  if (self.is_quantized()){
    TORCH_CHECK(
      self.qscheme() == kPerTensorAffine,
      "Only per_tensor quantized quantized tensors are supported by index_select.")
    AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
      index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
        at::ScalarType::Half,
        at::ScalarType::Bool,
        at::ScalarType::BFloat16,
        out.scalar_type(),
        "index_select_cuda",
        [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
  }

  return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out = at::empty({0}, self.options());
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
Tensor index_select_quantized_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
Tensor out = at::empty_quantized({0}, self);
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
|
f7549d216b81311576ce1b154661fa408296ba06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
int absolute_row = threadIdx.x+(blockDim.x*blockIdx.x);
int absolute_column = threadIdx.y+(blockDim.y*blockIdx.y);
int idx = absolute_row*numCols+absolute_column;
greyImage[idx] = .299f*rgbaImage[idx].x + .587f*rgbaImage[idx].y + .114f*rgbaImage[idx].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(24, 24, 1);
const dim3 gridSize(numRows/16, numCols/16, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| f7549d216b81311576ce1b154661fa408296ba06.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
int absolute_row = threadIdx.x+(blockDim.x*blockIdx.x);
int absolute_column = threadIdx.y+(blockDim.y*blockIdx.y);
int idx = absolute_row*numCols+absolute_column;
greyImage[idx] = .299f*rgbaImage[idx].x + .587f*rgbaImage[idx].y + .114f*rgbaImage[idx].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(24, 24, 1);
const dim3 gridSize(numRows/16, numCols/16, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
7a11f9758aeed302d9a77e59419d427da0e8e0c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include "../debug.h"
#define N ( 1 << 27 )
#define THREADS_PER_BLOCK 128
#define FLOATTYPE_T float
__global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *out)
{
/* Allocate shared memory for hipcub::BlockReduce */
__shared__ typename hipcub::BlockReduce<FLOATTYPE_T,
THREADS_PER_BLOCK>::TempStorage sArray;
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* grid stride handling case where array is larger than number of threads
* launched
* Loop over the grid stride so that each thread adds up its relevant
* elements of the array and saves them to a register.
*/
FLOATTYPE_T tempResult = 0.0;
for( int i = globalIndex; i < n; i += blockDim.x * gridDim.x )
{
tempResult += in[i];
} /* end for */
/* Compute the block-wide sum for thread0 */
FLOATTYPE_T blockSum = hipcub::BlockReduce<FLOATTYPE_T,
THREADS_PER_BLOCK>(sArray).Sum(tempResult);
/* write the result back to global memory */
if( threadIdx.x == 0 ) out[blockIdx.x] = blockSum;
return;
}
int main()
{
FLOATTYPE_T *h_in, h_sum, cpu_sum;
FLOATTYPE_T *d_in, *d_sum, *d_tempArray;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
int tempArraySize = 32768;
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( &d_in, memBytes ) );
checkCUDA( hipMalloc( &d_sum, sizeof(FLOATTYPE_T) ) );
checkCUDA( hipMalloc( &d_tempArray, tempArraySize * sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_sum = 0.0;
cpu_sum = 0.0;
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, h_in, memBytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) );
checkCUDA( hipMemset( d_tempArray, 0,
tempArraySize * sizeof(FLOATTYPE_T) ) );
/* calculate block and grid sizes */
dim3 threads1( THREADS_PER_BLOCK, 1, 1 );
int blk = min( (size / threads1.x), tempArraySize );
dim3 blocks( blk, 1, 1);
dim3 threads2( min(blocks.x,threads1.x), 1, 1 );
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( sumReduction), dim3(blocks), dim3(threads1) , 0, 0, size, d_in, d_tempArray );
checkKERNEL()
hipLaunchKernelGGL(( sumReduction), dim3(1), dim3(threads2) , 0, 0, blocks.x, d_tempArray, d_sum );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( hipMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T),
hipMemcpyDeviceToHost ) );
checkCUDA( hipEventRecord( start, 0 ) );
for( int i = 0; i < size; i++ )
{
cpu_sum += h_in[i];
} /* end for */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
FLOATTYPE_T diff = abs( cpu_sum - h_sum );
if( diff / h_sum < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_sum );
printf("GPU result is %f, CPU result is %f\n",h_sum, cpu_sum );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_sum ) );
checkCUDA( hipFree( d_tempArray ) );
checkCUDA( hipDeviceReset() );
return 0;
} /* end main */
| 7a11f9758aeed302d9a77e59419d427da0e8e0c5.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <cub/block/block_reduce.cuh>
#include "../debug.h"
#define N ( 1 << 27 )
#define THREADS_PER_BLOCK 128
#define FLOATTYPE_T float
__global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *out)
{
/* Allocate shared memory for cub::BlockReduce */
__shared__ typename cub::BlockReduce<FLOATTYPE_T,
THREADS_PER_BLOCK>::TempStorage sArray;
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* grid stride handling case where array is larger than number of threads
* launched
* Loop over the grid stride so that each thread adds up its relevant
* elements of the array and saves them to a register.
*/
FLOATTYPE_T tempResult = 0.0;
for( int i = globalIndex; i < n; i += blockDim.x * gridDim.x )
{
tempResult += in[i];
} /* end for */
/* Compute the block-wide sum for thread0 */
FLOATTYPE_T blockSum = cub::BlockReduce<FLOATTYPE_T,
THREADS_PER_BLOCK>(sArray).Sum(tempResult);
/* write the result back to global memory */
if( threadIdx.x == 0 ) out[blockIdx.x] = blockSum;
return;
}
int main()
{
FLOATTYPE_T *h_in, h_sum, cpu_sum;
FLOATTYPE_T *d_in, *d_sum, *d_tempArray;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
int tempArraySize = 32768;
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( &d_in, memBytes ) );
checkCUDA( cudaMalloc( &d_sum, sizeof(FLOATTYPE_T) ) );
checkCUDA( cudaMalloc( &d_tempArray, tempArraySize * sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_sum = 0.0;
cpu_sum = 0.0;
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, h_in, memBytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) );
checkCUDA( cudaMemset( d_tempArray, 0,
tempArraySize * sizeof(FLOATTYPE_T) ) );
/* calculate block and grid sizes */
dim3 threads1( THREADS_PER_BLOCK, 1, 1 );
int blk = min( (size / threads1.x), tempArraySize );
dim3 blocks( blk, 1, 1);
dim3 threads2( min(blocks.x,threads1.x), 1, 1 );
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
sumReduction<<< blocks, threads1 >>>( size, d_in, d_tempArray );
checkKERNEL()
sumReduction<<< 1, threads2 >>>( blocks.x, d_tempArray, d_sum );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( cudaMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T),
cudaMemcpyDeviceToHost ) );
checkCUDA( cudaEventRecord( start, 0 ) );
for( int i = 0; i < size; i++ )
{
cpu_sum += h_in[i];
} /* end for */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
FLOATTYPE_T diff = abs( cpu_sum - h_sum );
if( diff / h_sum < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_sum );
printf("GPU result is %f, CPU result is %f\n",h_sum, cpu_sum );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_sum ) );
checkCUDA( cudaFree( d_tempArray ) );
checkCUDA( cudaDeviceReset() );
return 0;
} /* end main */
|
b2dc62431bc4fc045705e065b9cb2b505be4d8e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved.
// Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/group_points_gpu.cu
#include <stdio.h>
#include <stdlib.h>
#include "group_points_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void GroupPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints,
int nsample, const Tensor points,
const Tensor idx, Tensor out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// blockIdx.x(col), blockIdx.y(row)
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
dim3 threads(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
points.scalar_type(), "group_points_forward_cuda_kernel", [&] {
hipLaunchKernelGGL(( group_points_forward_cuda_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
b, c, n, npoints, nsample, points.data_ptr<scalar_t>(),
idx.data_ptr<int>(), out.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
}
void GroupPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints,
int nsample, const Tensor grad_out,
const Tensor idx,
Tensor grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_out.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// blockIdx.x(col), blockIdx.y(row)
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
dim3 threads(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_out.scalar_type(), "group_points_backward_cuda_kernel", [&] {
hipLaunchKernelGGL(( group_points_backward_cuda_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
b, c, n, npoints, nsample, grad_out.data_ptr<scalar_t>(),
idx.data_ptr<int>(), grad_points.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
}
| b2dc62431bc4fc045705e065b9cb2b505be4d8e3.cu | // Copyright (c) OpenMMLab. All rights reserved.
// Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/group_points_gpu.cu
#include <stdio.h>
#include <stdlib.h>
#include "group_points_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void GroupPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints,
int nsample, const Tensor points,
const Tensor idx, Tensor out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// blockIdx.x(col), blockIdx.y(row)
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
dim3 threads(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
points.scalar_type(), "group_points_forward_cuda_kernel", [&] {
group_points_forward_cuda_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
b, c, n, npoints, nsample, points.data_ptr<scalar_t>(),
idx.data_ptr<int>(), out.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
}
void GroupPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints,
int nsample, const Tensor grad_out,
const Tensor idx,
Tensor grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
at::cuda::CUDAGuard device_guard(grad_out.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// blockIdx.x(col), blockIdx.y(row)
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
dim3 threads(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_out.scalar_type(), "group_points_backward_cuda_kernel", [&] {
group_points_backward_cuda_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
b, c, n, npoints, nsample, grad_out.data_ptr<scalar_t>(),
idx.data_ptr<int>(), grad_points.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
}
|
c27a500de303e867df5e7dd33bd722af8026d5a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// Copyright (c) 2013, Intel Corporation
/// Copyright (c) 2015, NVIDIA CORPORATION.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of <COPYRIGHT HOLDER> nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: transpose
///
/// PURPOSE: This program measures the time for the transpose of a
/// column-major stored matrix into a row-major stored matrix.
///
/// USAGE: Program input is the matrix order and the number of times to
/// repeat the operation:
///
/// transpose <matrix_size> <# iterations> [tile size]
///
/// An optional parameter specifies the tile size used to divide the
/// individual matrix blocks for improved cache and TLB performance.
///
/// The output consists of diagnostics to make sure the
/// transpose worked and timing statistics.
///
/// HISTORY: Written by Rob Van der Wijngaart, February 2009.
/// Converted to C++11 by Jeff Hammond, February 2016 and May 2017.
///
//////////////////////////////////////////////////////////////////////
#include "prk_util.h"
#include "prk_cuda.h"
#define TILED 1
#if TILED
// The kernel was derived from https://github.com/parallel-forall/code-samples/blob/master/series/cuda-cpp/transpose/transpose.cu,
// which is the reason for the additional copyright noted above.
const int tile_dim = 32;
const int block_rows = 8;
__global__ void transpose(int order, prk_float * A, prk_float * B)
{
auto x = blockIdx.x * tile_dim + threadIdx.x;
auto y = blockIdx.y * tile_dim + threadIdx.y;
auto width = gridDim.x * tile_dim;
for (int j = 0; j < tile_dim; j+= block_rows) {
B[x*width + (y+j)] += A[(y+j)*width + x];
A[(y+j)*width + x] += (prk_float)1;
}
}
#else
__global__ void transpose(unsigned order, prk_float * A, prk_float * B)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
auto j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i<order) && (j<order)) {
B[i*order+j] += A[j*order+i];
A[j*order+i] += (prk_float)1;
}
}
#endif
int main(int argc, char * argv[])
{
std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl;
std::cout << "C++11/CUDA Matrix transpose: B = A^T" << std::endl;
prk::CUDA::info info;
info.print();
//////////////////////////////////////////////////////////////////////
/// Read and test input parameters
//////////////////////////////////////////////////////////////////////
int iterations;
int order, tile_size;
try {
if (argc < 3) {
throw "Usage: <# iterations> <matrix order>";
}
iterations = std::atoi(argv[1]);
if (iterations < 1) {
throw "ERROR: iterations must be >= 1";
}
order = std::atoi(argv[2]);
if (order <= 0) {
throw "ERROR: Matrix Order must be greater than 0";
} else if (order > ::floor(std::sqrt(INT_MAX))) {
throw "ERROR: matrix dimension too large - overflow risk";
}
#if TILED
if (order % tile_dim != 0) {
std::cout << "Sorry, but order (" << order << ") must be evenly divible by " << tile_dim
<< " or the results are going to be wrong.\n";
}
#else
// default tile size for tiling of local transpose
tile_size = 32;
if (argc > 3) {
tile_size = std::atoi(argv[3]);
if (tile_size <= 0) tile_size = order;
if (tile_size > order) tile_size = order;
}
#endif
#ifdef __CORIANDERCC__
// This has not been analyzed, but it is an empirical fact.
if (order > 1234) {
std::cout << "The results are probably going to be wrong, because order>1234.\n";
}
#endif
}
catch (const char * e) {
std::cout << e << std::endl;
return 1;
}
std::cout << "Number of iterations = " << iterations << std::endl;
std::cout << "Matrix order = " << order << std::endl;
#if TILED
std::cout << "Tile size = " << tile_dim << std::endl;
#else
std::cout << "Tile size = " << tile_size << std::endl;
#endif
#if TILED
dim3 dimGrid(order/tile_dim, order/tile_dim, 1);
dim3 dimBlock(tile_dim, block_rows, 1);
#else
dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1);
dim3 dimBlock(tile_size, tile_size, 1);
#endif
info.checkDims(dimBlock, dimGrid);
//////////////////////////////////////////////////////////////////////
// Allocate space for the input and transpose matrix
//////////////////////////////////////////////////////////////////////
const size_t nelems = (size_t)order * (size_t)order;
const size_t bytes = nelems * sizeof(prk_float);
prk_float * h_a;
prk_float * h_b;
#ifndef __CORIANDERCC__
prk::CUDA::check( hipHostMalloc((void**)&h_a, bytes) );
prk::CUDA::check( hipHostMalloc((void**)&h_b, bytes) );
#else
h_a = new prk_float[nelems];
h_b = new prk_float[nelems];
#endif
// fill A with the sequence 0 to order^2-1
for (int j=0; j<order; j++) {
for (int i=0; i<order; i++) {
h_a[j*order+i] = static_cast<prk_float>(order*j+i);
h_b[j*order+i] = static_cast<prk_float>(0);
}
}
// copy input from host to device
prk_float * d_a;
prk_float * d_b;
prk::CUDA::check( hipMalloc((void**)&d_a, bytes) );
prk::CUDA::check( hipMalloc((void**)&d_b, bytes) );
prk::CUDA::check( hipMemcpy(d_a, &(h_a[0]), bytes, hipMemcpyHostToDevice) );
prk::CUDA::check( hipMemcpy(d_b, &(h_b[0]), bytes, hipMemcpyHostToDevice) );
auto trans_time = 0.0;
for (int iter = 0; iter<=iterations; iter++) {
if (iter==1) trans_time = prk::wtime();
hipLaunchKernelGGL(( transpose), dim3(dimGrid), dim3(dimBlock), 0, 0, order, d_a, d_b);
#ifndef __CORIANDERCC__
// silence "ignoring hipDeviceSynchronize for now" warning
prk::CUDA::check( hipDeviceSynchronize() );
#endif
}
trans_time = prk::wtime() - trans_time;
// copy output back to host
prk::CUDA::check( hipMemcpy(&(h_b[0]), d_b, bytes, hipMemcpyDeviceToHost) );
#ifdef VERBOSE
// copy input back to host - debug only
prk::CUDA::check( hipMemcpy(&(h_a[0]), d_a, bytes, hipMemcpyDeviceToHost) );
#endif
prk::CUDA::check( hipFree(d_b) );
prk::CUDA::check( hipFree(d_a) );
//////////////////////////////////////////////////////////////////////
/// Analyze and output results
//////////////////////////////////////////////////////////////////////
const double addit = (iterations+1.) * (iterations/2.);
double abserr(0);
for (int j=0; j<order; j++) {
for (int i=0; i<order; i++) {
const size_t ij = (size_t)i*(size_t)order+(size_t)j;
const size_t ji = (size_t)j*(size_t)order+(size_t)i;
const double reference = static_cast<double>(ij)*(1.+iterations)+addit;
abserr += ::fabs(h_b[ji] - reference);
}
}
#ifdef VERBOSE
std::cout << "Sum of absolute differences: " << abserr << std::endl;
#endif
#ifndef __CORIANDERCC__
prk::CUDA::check( hipHostFree(h_b) );
prk::CUDA::check( hipHostFree(h_a) );
#endif
const auto epsilon = 1.0e-8;
if (abserr < epsilon) {
std::cout << "Solution validates" << std::endl;
auto avgtime = trans_time/iterations;
auto bytes = (size_t)order * (size_t)order * sizeof(prk_float);
std::cout << "Rate (MB/s): " << 1.0e-6 * (2L*bytes)/avgtime
<< " Avg time (s): " << avgtime << std::endl;
} else {
#ifdef VERBOSE
for (int i=0; i<order; i++) {
for (int j=0; j<order; j++) {
std::cout << "(" << i << "," << j << ") = " << h_a[i*order+j] << ", " << h_b[i*order+j] << "\n";
}
}
#endif
std::cout << "ERROR: Aggregate squared error " << abserr
<< " exceeds threshold " << epsilon << std::endl;
return 1;
}
return 0;
}
| c27a500de303e867df5e7dd33bd722af8026d5a9.cu | ///
/// Copyright (c) 2013, Intel Corporation
/// Copyright (c) 2015, NVIDIA CORPORATION.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of <COPYRIGHT HOLDER> nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: transpose
///
/// PURPOSE: This program measures the time for the transpose of a
/// column-major stored matrix into a row-major stored matrix.
///
/// USAGE: Program input is the matrix order and the number of times to
/// repeat the operation:
///
/// transpose <matrix_size> <# iterations> [tile size]
///
/// An optional parameter specifies the tile size used to divide the
/// individual matrix blocks for improved cache and TLB performance.
///
/// The output consists of diagnostics to make sure the
/// transpose worked and timing statistics.
///
/// HISTORY: Written by Rob Van der Wijngaart, February 2009.
/// Converted to C++11 by Jeff Hammond, February 2016 and May 2017.
///
//////////////////////////////////////////////////////////////////////
#include "prk_util.h"
#include "prk_cuda.h"
#define TILED 1
#if TILED
// The kernel was derived from https://github.com/parallel-forall/code-samples/blob/master/series/cuda-cpp/transpose/transpose.cu,
// which is the reason for the additional copyright noted above.
const int tile_dim = 32;
const int block_rows = 8;
// Tiled transpose-and-increment (PRK semantics): B += A^T, then A += 1.
// Expected launch layout: grid = (order/tile_dim, order/tile_dim),
// block = (tile_dim, block_rows); each thread processes tile_dim/block_rows
// elements of one tile column. There is no bounds check, so `order` must be
// a multiple of tile_dim (the host code only warns when it is not).
// Note: the `order` parameter is unused here; the matrix width is derived
// from the grid size instead (width == order only when the grid covers the
// whole matrix exactly).
__global__ void transpose(int order, prk_float * A, prk_float * B)
{
    auto x = blockIdx.x * tile_dim + threadIdx.x;   // column index into A
    auto y = blockIdx.y * tile_dim + threadIdx.y;   // first row index handled by this thread
    auto width = gridDim.x * tile_dim;              // matrix width implied by the grid
    for (int j = 0; j < tile_dim; j+= block_rows) {
        B[x*width + (y+j)] += A[(y+j)*width + x];   // accumulate the transposed element
        A[(y+j)*width + x] += (prk_float)1;         // increment source (validation scheme)
    }
}
#else
// Naive element-per-thread transpose-and-increment: B += A^T and A += 1.
// The 2D grid/block cover an order x order matrix; threads outside the
// matrix simply return.
__global__ void transpose(unsigned order, prk_float * A, prk_float * B)
{
    const unsigned col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= order || row >= order) return;
    const unsigned dst = col * order + row;   // element of B receiving the update
    const unsigned src = row * order + col;   // transposed element of A
    B[dst] += A[src];
    A[src] += (prk_float)1;
}
#endif
// Driver: parses <# iterations> <order> [tile size], runs the transpose
// kernel iterations+1 times (the first run is an untimed warm-up), then
// validates B against the analytically known result and reports bandwidth.
int main(int argc, char * argv[])
{
  std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl;
  std::cout << "C++11/CUDA Matrix transpose: B = A^T" << std::endl;
  prk::CUDA::info info;
  info.print();
  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////
  int iterations;
  int order, tile_size;   // tile_size is only used in the untiled (#else) build
  try {
      if (argc < 3) {
        throw "Usage: <# iterations> <matrix order>";
      }
      iterations  = std::atoi(argv[1]);
      if (iterations < 1) {
        throw "ERROR: iterations must be >= 1";
      }
      order = std::atoi(argv[2]);
      if (order <= 0) {
        throw "ERROR: Matrix Order must be greater than 0";
      } else if (order > std::floor(std::sqrt(INT_MAX))) {
        // order^2 must fit in an int for the index arithmetic below
        throw "ERROR: matrix dimension too large - overflow risk";
      }
#if TILED
      // The tiled kernel has no bounds checks, so a non-multiple order is
      // only warned about here and then silently produces wrong results.
      if (order % tile_dim != 0) {
          std::cout << "Sorry, but order (" << order << ") must be evenly divible by " << tile_dim
                    << " or the results are going to be wrong.\n";
      }
#else
      // default tile size for tiling of local transpose
      tile_size = 32;
      if (argc > 3) {
          tile_size = std::atoi(argv[3]);
          if (tile_size <= 0) tile_size = order;
          if (tile_size > order) tile_size = order;
      }
#endif
#ifdef __CORIANDERCC__
      // This has not been analyzed, but it is an empirical fact.
      if (order > 1234) {
        std::cout << "The results are probably going to be wrong, because order>1234.\n";
      }
#endif
  }
  catch (const char * e) {
    std::cout << e << std::endl;
    return 1;
  }
  std::cout << "Number of iterations = " << iterations << std::endl;
  std::cout << "Matrix order = " << order << std::endl;
#if TILED
  std::cout << "Tile size = " << tile_dim << std::endl;
#else
  std::cout << "Tile size = " << tile_size << std::endl;
#endif
  // Launch geometry: tiled build uses (tile_dim x block_rows) blocks, the
  // untiled build uses square (tile_size x tile_size) blocks with ceil-div.
#if TILED
  dim3 dimGrid(order/tile_dim, order/tile_dim, 1);
  dim3 dimBlock(tile_dim, block_rows, 1);
#else
  dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1);
  dim3 dimBlock(tile_size, tile_size, 1);
#endif
  info.checkDims(dimBlock, dimGrid);
  //////////////////////////////////////////////////////////////////////
  // Allocate space for the input and transpose matrix
  //////////////////////////////////////////////////////////////////////
  const size_t nelems = (size_t)order * (size_t)order;
  const size_t bytes = nelems * sizeof(prk_float);
  prk_float * h_a;
  prk_float * h_b;
#ifndef __CORIANDERCC__
  // Pinned host memory for faster, async-capable transfers
  prk::CUDA::check( cudaMallocHost((void**)&h_a, bytes) );
  prk::CUDA::check( cudaMallocHost((void**)&h_b, bytes) );
#else
  h_a = new prk_float[nelems];
  h_b = new prk_float[nelems];
#endif
  // fill A with the sequence 0 to order^2-1
  for (int j=0; j<order; j++) {
    for (int i=0; i<order; i++) {
      h_a[j*order+i] = static_cast<prk_float>(order*j+i);
      h_b[j*order+i] = static_cast<prk_float>(0);
    }
  }
  // copy input from host to device
  prk_float * d_a;
  prk_float * d_b;
  prk::CUDA::check( cudaMalloc((void**)&d_a, bytes) );
  prk::CUDA::check( cudaMalloc((void**)&d_b, bytes) );
  prk::CUDA::check( cudaMemcpy(d_a, &(h_a[0]), bytes, cudaMemcpyHostToDevice) );
  prk::CUDA::check( cudaMemcpy(d_b, &(h_b[0]), bytes, cudaMemcpyHostToDevice) );
  auto trans_time = 0.0;
  for (int iter = 0; iter<=iterations; iter++) {
    if (iter==1) trans_time = prk::wtime();   // start the clock after the warm-up iteration
    transpose<<<dimGrid, dimBlock>>>(order, d_a, d_b);
#ifndef __CORIANDERCC__
    // silence "ignoring cudaDeviceSynchronize for now" warning
    prk::CUDA::check( cudaDeviceSynchronize() );
#endif
  }
  trans_time = prk::wtime() - trans_time;
  // copy output back to host
  prk::CUDA::check( cudaMemcpy(&(h_b[0]), d_b, bytes, cudaMemcpyDeviceToHost) );
#ifdef VERBOSE
  // copy input back to host - debug only
  prk::CUDA::check( cudaMemcpy(&(h_a[0]), d_a, bytes, cudaMemcpyDeviceToHost) );
#endif
  prk::CUDA::check( cudaFree(d_b) );
  prk::CUDA::check( cudaFree(d_a) );
  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////
  // Each element of A was incremented once per kernel run; `addit` accounts
  // for the accumulated increments over iterations+1 runs.
  const double addit = (iterations+1.) * (iterations/2.);
  double abserr(0);
  for (int j=0; j<order; j++) {
    for (int i=0; i<order; i++) {
      const size_t ij = (size_t)i*(size_t)order+(size_t)j;
      const size_t ji = (size_t)j*(size_t)order+(size_t)i;
      const double reference = static_cast<double>(ij)*(1.+iterations)+addit;
      abserr += std::fabs(h_b[ji] - reference);
    }
  }
#ifdef VERBOSE
  std::cout << "Sum of absolute differences: " << abserr << std::endl;
#endif
#ifndef __CORIANDERCC__
  prk::CUDA::check( cudaFreeHost(h_b) );
  prk::CUDA::check( cudaFreeHost(h_a) );
#endif
  // NOTE(review): in the __CORIANDERCC__ build the new[]'d h_a/h_b are never
  // freed (benign at program exit, but worth confirming).
  const auto epsilon = 1.0e-8;
  if (abserr < epsilon) {
    std::cout << "Solution validates" << std::endl;
    auto avgtime = trans_time/iterations;
    auto bytes = (size_t)order * (size_t)order * sizeof(prk_float);
    // 2*bytes: each iteration reads A and writes B
    std::cout << "Rate (MB/s): " << 1.0e-6 * (2L*bytes)/avgtime
              << " Avg time (s): " << avgtime << std::endl;
  } else {
#ifdef VERBOSE
    for (int i=0; i<order; i++) {
      for (int j=0; j<order; j++) {
        std::cout << "(" << i << "," << j << ") = " << h_a[i*order+j] << ", " << h_b[i*order+j] << "\n";
      }
    }
#endif
    std::cout << "ERROR: Aggregate squared error " << abserr
              << " exceeds threshold " << epsilon << std::endl;
    return 1;
  }
  return 0;
}
|
743ac7010af5e3de70d792287414dccec7e651c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N = 8
/* Nome: Nathana Facion RA:191079 */
/* Exercicio 7 - Matriz Add */
/* Data: 20/04/2017 */
// Element-wise matrix addition C = A + B for a linhas x colunas matrix stored
// row-major. One thread per element; out-of-bounds threads do nothing.
__global__
void addMatriz(float *A,float *B, float *C, int linhas, int colunas ){
    int i = threadIdx.x + blockDim.x*blockIdx.x; // row index
    int j = threadIdx.y + blockDim.y*blockIdx.y; // column index
    if (i < linhas && j < colunas){
        C[i*colunas+j] = A[i*colunas+j] + B[i*colunas+j];
    }
}
/* Nome: Nathana Facion RA:191079 — Exercise 7: matrix add (reviewed/fixed) */
// Reads the matrix dimensions from stdin, fills A and B with i+j, computes
// C = A + B on the GPU and prints the sum of all elements of C.
//
// Fixes relative to the original:
//  * host arrays A/B/C were declared twice (compile error) and never allocated;
//  * `size` was computed before `linhas`/`colunas` were read (uninitialized);
//  * malloc'd host buffers were assigned to the device-pointer variables and
//    then leaked when hipMalloc overwrote them;
//  * hipMemcpy used misspelled names (A_Cuca/B_Cuca/C_Cuca) and the wrong
//    source/destination order;
//  * the kernel launch omitted the linhas/colunas arguments;
//  * the result matrix was never copied back to the host before summing;
//  * the broken `#define N = 8` macro is avoided with a local constant;
//  * host data is float to match the kernel's float* parameters.
int main()
{
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);

    const size_t size = (size_t)linhas * (size_t)colunas * sizeof(float);

    // Host buffers
    float *A = (float *)malloc(size);
    float *B = (float *)malloc(size);
    float *C = (float *)malloc(size);

    // Device buffers
    float *A_Cuda, *B_Cuda, *C_Cuda;
    hipMalloc((void**)&A_Cuda, size);
    hipMalloc((void**)&B_Cuda, size);
    hipMalloc((void**)&C_Cuda, size);

    // Initialize inputs: A[i][j] = B[i][j] = i + j
    for (int i = 0; i < linhas; i++)
        for (int j = 0; j < colunas; j++)
            A[i*colunas+j] = B[i*colunas+j] = (float)(i + j);

    // Copy inputs host -> device
    hipMemcpy(A_Cuda, A, size, hipMemcpyHostToDevice);
    hipMemcpy(B_Cuda, B, size, hipMemcpyHostToDevice);

    const int tile = 8;  // threads per block side (replaces the broken N macro)
    dim3 threadPorBloco(tile, tile);
    // Block x covers rows (kernel's i), block y covers columns (kernel's j)
    dim3 numeroBlocos((linhas + tile - 1) / tile, (colunas + tile - 1) / tile);
    hipLaunchKernelGGL(addMatriz, numeroBlocos, threadPorBloco, 0, 0,
                       A_Cuda, B_Cuda, C_Cuda, linhas, colunas);

    // Copy the result device -> host before summing on the CPU
    hipMemcpy(C, C_Cuda, size, hipMemcpyDeviceToHost);

    long long int somador = 0;
    for (int i = 0; i < linhas; i++)
        for (int j = 0; j < colunas; j++)
            somador += (long long int)C[i*colunas+j];
    printf("%lli\n", somador);

    // Release GPU and host memory
    hipFree(A_Cuda);
    hipFree(B_Cuda);
    hipFree(C_Cuda);
    free(A);
    free(B);
    free(C);
    return 0;
}
| 743ac7010af5e3de70d792287414dccec7e651c6.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N = 8
/* Nome: Nathana Facion RA:191079 */
/* Exercicio 7 - Matriz Add */
/* Data: 20/04/2017 */
// Element-wise sum C = A + B over a linhas x colunas row-major matrix.
// Each thread owns at most one element; threads outside the matrix return early.
__global__
void addMatriz(float *A,float *B, float *C, int linhas, int colunas ){
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    const int col = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= linhas || col >= colunas) return;
    const int idx = row * colunas + col;
    C[idx] = A[idx] + B[idx];
}
/* Nome: Nathana Facion RA:191079 — Exercise 7: matrix add (reviewed/fixed) */
// Reads the matrix dimensions from stdin, fills A and B with i+j, computes
// C = A + B on the GPU and prints the sum of all elements of C.
//
// Fixes relative to the original:
//  * host arrays A/B/C were declared twice (compile error) and never allocated;
//  * `size` was computed before `linhas`/`colunas` were read (uninitialized);
//  * malloc'd host buffers were assigned to the device-pointer variables and
//    then leaked when cudaMalloc overwrote them;
//  * cudaMemcpy used misspelled names (A_Cuca/B_Cuca/C_Cuca) and the wrong
//    source/destination order;
//  * the kernel launch omitted the linhas/colunas arguments;
//  * the result matrix was never copied back to the host before summing;
//  * the broken `#define N = 8` macro is avoided with a local constant;
//  * host data is float to match the kernel's float* parameters.
int main()
{
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);

    const size_t size = (size_t)linhas * (size_t)colunas * sizeof(float);

    // Host buffers
    float *A = (float *)malloc(size);
    float *B = (float *)malloc(size);
    float *C = (float *)malloc(size);

    // Device buffers
    float *A_Cuda, *B_Cuda, *C_Cuda;
    cudaMalloc((void**)&A_Cuda, size);
    cudaMalloc((void**)&B_Cuda, size);
    cudaMalloc((void**)&C_Cuda, size);

    // Initialize inputs: A[i][j] = B[i][j] = i + j
    for (int i = 0; i < linhas; i++)
        for (int j = 0; j < colunas; j++)
            A[i*colunas+j] = B[i*colunas+j] = (float)(i + j);

    // Copy inputs host -> device
    cudaMemcpy(A_Cuda, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_Cuda, B, size, cudaMemcpyHostToDevice);

    const int tile = 8;  // threads per block side (replaces the broken N macro)
    dim3 threadPorBloco(tile, tile);
    // Block x covers rows (kernel's i), block y covers columns (kernel's j)
    dim3 numeroBlocos((linhas + tile - 1) / tile, (colunas + tile - 1) / tile);
    addMatriz<<<numeroBlocos,threadPorBloco>>>(A_Cuda, B_Cuda, C_Cuda, linhas, colunas);

    // Copy the result device -> host before summing on the CPU
    // (cudaMemcpy is blocking, so no explicit synchronize is needed)
    cudaMemcpy(C, C_Cuda, size, cudaMemcpyDeviceToHost);

    long long int somador = 0;
    for (int i = 0; i < linhas; i++)
        for (int j = 0; j < colunas; j++)
            somador += (long long int)C[i*colunas+j];
    printf("%lli\n", somador);

    // Release GPU and host memory
    cudaFree(A_Cuda);
    cudaFree(B_Cuda);
    cudaFree(C_Cuda);
    free(A);
    free(B);
    free(C);
    return 0;
}
|
639f0d9bbedeff2328b44a5e9a71b44850818401.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#if TORCH_HIP_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
// Abort the process with a diagnostic if the most recent runtime call failed.
// `msg` is a caller-supplied label identifying the operation being checked.
void checkCUDAError(const char *msg) {
  const hipError_t status = hipGetLastError();
  if (status == hipSuccess) {
    return;
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
  exit(EXIT_FAILURE);
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Returns three uniform [0,1) pseudo-random floats for pixel (x, y).
// The engine is seeded from the pixel's linear index scaled by `time`
// (the iteration counter), so the sequence differs per pixel and per frame.
// `hash` is defined elsewhere in this project.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
  int index = x + (y * resolution.x);   // linear pixel index (resolution.x truncated to int)
  thrust::default_random_engine rng(hash(index*time));
  thrust::uniform_real_distribution<float> u01(0,1);
  return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
// Build the primary ray from the eye through pixel (x, y), jittering the
// sample point within the pixel each iteration for supersampled antialiasing.
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
  //establish "right" camera direction
  // NOTE(review): glm::normalize returns a new vector; these two calls discard
  // their results, so eye/view are used un-normalized below -- confirm intent.
  glm::normalize(eye); glm::normalize(view);
  glm::vec3 right = glm::normalize(glm::cross(up, view));
  // calculate P1 and P2 in both x and y directions
  // NOTE(review): the 4.0 factor inside tan() is unusual for a half-angle FOV;
  // verify against how the scene loader defines cam.fov.
  glm::vec3 image_center = eye + view;
  glm::vec3 P1_X = image_center - tan((float)4.0*fov.x)*right;
  glm::vec3 P2_X = image_center + tan((float)4.0*fov.x)*right;
  glm::vec3 P1_Y = image_center - tan((float)4.0*fov.y)*up;
  glm::vec3 P2_Y = image_center + tan((float)4.0*fov.y)*up;
  // Corners of the image plane derived from the axis extremes
  glm::vec3 bottom_left = P1_X + (P1_Y - image_center);
  glm::vec3 bottom_right = P2_X + (P1_Y - image_center);
  glm::vec3 top_left = P1_X + (P2_Y - image_center);
  glm::vec3 imgRight = bottom_right - bottom_left;
  glm::vec3 imgUp = top_left - bottom_left;
  // supersample the pixels by taking a randomly offset ray in each iteration
  glm::vec3 random_offset = generateRandomNumberFromThread(resolution, time, x, y);
  float x_offset = random_offset.x;
  float y_offset = random_offset.y;
  glm::vec3 img_point = bottom_left + ((float)x + x_offset)/(float)resolution.x*imgRight + ((float)y + y_offset)/(float)resolution.y*imgUp;
  glm::vec3 direction = glm::normalize(img_point - eye);
  // return value
  ray r; r.origin = eye; r.direction = direction;
  return r;
}
//Kernel that blacks out a given image buffer
// Reset every pixel of the accumulation buffer to black; one thread per pixel.
// (Keeps the original inclusive bound check on both axes.)
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px <= resolution.x && py <= resolution.y) {
      const int pixel = px + py * resolution.x;
      image[pixel] = glm::vec3(0,0,0);
    }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Convert the float image to 8-bit RGBA and write it into the OpenGL pixel
// buffer object; one thread per pixel. Matches the original behavior: values
// above 255 are clamped, negatives are left as-is, alpha (w) is set to 0.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px <= resolution.x && py <= resolution.y) {
      const int pixel = px + py * resolution.x;
      // Scale [0,1] color to [0,255], clamping only the upper bound.
      glm::vec3 color;
      color.x = image[pixel].x*255.0;
      color.y = image[pixel].y*255.0;
      color.z = image[pixel].z*255.0;
      if (color.x > 255) color.x = 255;
      if (color.y > 255) color.y = 255;
      if (color.z > 255) color.z = 255;
      // Each thread writes one pixel location in the texture (texel)
      PBOpos[pixel].w = 0;
      PBOpos[pixel].x = color.x;
      PBOpos[pixel].y = color.y;
      PBOpos[pixel].z = color.z;
    }
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
// Per-pixel raytrace: casts the primary camera ray, shades with Phong, and
// follows mirror reflections up to traceDepth bounces, accumulating the
// clamped result into colors[] averaged over `iterations` samples.
__global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
                            staticGeom* geoms, int numberOfGeoms, staticGeom* lights, int numberOfLights,
                            material* materials, int iterations, int traceDepth) {
  // Find index of pixel and create empty color vector
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  glm::vec3 newColor(0,0,0);
  // Get initial ray from camera through this position
  ray currentRay = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
  ray reflectionRay;
  int currentDepth = 0;
  bool reflect = false;
  glm::vec3 currentSpecCoeff(1.0f, 1.0f, 1.0f);   // product of specular colors along the bounce chain
  // Return values for the intersection test
  glm::vec3 intersection_point;
  glm::vec3 intersection_normal;
  material intersection_mtl;   // NOTE(review): read below even when nothing is hit (min_t stays -1) -- confirm every ray hits geometry
  do {
    // Find the closest geometry intersection along the ray
    float t;
    float min_t = -1.0;
    for (int i = 0; i < numberOfGeoms; i++) {
      staticGeom geom = geoms[i];
      t = geomIntersectionTest(geom, currentRay, intersection_point, intersection_normal);
      // NOTE(review): geomIntersectionTest appears to overwrite
      // intersection_point/normal on every tested geometry, so after this loop
      // they may belong to the last hit rather than the closest one (only the
      // material tracks the minimum) -- verify against its definition.
      if ((t > 0.0) && (t < min_t || min_t < 0.0)) {
        min_t = t;
        intersection_mtl = materials[geom.materialid];
      }
    }
    // find reflected ray if one exists
    if (intersection_mtl.hasReflective) {
      reflect = true;
      glm::vec3 rd = calculateReflectionDirection(intersection_normal, currentRay.direction);
      glm::vec3 ro = glm::vec3(intersection_point);
      reflectionRay.direction = rd; reflectionRay.origin = ro;
    }
    else { reflect = false; }
    // Find and clamp diffuse contribution at point (each channel to [0,1])
    glm::vec3 phong = computePhongTotal(currentRay, intersection_point, intersection_normal, intersection_mtl,
                                        lights, numberOfLights, geoms, numberOfGeoms, materials, (float)time);
    if (phong.x > 1.0f) { phong.x = 1.0f; } else if (phong.x < 0.0f) { phong.x = 0.0f; }
    if (phong.y > 1.0f) { phong.y = 1.0f; } else if (phong.y < 0.0f) { phong.y = 0.0f; }
    if (phong.z > 1.0f) { phong.z = 1.0f; } else if (phong.z < 0.0f) { phong.z = 0.0f; }
    newColor += (currentSpecCoeff * phong);
    // Advance to the reflected ray and attenuate subsequent bounces
    currentDepth++;
    currentRay.origin = reflectionRay.origin;
    currentRay.direction = reflectionRay.direction;
    currentSpecCoeff *= intersection_mtl.specularColor;
  }
  while (reflect && (currentDepth < traceDepth));
  // Clamp the accumulated color to [0,1] before averaging into the buffer
  if (newColor.x > 1.0f) { newColor.x = 1.0f; } else if (newColor.x < 0.0f) { newColor.x = 0.0f; }
  if (newColor.y > 1.0f) { newColor.y = 1.0f; } else if (newColor.y < 0.0f) { newColor.y = 0.0f; }
  if (newColor.z > 1.0f) { newColor.z = 1.0f; } else if (newColor.z < 0.0f) { newColor.z = 0.0f; }
  if((x<=resolution.x && y<=resolution.y))
    colors[index] += newColor / (float)iterations;
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Host-side render entry point: packages the scene for `frame` into POD
// arrays, uploads them to the GPU, launches the raytrace and PBO-conversion
// kernels, copies the accumulated image back into renderCam->image, and
// releases all device/host temporaries.
//
// Fix: geomList/lightList/materialList are allocated with new[], so they must
// be released with delete[] (plain delete on an array is undefined behavior).
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){

  int traceDepth = 3; //determines how many bounces the raytracer traces

  // set up crucial magic
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));

  //send image to GPU
  glm::vec3* cudaimage = NULL;
  hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice);

  //package geometry (frame-specific transforms) and count emissive lights
  int numberOfLights = 0;
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.objectid = geoms[i].objectid;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
    //mark as a new light if positive emmitance
    if (materials[newStaticGeom.materialid].emittance > 0.0)
      numberOfLights++;
  }

  staticGeom* cudageoms = NULL;
  hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);

  //package materials and send to GPU
  material* materialList = new material[numberOfMaterials];
  for (int i=0; i<numberOfMaterials; i++){
    material newMaterial;
    newMaterial.color = materials[i].color;
    newMaterial.specularExponent = materials[i].specularExponent;
    newMaterial.specularColor = materials[i].specularColor;
    newMaterial.hasReflective = materials[i].hasReflective;
    newMaterial.hasRefractive = materials[i].hasRefractive;
    newMaterial.indexOfRefraction = materials[i].indexOfRefraction;
    newMaterial.hasScatter = materials[i].hasScatter;
    newMaterial.absorptionCoefficient = materials[i].absorptionCoefficient;
    newMaterial.reducedScatterCoefficient = materials[i].reducedScatterCoefficient;
    newMaterial.emittance = materials[i].emittance;
    materialList[i] = newMaterial;
  }

  material* cudamaterials = NULL;
  hipMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
  hipMemcpy( cudamaterials, materialList, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);

  //package the emissive geometry separately as the light list
  int light_idx = 0;
  staticGeom* lightList = new staticGeom[numberOfLights];
  for(int i=0; i<numberOfGeoms; i++){
    if (materials[geoms[i].materialid].emittance > 0.0) {
      staticGeom newLight;
      newLight.objectid = geoms[i].objectid;
      newLight.type = geoms[i].type;
      newLight.materialid = geoms[i].materialid;
      newLight.translation = geoms[i].translations[frame];
      newLight.rotation = geoms[i].rotations[frame];
      newLight.scale = geoms[i].scales[frame];
      newLight.transform = geoms[i].transforms[frame];
      newLight.inverseTransform = geoms[i].inverseTransforms[frame];
      lightList[light_idx++] = newLight;
    }
  }

  staticGeom* cudalights = NULL;
  hipMalloc((void**)&cudalights, numberOfLights*sizeof(staticGeom));
  hipMemcpy(cudalights, lightList, numberOfLights*sizeof(staticGeom), hipMemcpyHostToDevice);

  //package camera
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;

  //kernel launches
  hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudalights, numberOfLights, cudamaterials, renderCam->iterations, traceDepth);
  hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage);

  //retrieve image from GPU
  hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost);

  //free up stuff, or else we'll leak memory like a madman
  hipFree( cudaimage );
  hipFree( cudageoms );
  hipFree( cudalights );
  hipFree( cudamaterials );
  delete [] geomList;      // was: delete geomList (UB on new[]'d arrays)
  delete [] lightList;
  delete [] materialList;

  // make certain the kernel has completed
  hipDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
| 639f0d9bbedeff2328b44a5e9a71b44850818401.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "glm/glm.hpp"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#if CUDA_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
// Abort the process with a diagnostic if the most recent runtime call failed.
// `msg` is a caller-supplied label identifying the operation being checked.
void checkCUDAError(const char *msg) {
  const cudaError_t status = cudaGetLastError();
  if (status == cudaSuccess) {
    return;
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
  exit(EXIT_FAILURE);
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Returns three uniform [0,1) pseudo-random floats for pixel (x, y).
// The engine is seeded from the pixel's linear index scaled by `time`
// (the iteration counter), so the sequence differs per pixel and per frame.
// `hash` is defined elsewhere in this project.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
  int index = x + (y * resolution.x);   // linear pixel index (resolution.x truncated to int)
  thrust::default_random_engine rng(hash(index*time));
  thrust::uniform_real_distribution<float> u01(0,1);
  return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//TODO: IMPLEMENT THIS FUNCTION
//Function that does the initial raycast from the camera
// Build the primary ray from the eye through pixel (x, y), jittering the
// sample point within the pixel each iteration for supersampled antialiasing.
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
  //establish "right" camera direction
  // NOTE(review): glm::normalize returns a new vector; these two calls discard
  // their results, so eye/view are used un-normalized below -- confirm intent.
  glm::normalize(eye); glm::normalize(view);
  glm::vec3 right = glm::normalize(glm::cross(up, view));
  // calculate P1 and P2 in both x and y directions
  // NOTE(review): the 4.0 factor inside tan() is unusual for a half-angle FOV;
  // verify against how the scene loader defines cam.fov.
  glm::vec3 image_center = eye + view;
  glm::vec3 P1_X = image_center - tan((float)4.0*fov.x)*right;
  glm::vec3 P2_X = image_center + tan((float)4.0*fov.x)*right;
  glm::vec3 P1_Y = image_center - tan((float)4.0*fov.y)*up;
  glm::vec3 P2_Y = image_center + tan((float)4.0*fov.y)*up;
  // Corners of the image plane derived from the axis extremes
  glm::vec3 bottom_left = P1_X + (P1_Y - image_center);
  glm::vec3 bottom_right = P2_X + (P1_Y - image_center);
  glm::vec3 top_left = P1_X + (P2_Y - image_center);
  glm::vec3 imgRight = bottom_right - bottom_left;
  glm::vec3 imgUp = top_left - bottom_left;
  // supersample the pixels by taking a randomly offset ray in each iteration
  glm::vec3 random_offset = generateRandomNumberFromThread(resolution, time, x, y);
  float x_offset = random_offset.x;
  float y_offset = random_offset.y;
  glm::vec3 img_point = bottom_left + ((float)x + x_offset)/(float)resolution.x*imgRight + ((float)y + y_offset)/(float)resolution.y*imgUp;
  glm::vec3 direction = glm::normalize(img_point - eye);
  // return value
  ray r; r.origin = eye; r.direction = direction;
  return r;
}
//Kernel that blacks out a given image buffer
// Reset every pixel of the accumulation buffer to black; one thread per pixel.
// (Keeps the original inclusive bound check on both axes.)
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px <= resolution.x && py <= resolution.y) {
      const int pixel = px + py * resolution.x;
      image[pixel] = glm::vec3(0,0,0);
    }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Convert the float image to 8-bit RGBA and write it into the OpenGL pixel
// buffer object; one thread per pixel. Matches the original behavior: values
// above 255 are clamped, negatives are left as-is, alpha (w) is set to 0.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px <= resolution.x && py <= resolution.y) {
      const int pixel = px + py * resolution.x;
      // Scale [0,1] color to [0,255], clamping only the upper bound.
      glm::vec3 color;
      color.x = image[pixel].x*255.0;
      color.y = image[pixel].y*255.0;
      color.z = image[pixel].z*255.0;
      if (color.x > 255) color.x = 255;
      if (color.y > 255) color.y = 255;
      if (color.z > 255) color.z = 255;
      // Each thread writes one pixel location in the texture (texel)
      PBOpos[pixel].w = 0;
      PBOpos[pixel].x = color.x;
      PBOpos[pixel].y = color.y;
      PBOpos[pixel].z = color.z;
    }
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
// Per-pixel raytrace: casts the primary camera ray, shades with Phong, and
// follows mirror reflections up to traceDepth bounces, accumulating the
// clamped result into colors[] averaged over `iterations` samples.
__global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors,
                            staticGeom* geoms, int numberOfGeoms, staticGeom* lights, int numberOfLights,
                            material* materials, int iterations, int traceDepth) {
  // Find index of pixel and create empty color vector
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  glm::vec3 newColor(0,0,0);
  // Get initial ray from camera through this position
  ray currentRay = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
  ray reflectionRay;
  int currentDepth = 0;
  bool reflect = false;
  glm::vec3 currentSpecCoeff(1.0f, 1.0f, 1.0f);   // product of specular colors along the bounce chain
  // Return values for the intersection test
  glm::vec3 intersection_point;
  glm::vec3 intersection_normal;
  material intersection_mtl;   // NOTE(review): read below even when nothing is hit (min_t stays -1) -- confirm every ray hits geometry
  do {
    // Find the closest geometry intersection along the ray
    float t;
    float min_t = -1.0;
    for (int i = 0; i < numberOfGeoms; i++) {
      staticGeom geom = geoms[i];
      t = geomIntersectionTest(geom, currentRay, intersection_point, intersection_normal);
      // NOTE(review): geomIntersectionTest appears to overwrite
      // intersection_point/normal on every tested geometry, so after this loop
      // they may belong to the last hit rather than the closest one (only the
      // material tracks the minimum) -- verify against its definition.
      if ((t > 0.0) && (t < min_t || min_t < 0.0)) {
        min_t = t;
        intersection_mtl = materials[geom.materialid];
      }
    }
    // find reflected ray if one exists
    if (intersection_mtl.hasReflective) {
      reflect = true;
      glm::vec3 rd = calculateReflectionDirection(intersection_normal, currentRay.direction);
      glm::vec3 ro = glm::vec3(intersection_point);
      reflectionRay.direction = rd; reflectionRay.origin = ro;
    }
    else { reflect = false; }
    // Find and clamp diffuse contribution at point (each channel to [0,1])
    glm::vec3 phong = computePhongTotal(currentRay, intersection_point, intersection_normal, intersection_mtl,
                                        lights, numberOfLights, geoms, numberOfGeoms, materials, (float)time);
    if (phong.x > 1.0f) { phong.x = 1.0f; } else if (phong.x < 0.0f) { phong.x = 0.0f; }
    if (phong.y > 1.0f) { phong.y = 1.0f; } else if (phong.y < 0.0f) { phong.y = 0.0f; }
    if (phong.z > 1.0f) { phong.z = 1.0f; } else if (phong.z < 0.0f) { phong.z = 0.0f; }
    newColor += (currentSpecCoeff * phong);
    // Advance to the reflected ray and attenuate subsequent bounces
    currentDepth++;
    currentRay.origin = reflectionRay.origin;
    currentRay.direction = reflectionRay.direction;
    currentSpecCoeff *= intersection_mtl.specularColor;
  }
  while (reflect && (currentDepth < traceDepth));
  // Clamp the accumulated color to [0,1] before averaging into the buffer
  if (newColor.x > 1.0f) { newColor.x = 1.0f; } else if (newColor.x < 0.0f) { newColor.x = 0.0f; }
  if (newColor.y > 1.0f) { newColor.y = 1.0f; } else if (newColor.y < 0.0f) { newColor.y = 0.0f; }
  if (newColor.z > 1.0f) { newColor.z = 1.0f; } else if (newColor.z < 0.0f) { newColor.z = 0.0f; }
  if((x<=resolution.x && y<=resolution.y))
    colors[index] += newColor / (float)iterations;
}
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Host-side render entry point: packages the scene for `frame` into POD
// arrays, uploads them to the GPU, launches the raytrace and PBO-conversion
// kernels, copies the accumulated image back into renderCam->image, and
// releases all device/host temporaries.
//
// Fixes: geomList/lightList/materialList are allocated with new[], so they
// must be released with delete[] (plain delete on an array is undefined
// behavior); cudaThreadSynchronize() is deprecated and replaced by
// cudaDeviceSynchronize().
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){

  int traceDepth = 3; //determines how many bounces the raytracer traces

  // set up crucial magic
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));

  //send image to GPU
  glm::vec3* cudaimage = NULL;
  cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3));
  cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice);

  //package geometry (frame-specific transforms) and count emissive lights
  int numberOfLights = 0;
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.objectid = geoms[i].objectid;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
    //mark as a new light if positive emmitance
    if (materials[newStaticGeom.materialid].emittance > 0.0)
      numberOfLights++;
  }

  staticGeom* cudageoms = NULL;
  cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);

  //package materials and send to GPU
  material* materialList = new material[numberOfMaterials];
  for (int i=0; i<numberOfMaterials; i++){
    material newMaterial;
    newMaterial.color = materials[i].color;
    newMaterial.specularExponent = materials[i].specularExponent;
    newMaterial.specularColor = materials[i].specularColor;
    newMaterial.hasReflective = materials[i].hasReflective;
    newMaterial.hasRefractive = materials[i].hasRefractive;
    newMaterial.indexOfRefraction = materials[i].indexOfRefraction;
    newMaterial.hasScatter = materials[i].hasScatter;
    newMaterial.absorptionCoefficient = materials[i].absorptionCoefficient;
    newMaterial.reducedScatterCoefficient = materials[i].reducedScatterCoefficient;
    newMaterial.emittance = materials[i].emittance;
    materialList[i] = newMaterial;
  }

  material* cudamaterials = NULL;
  cudaMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
  cudaMemcpy( cudamaterials, materialList, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);

  //package the emissive geometry separately as the light list
  int light_idx = 0;
  staticGeom* lightList = new staticGeom[numberOfLights];
  for(int i=0; i<numberOfGeoms; i++){
    if (materials[geoms[i].materialid].emittance > 0.0) {
      staticGeom newLight;
      newLight.objectid = geoms[i].objectid;
      newLight.type = geoms[i].type;
      newLight.materialid = geoms[i].materialid;
      newLight.translation = geoms[i].translations[frame];
      newLight.rotation = geoms[i].rotations[frame];
      newLight.scale = geoms[i].scales[frame];
      newLight.transform = geoms[i].transforms[frame];
      newLight.inverseTransform = geoms[i].inverseTransforms[frame];
      lightList[light_idx++] = newLight;
    }
  }

  staticGeom* cudalights = NULL;
  cudaMalloc((void**)&cudalights, numberOfLights*sizeof(staticGeom));
  cudaMemcpy(cudalights, lightList, numberOfLights*sizeof(staticGeom), cudaMemcpyHostToDevice);

  //package camera
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;

  //kernel launches
  raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudalights, numberOfLights, cudamaterials, renderCam->iterations, traceDepth);
  sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage);

  //retrieve image from GPU
  cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost);

  //free up stuff, or else we'll leak memory like a madman
  cudaFree( cudaimage );
  cudaFree( cudageoms );
  cudaFree( cudalights );
  cudaFree( cudamaterials );
  delete [] geomList;      // was: delete geomList (UB on new[]'d arrays)
  delete [] lightList;
  delete [] materialList;

  // make certain the kernel has completed
  // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
  // supported equivalent)
  cudaDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
|
f93c5838fd2ebbd40878e8ebdfd9d0b1aeb23c33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cusparse_v2.h>
#include <error.h>
#include <matrix.h>
#include <vector.h>
#include <types.h>
#include <thrust/sequence.h>
#include <util.h>
#include <sm_utils.inl>
#include <device_properties.h>
#include <amgx_cusparse.h>
#include <amgx_types/util.h>
#if CUDART_VERSION < 11000
#define CUSPARSE_SPMM_ALG_DEFAULT HIPSPARSE_MM_ALG_DEFAULT
#endif
#if CUDART_VERSION >= 12000
#define HIPSPARSE_CSRMV_ALG1 CUSPARSE_SPMV_CSR_ALG1
#define HIPSPARSE_CSRMV_ALG2 CUSPARSE_SPMV_CSR_ALG2
#endif
namespace amgx
{
// Construct the wrapper and create the underlying hipSPARSE library handle.
Cusparse::Cusparse() : m_handle(0)
{
    cusparseCheckError( hipsparseCreate(&m_handle) );
}
// Release the hipSPARSE handle on destruction.
Cusparse::~Cusparse()
{
    destroy_handle();
}
// Return the process-wide singleton (function-local static), making sure
// its handle exists before handing it out.
Cusparse &Cusparse::get_instance()
{
    static Cusparse s_instance;
    s_instance.create_handle();
    return s_instance;
}
#ifndef DISABLE_MIXED_PRECISION
// Default: matrix values are used at full precision by cuSPARSE/hipSPARSE.
template <class T_Config>
hipsparseStatus_t
CusparseMatPrec<T_Config>::set(hipsparseMatDescr_t &cuMatDescr)
{
    return cusparseSetMatFullPrecision(cuMatDescr, true);
}
// Mixed-precision specialization: double-precision vectors with
// single-precision matrix values — disable full precision on the descriptor.
template <AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indPrec>
hipsparseStatus_t CusparseMatPrec< TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matFloat, t_indPrec> >::set(hipsparseMatDescr_t &cuMatDescr)
{
    return cusparseSetMatFullPrecision(cuMatDescr, false);
}
// Mixed-precision specialization: double-complex vectors with
// single-precision complex matrix values.
template <AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indPrec>
hipsparseStatus_t CusparseMatPrec< TemplateConfig<t_memSpace, AMGX_vecDoubleComplex, AMGX_matComplex, t_indPrec> >::set(hipsparseMatDescr_t &cuMatDescr)
{
    return cusparseSetMatFullPrecision(cuMatDescr, false);
}
#endif
// y = alpha * A * x + beta * y restricted to the given matrix view.
// When running multi-GPU with distinct interior/exterior views and x has
// pending halo updates, overlaps the halo exchange of x with the interior
// product (latency hiding).
template< class TConfig >
void Cusparse::bsrmv(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &A,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y,
    ViewType view )
{
    hipStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.set_initialized(0);
        A.addProps(CSR);
        A.computeDiagonal();
        A.set_initialized(1);
    }
    // Handle cases where the view is set by the calling routine:
    // a non-exterior view means the caller already chose what to multiply,
    // so do not attempt latency hiding.
    if(view != A.getViewExterior())
    {
        bsrmv_internal(alphaConst, A, x, betaConst, y, view, null_stream);
        return;
    }
    // Latency hiding is only worthwhile when interior/exterior differ,
    // we actually run multi-GPU, and x has pending halo data (dirtybit set).
    bool latencyHiding = (A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && x.dirtybit != 0);
    if (latencyHiding)
    {
        // Start the halo exchange of x asynchronously.
        A.manager->exchange_halo_split_gather(x, x.tag);
        // Multiply interior rows
        bsrmv_internal(alphaConst, A, x, betaConst, y, A.getViewInterior(), null_stream);
        // Finish halo exchange
        A.manager->exchange_halo_split_finish(x, x.tag);
        // Multiply rows with halo dependencies
        ViewType bnd_view = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
        bsrmv_internal(alphaConst, A, x, betaConst, y, bnd_view, null_stream);
    }
    else
    {
        // Synchronous halo exchange (only needed if x was modified).
        if (!A.is_matrix_singleGPU() && x.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(x, x.tag);
        }
        bsrmv_internal(alphaConst, A, x, betaConst, y, A.getViewExterior(), null_stream);
    }
    // Flag y as modified; later halo exchanges check this bit.
    y.dirtybit = 1;
}
// y = alpha * A * x + beta * y over the OWNED rows, using masked kernels
// (row lists per view) when latency hiding is possible: the INTERIOR rows
// are multiplied while the halo exchange of x is in flight, then the
// BOUNDARY rows once the halo data arrived.
template< class TConfig >
void Cusparse::bsrmv_with_mask(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &A,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y)
{
    hipStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.set_initialized(0);
        A.addProps(CSR);
        A.computeDiagonal();
        A.set_initialized(1);
    }
    bool latencyHiding = (A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && x.dirtybit != 0);
    if (latencyHiding)
    {
        // Start halo exchange, overlap with the interior product.
        A.manager->exchange_halo_split_gather(x, x.tag);
        // Multiply interior
        bsrmv_internal_with_mask(alphaConst, A, x, betaConst, y, INTERIOR, null_stream);
        A.manager->exchange_halo_split_finish(x, x.tag);
        // Multiply exterior
        bsrmv_internal_with_mask(alphaConst, A, x, betaConst, y, BOUNDARY, null_stream);
    }
    else
    {
        if (!A.is_matrix_singleGPU() && x.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(x, x.tag);
        }
        // No overlap possible: multiply all owned rows with the plain kernel.
        bsrmv_internal(alphaConst, A, x, betaConst, y, OWNED, null_stream);
    }
    // Flag y as modified; later halo exchanges check this bit.
    y.dirtybit = 1;
}
// Restriction product y = alpha * R * x + beta * y. Unlike the forward
// product, halo contributions of y must be ADDED from neighbors (via P's
// manager). With latency hiding, the HALO1 rows are computed first so their
// contributions can be gathered/sent while the OWNED rows are multiplied.
template< class TConfig >
void Cusparse::bsrmv_with_mask_restriction(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &R,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y,
    Matrix<TConfig> &P)
{
    // If only COO, add CSR since bsrmv doesn't support COO
    //if (A.hasProps(COO) && !A.hasProps(CSR))
    //{
    //    A.set_initialized(0);
    //    A.addProps(CSR);
    //    A.computeDiagonal();
    //    A.set_initialized(1);
    //}
    bool latencyHiding = (R.getViewInterior() != R.getViewExterior() && !P.is_matrix_singleGPU() && x.dirtybit != 0);
    if (latencyHiding)
    {
        hipStream_t null_stream = 0;
        // Compute halo rows first, then start sending their contributions.
        bsrmv_internal_with_mask_restriction(alphaConst, R, x, betaConst, y, HALO1, null_stream, P);
        P.manager->add_from_halo_split_gather(y, y.tag);
        // Record so we can wait for the communication below.
        hipEventRecord(P.manager->get_comm_event());
        // Overlap: owned rows multiplied while halo contributions travel.
        bsrmv_internal_with_mask_restriction(alphaConst, R, x, betaConst, y, OWNED, null_stream, P);
        if (P.manager->neighbors.size() != 0)
        {
            hipEventSynchronize(P.manager->get_comm_event());
            P.manager->add_from_halo_split_finish(y, y.tag, P.manager->get_bdy_stream());
            hipStreamSynchronize(P.manager->get_bdy_stream());
        }
    }
    else
    {
        bsrmv_internal(alphaConst, R, x, betaConst, y, OWNED, 0);
        // Add contribution from neighbors
        P.manager->add_from_halo_v2(y, y.tag);
    }
    // Flag y as modified; later halo exchanges check this bit.
    y.dirtybit = 1;
}
// Masked product: y = alpha * A * x + beta * y, but only for the rows whose
// indices are listed in `mask`. Only the OWNED view is supported; no
// latency hiding is attempted here.
template< class TConfig >
void Cusparse::bsrxmv(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &A,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y,
    typename Matrix<TConfig>::IVector &mask,
    ViewType view )
{
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.set_initialized(0);
        A.addProps(CSR);
        A.computeDiagonal();
        A.set_initialized(1);
    }
    // bsrxmv takes separate begin/end row-pointer arrays; for a plain CSR
    // matrix these are row_offsets and row_offsets+1.
    const int *start_offsets, *end_offsets;
    start_offsets = A.row_offsets.raw();
    end_offsets = A.row_offsets.raw() + 1;
    hipsparseDirection_t direction = HIPSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = HIPSPARSE_DIRECTION_ROW;
    }
    if (view == OWNED) //This is already a view, thus do not even attempt to do latency hiding
    {
        bsrxmv_internal(Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                        mask.size(),
                        A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), &alphaConst,
                        A.cuMatDescr,
                        A.values.raw(),
                        mask.raw(),
                        start_offsets, end_offsets,
                        A.col_indices.raw(),
                        A.get_block_dimx(),
                        x.raw(),
                        &betaConst,
                        y.raw());
    }
    else //Try and do latency hiding
    {
        // latency hiding?
        FatalError("Trying to do latency hiding in the bsrxmv", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// E is a vector that represents a diagonal matrix
// operate on all rows and columns
// y= alpha*E.x + beta*y
template< class TConfig >
void Cusparse::bsrmv( const typename TConfig::VecPrec alphaConst,
                      Matrix<TConfig> &A,
                      const typename Matrix<TConfig>::MVector &E,
                      Vector<TConfig> &x,
                      const typename TConfig::VecPrec betaConst,
                      Vector<TConfig> &y,
                      ViewType view )
{
    hipStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.addProps(CSR);
    }
    if (view != A.getViewExterior()) //This is already a view, thus do not even attempt to do latency hiding
    {
        bsrmv_internal(alphaConst, A, E, x, betaConst, y, view, null_stream);
    }
    else //Try and do latency hiding
    {
        ViewType oldView = A.currentView();
        if (!A.is_matrix_singleGPU())
        {
            A.manager->exchange_halo_async(x, x.tag);
        }
        // If interior == exterior there is nothing to overlap with: wait now.
        if (A.getViewExterior() == A.getViewInterior())
        {
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
        }
        ViewType flags;
        bool latencyHiding = true;
        //if (A.manager->num_neighbors() == 0 || (x.dirtybit == 0)) {
        if (A.is_matrix_singleGPU() || (x.dirtybit == 0))
        {
            latencyHiding = false;
            A.setViewExterior();
            flags = (ViewType)(A.getViewExterior());
        }
        else
        {
            flags = (ViewType)(A.getViewInterior());
            A.setViewInterior();
        }
        if (latencyHiding)
        {
            // Interior product overlapped with the halo exchange of x.
            bsrmv_internal(alphaConst, A, E, x, betaConst, y, flags, null_stream);
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
            A.setViewExterior();
            // Remaining (boundary) rows that needed halo values of x.
            flags = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
            if (flags != 0)
            {
                bsrmv_internal(alphaConst, A, E, x, betaConst, y, flags, null_stream);
            }
        }
        else
        {
            bsrmv_internal(alphaConst, A, E, x, betaConst, y, flags, null_stream);
        }
        // Flag y as modified; later halo exchanges check this bit.
        y.dirtybit = 1;
        //if (!A.is_matrix_singleGPU())
        //    if (y.size() == x.size() && y.delayed_send==0) A.manager->exchange_halo_async(y, y.tag);
        A.setView(oldView);
    }
}
// operate only on columns specified by columnColorSelector, see enum ColumnColorSelector above
// operate only on rows of specified color, given by A.offsets_rows_per_color, A.sorted_rows_by_color
// y= alpha*A.x + beta*y
template< class TConfig >
void Cusparse::bsrmv( ColumnColorSelector columnColorSelector,
                      const int color,
                      const typename TConfig::VecPrec alphaConst,
                      Matrix<TConfig> &A,
                      Vector<TConfig> &x,
                      const typename TConfig::VecPrec betaConst,
                      Vector<TConfig> &y,
                      ViewType view )
{
    hipStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.addProps(CSR);
    }
    if (view != A.getViewExterior()) //This is already a view, thus do not even attempt to do latency hiding
    {
        // std::cerr << "exterior view with selector" << std::endl;
        bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, view, null_stream);
    }
    else
    {
        //Try and do latency hiding
        ViewType oldView = A.currentView();
        if (!A.is_matrix_singleGPU())
        {
            A.manager->exchange_halo_async(x, x.tag);
        }
        // If interior == exterior there is nothing to overlap with: wait now.
        if (A.getViewExterior() == A.getViewInterior())
        {
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
        }
        ViewType flags;
        bool latencyHiding = true;
        if (A.is_matrix_singleGPU() || (x.dirtybit == 0))
        {
            latencyHiding = false;
            A.setViewExterior();
            flags = (ViewType)(A.getViewExterior());
        }
        else
        {
            flags = (ViewType)(A.getViewInterior());
            A.setViewInterior();
        }
        if (latencyHiding)
        {
            // Interior product overlapped with the halo exchange of x.
            bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, flags, null_stream);
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
            A.setViewExterior();
            // Remaining (boundary) rows that needed halo values of x.
            flags = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
            if (flags != 0)
            {
                bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, flags, null_stream);
            }
        }
        else
        {
            bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, flags, null_stream);
        }
        // Flag y as modified; later halo exchanges check this bit.
        y.dirtybit = 1;
        //if (!A.is_matrix_singleGPU() && y.size() == x.size() && y.delayed_send==0)
        //    A.manager->exchange_halo_async(y, y.tag);
        A.setView(oldView);
    }
}
// E is a vector that represents a diagonal matrix
// operate only on rows of specified color, given by A.offsets_rows_per_color, A.sorted_rows_by_color
// y= alpha*E.x + beta*y
template< class TConfig >
void Cusparse::bsrmv( const int color,
                      const typename TConfig::VecPrec alphaConst,
                      Matrix<TConfig> &A,
                      const typename Matrix<TConfig>::MVector &E,
                      Vector<TConfig> &x,
                      const typename TConfig::VecPrec betaConst,
                      Vector<TConfig> &y,
                      ViewType view)
{
    hipStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.addProps(CSR);
    }
    if (view != A.getViewExterior()) //This is already a view, thus do not even attempt to do latency hiding
    {
        bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, view, null_stream);
    }
    else //Try and do latency hiding
    {
        //std::ccooor << "de" << std::endl;
        //std::cerr << "not an exterior view" << std::endl;
        ViewType oldView = A.currentView();
        if (!A.is_matrix_singleGPU())
        {
            A.manager->exchange_halo_async(x, x.tag);
        }
        // If interior == exterior there is nothing to overlap with: wait now.
        if (A.getViewExterior() == A.getViewInterior())
        {
            //std::cerr << "exchange_halo_wait" << std::endl;
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
        }
        //std::cerr << "xxxeded" << std::endl;
        ViewType flags;
        bool latencyHiding = true;
        if (A.is_matrix_singleGPU() || (x.dirtybit == 0))
        {
            latencyHiding = false;
            A.setViewExterior();
            flags = (ViewType)(A.getViewExterior());
        }
        else
        {
            flags = (ViewType)(A.getViewInterior());
            A.setViewInterior();
        }
        if (latencyHiding)
        {
            // Interior product overlapped with the halo exchange of x.
            bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, flags, null_stream);
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
            A.setViewExterior();
            // Remaining (boundary) rows that needed halo values of x.
            flags = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
            if (flags != 0)
            {
                bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, flags, null_stream);
            }
        }
        else
        {
            bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, flags, null_stream);
        }
        // Flag y as modified; later halo exchanges check this bit.
        y.dirtybit = 1;
        //if (!A.is_matrix_singleGPU() && y.size() == x.size() && y.delayed_send==0)
        //    A.manager->exchange_halo_async(y, y.tag);
        A.setView(oldView);
    }
}
// Rebase a row-pointer array to start at zero: writes the nrows+1 entries
// rows[i] = bsrRowPtr[i] - bsrRowPtr[0]. One thread per output element;
// threads beyond index nrows do nothing.
__global__ void offset_by_col_off(int nrows, int* rows, const int* bsrRowPtr)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid <= nrows)
    {
        rows[tid] = bsrRowPtr[tid] - bsrRowPtr[0];
    }
}
// Dispatch one SpMV over the rows belonging to `view`: first the
// off-diagonal (stored) part of A, then — if the matrix keeps its diagonal
// externally (DIAG property) — the diagonal contribution on top.
template< class TConfig >
void Cusparse::bsrmv_internal( const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const hipStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueTypeB;
    // Row offset / row count / nnz count of this view's slice.
    int rowOff, nrows, nnz;
    A.getOffsetAndSizeForView(view, &rowOff, &nrows);
    A.getNnzForView(view, &nnz);
    hipsparseDirection_t direction = HIPSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = HIPSPARSE_DIRECTION_ROW;
    }
    bool has_offdiag = nnz != 0;
    if (has_offdiag )
    {
        bsrmv( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE,
               nrows, A.get_num_cols(), nnz, &alphaConst,
               A.cuMatDescr,
               A.values.raw(),
               A.m_seq_offsets.raw() + rowOff,
               A.row_offsets.raw() + rowOff,
               A.col_indices.raw(),
               rowOff,
               A.get_block_dimx(),
               x.raw(),
               &betaConst,
               y.raw() + rowOff * A.get_block_dimx(),
               stream);
    }
    if (A.hasProps(DIAG))
    {
        // If the off-diagonal pass already wrote y, accumulate the diagonal
        // with beta = 1; otherwise apply the caller's beta here.
        ValueTypeB beta;
        if (!has_offdiag)
        {
            beta = betaConst;
        }
        else
        {
            beta = types::util<ValueTypeB>::get_one();
        }
        // m_seq_offsets (0,1,2,...) serves as both row pointers and column
        // indices, i.e. a CSR identity structure selecting one diagonal
        // entry per row from the externally stored diagonal values.
        bsrmv( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE,
               nrows, A.get_num_cols(), A.get_num_rows(), &alphaConst,
               A.cuMatDescr,
               A.values.raw() + A.diagOffset()*A.get_block_size(),
               A.m_seq_offsets.raw(),
               A.m_seq_offsets.raw() + rowOff,
               A.m_seq_offsets.raw(),
               rowOff,
               A.get_block_dimx(),
               x.raw(), &beta,
               y.raw() + rowOff * A.get_block_dimx(),
               stream);
    }
}
// Masked per-view SpMV used by bsrmv_with_mask: multiplies only the rows in
// the manager's row list for `view` (INTERIOR or BOUNDARY), on the given
// stream. Multi-GPU only; matrices with an external diagonal (DIAG) are not
// supported here.
template< class TConfig >
void Cusparse::bsrmv_internal_with_mask( const typename TConfig::VecPrec alphaConst,
                                         const Matrix<TConfig> &A,
                                         const Vector<TConfig> &x,
                                         const typename TConfig::VecPrec betaConst,
                                         Vector<TConfig> &y,
                                         ViewType view,
                                         const hipStream_t &stream)
{
    if (A.is_matrix_singleGPU())
    {
        FatalError("Should not be here in bsrmv_internal_with_mask", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if(view != INTERIOR && view != BOUNDARY)
    {
        FatalError("Only INTERIOR and BOUNDARY views supported for bsrmv_internal_with_mask", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename TConfig::VecPrec ValueType;
    hipsparseDirection_t direction = HIPSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = HIPSPARSE_DIRECTION_ROW;
    }
    // CSR begin/end pointers: row i spans [row_offsets[i], row_offsets[i+1]).
    const int *start_offsets, *end_offsets;
    start_offsets = A.row_offsets.raw();
    end_offsets = A.row_offsets.raw() + 1;
    typedef typename Matrix<TConfig>::index_type index_type;
    int offset, nrows, nnz;
    A.getOffsetAndSizeForView(view, &offset, &nrows);
    A.getNnzForView(view, &nnz);
    if (nrows <= 0)
    {
        return; // nothing to do, early exit
    }
    bool has_offdiag = nnz != 0;
    if (has_offdiag)
    {
        // Run cuSPARSE on the caller's stream for this masked product.
        hipsparseSetStream(Cusparse::get_instance().m_handle, stream);
        bsrxmv_internal( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE, nrows,
                         nrows, A.get_num_cols(), nnz, &alphaConst,
                         A.cuMatDescr,
                         A.values.raw(),
                         A.manager->getRowsListForView(view).raw(),
                         start_offsets, end_offsets, A.col_indices.raw(),
                         A.get_block_dimx(),
                         x.raw(), &betaConst,
                         y.raw() );
        // Reset to default stream
        hipsparseSetStream(Cusparse::get_instance().m_handle, 0);
    }
    if (A.hasProps(DIAG))
    {
        FatalError("Diag not supported in multiply with mask\n", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Per-view SpMV for the restriction operator R, used by
// bsrmv_with_mask_restriction. Only OWNED and HALO1 views are supported;
// P supplies the distributed context. Matrices with an external diagonal
// (DIAG) are not supported here.
template< class TConfig >
void Cusparse::bsrmv_internal_with_mask_restriction( const typename TConfig::VecPrec alphaConst,
                                                     const Matrix<TConfig> &R,
                                                     const Vector<TConfig> &x,
                                                     const typename TConfig::VecPrec betaConst,
                                                     Vector<TConfig> &y,
                                                     ViewType view,
                                                     const hipStream_t &stream,
                                                     Matrix<TConfig> &P)
{
    if (P.is_matrix_singleGPU())
    {
        FatalError("Should not be here in bsrmv_internal_with_mask_with_restriction", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if(view != OWNED && view != HALO1)
    {
        FatalError("View not supported in restriction operation", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename TConfig::VecPrec ValueType;
    hipsparseDirection_t direction = HIPSPARSE_DIRECTION_COLUMN;
    if ( R.getBlockFormat() == ROW_MAJOR )
    {
        direction = HIPSPARSE_DIRECTION_ROW;
    }
    // Row offset / row count / nnz count of this view's slice of R.
    int rowOff, nrows, nnz;
    R.getFixedSizesForView(view, &rowOff, &nrows, &nnz);
    bool has_offdiag = nnz != 0;
    typedef typename Matrix<TConfig>::index_type index_type;
    if (nrows <= 0)
    {
        return; // nothing to do, early exit
    }
    if (has_offdiag)
    {
        bsrmv( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE,
               nrows, R.get_num_cols(), nnz, &alphaConst,
               R.cuMatDescr,
               R.values.raw(),
               R.m_seq_offsets.raw() + rowOff,
               R.row_offsets.raw() + rowOff,
               R.col_indices.raw(),
               rowOff,
               R.get_block_dimx(),
               x.raw(),
               &betaConst,
               y.raw() + rowOff * R.get_block_dimx(),
               stream);
    }
    if (R.hasProps(DIAG))
    {
        FatalError("Diag not supported in multiply with mask\n", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Per-view diagonal product y = alpha * E .* x + beta * y: E's values are
// multiplied through the CSR identity structure (m_seq_offsets used as both
// row pointers and column indices, one entry per row).
template< class TConfig >
void Cusparse::bsrmv_internal( const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const typename Matrix<TConfig>::MVector &E,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const hipStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueType;
    int rowOff, nrows, nnz;
    A.getFixedSizesForView(view, &rowOff, &nrows, &nnz);
    hipsparseDirection_t direction = A.getBlockFormat() == ROW_MAJOR ? HIPSPARSE_DIRECTION_ROW : HIPSPARSE_DIRECTION_COLUMN;
    bsrmv( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE,
           nrows, A.get_num_cols(), nnz, &alphaConst,
           A.cuMatDescr,
           E.raw(),
           A.m_seq_offsets.raw(),
           A.m_seq_offsets.raw() + rowOff,
           A.m_seq_offsets.raw(),
           rowOff,
           A.get_block_dimx(),
           x.raw(), &betaConst,
           y.raw() + rowOff * A.get_block_dimx(),
           stream);
}
// Masked per-color SpMV: multiplies only the rows of the given color
// (listed in the coloring's sorted-rows-by-color array) and only the column
// range chosen by columnColorSelector (all columns, columns of smaller
// colors, or columns of larger colors — DIAG_COL is disabled).
template< class TConfig >
void Cusparse::bsrmv_internal( ColumnColorSelector columnColorSelector,
                               const int color,
                               const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const hipStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueType;
    if (!A.hasProps(COLORING))
    {
        FatalError("Matrix is not colored, exiting", AMGX_ERR_BAD_PARAMETERS);
    }
    if (color < 0 || color >= A.getMatrixColoring().getNumColors())
    {
        FatalError("Unknown color", AMGX_ERR_BAD_PARAMETERS);
    }
    typedef typename Matrix<TConfig>::index_type index_type;
    // Start/count of this color's rows within sorted-rows-by-color; the
    // choice of offset table depends on the interior/exterior split.
    index_type colorStart = 0;
    if ( !(view & INTERIOR) )
    {
        colorStart = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorStart = A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    }
    index_type colorNum = 0;
    if ( view == A.getViewInterior() )
    {
        colorNum = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorNum = A.getMatrixColoring().getOffsetsRowsPerColor()[color + 1];
    }
    colorNum -= colorStart;
    if (colorNum <= 0)
    {
        return; // nothing to do, early exit
    }
    if (columnColorSelector == DIAG_COL)
    {
        FatalError("DIAG_COL has been disabled to avoid allocating diag_offsets", AMGX_ERR_NOT_IMPLEMENTED);
    }
    // Per-row begin/end nnz pointers implementing the column restriction.
    const int *start_offsets, *end_offsets;
    switch (columnColorSelector)
    {
        case ALL_COLORS:
            start_offsets = A.row_offsets.raw();
            end_offsets = A.row_offsets.raw() + 1;
            break;
        case SMALLER_COLORS:
            start_offsets = A.row_offsets.raw();
            end_offsets = A.m_smaller_color_offsets.raw();
            break;
        case LARGER_COLORS:
            start_offsets = A.m_larger_color_offsets.raw();
            end_offsets = A.row_offsets.raw() + 1;
            break;
        case DIAG_COL:
            start_offsets = A.diag.raw();
            end_offsets = A.m_diag_end_offsets.raw();
            break;
        default:
            FatalError("Unknown color selector", AMGX_ERR_CORE);
    }
    hipsparseDirection_t direction = HIPSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = HIPSPARSE_DIRECTION_ROW;
    }
    bool has_offdiag = A.get_num_nz() != 0;
    // Run cuSPARSE on the caller's stream.
    hipsparseSetStream(Cusparse::get_instance().m_handle, stream);
    if (has_offdiag)
    {
        bsrxmv_internal( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE, colorNum,
                         A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), &alphaConst,
                         A.cuMatDescr,
                         A.values.raw(),
                         A.getMatrixColoring().getSortedRowsByColor().raw() + colorStart,
                         start_offsets, end_offsets, A.col_indices.raw(),
                         A.get_block_dimx(),
                         x.raw(), &betaConst,
                         y.raw() );
    }
    if (A.hasProps(DIAG) && columnColorSelector == ALL_COLORS)
    {
        // If the off-diagonal pass already wrote y, accumulate the external
        // diagonal with beta = 1; m_seq_offsets encodes the identity CSR
        // structure (one diagonal entry per row).
        typename TConfig::VecPrec beta = betaConst;
        if (has_offdiag)
        {
            beta = types::util<typename TConfig::VecPrec>::get_one();
        }
        bsrxmv_internal( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE, colorNum,
                         A.get_num_rows(), A.get_num_cols(), A.get_num_rows(), &alphaConst,
                         A.cuMatDescr,
                         A.values.raw() + A.diagOffset()*A.get_block_size(),
                         A.getMatrixColoring().getSortedRowsByColor().raw() + colorStart,
                         A.m_seq_offsets.raw(),
                         NULL,
                         A.m_seq_offsets.raw(),
                         A.get_block_dimx(),
                         x.raw(), &beta,
                         y.raw() );
    }
    // Reset to default stream
    hipsparseSetStream(Cusparse::get_instance().m_handle, 0);
}
// Masked per-color diagonal product y = alpha * E .* x + beta * y for the
// rows of the given color only (E applied through the identity CSR
// structure built from m_seq_offsets).
template< class TConfig >
void Cusparse::bsrmv_internal( const int color,
                               const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const typename Matrix<TConfig>::MVector &E,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const hipStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueType;
    if ( !A.hasProps(COLORING) )
    {
        FatalError("Matrix is not colored, exiting", AMGX_ERR_BAD_PARAMETERS);
    }
    if ( color < 0 || color >= A.getMatrixColoring().getNumColors() )
    {
        FatalError("Unknown color", AMGX_ERR_BAD_PARAMETERS);
    }
    typedef typename Matrix<TConfig>::index_type index_type;
    //const index_type colorStart = ((view & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color] : A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    //const index_type colorNum = ((view == A.getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color] : A.getMatrixColoring().getOffsetsRowsPerColor()[color+1]) - colorStart;
    //const index_type colorStart= A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    //const index_type colorNum= A.getMatrixColoring().getOffsetsRowsPerColor()[color+1] - colorStart;
    // Start/count of this color's rows; the offset table depends on the
    // interior/exterior split.
    index_type colorStart = 0;
    if ( !(view & INTERIOR) )
    {
        colorStart = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorStart = A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    }
    index_type colorNum = 0;
    if ( view == A.getViewInterior() )
    {
        colorNum = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorNum = A.getMatrixColoring().getOffsetsRowsPerColor()[color + 1];
    }
    colorNum -= colorStart;
    if (colorNum <= 0)
    {
        return; // nothing to do, early exit
    }
    hipsparseDirection_t direction = HIPSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = HIPSPARSE_DIRECTION_ROW;
    }
    // Run cuSPARSE on the caller's stream.
    hipsparseSetStream(Cusparse::get_instance().m_handle, stream);
    bsrxmv_internal( Cusparse::get_instance().m_handle, direction, HIPSPARSE_OPERATION_NON_TRANSPOSE, colorNum,
                     A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), &alphaConst,
                     A.cuMatDescr,
                     E.raw(),
                     A.getMatrixColoring().getSortedRowsByColor().raw() + colorStart,
                     A.m_seq_offsets.raw(),
                     NULL,
                     A.m_seq_offsets.raw(),
                     A.get_block_dimx(),
                     x.raw(), &betaConst,
                     y.raw() );
    // Reset to default stream
    hipsparseSetStream(Cusparse::get_instance().m_handle, 0);
}
#ifdef CUSPARSE_GENERIC_INTERFACES
// Simple custom implementation of matrix-vector product that has only 1 kernel.
// Computes y = alpha*A*x + beta*y for scalar CSR (block size 1) using a
// grid-stride loop over rows, so any launch configuration is valid.
// UNROLL is a compile-time unroll factor for the inner nonzero loop.
template<unsigned UNROLL, class T>
__global__ void csrmv(
    int nrows,
    const T alpha,
    const T* __restrict__ csrVal,
    const int* __restrict__ csrRow,
    const int* __restrict__ csrCol,
    const T* __restrict__ x,
    const T beta,
    T* __restrict__ y)
{
    for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < nrows; i += blockDim.x*gridDim.x)
    {
        T y_tmp = amgx::types::util<T>::get_zero();
        int row_b = csrRow[i];
        int row_e = csrRow[i+1];
        // Unrolling is important for performance here.
        // Possible to squeeze more performance out of the key kernels if we
        // measure the sparsity and use it to inform unrolling.
        for (int col = row_b; col < row_e; col += UNROLL)
        {
#pragma unroll UNROLL
            for(int off = 0; off < UNROLL; ++off)
            {
                // Guard against running past the end of the row.
                int c = col + off;
                if(c < row_e) y_tmp = alpha * csrVal[c] * x[csrCol[c]] + y_tmp;
            }
        }
        // Don't read y unnecessarily
        if(amgx::types::util<T>::is_zero(beta))
        {
            y[i] = y_tmp;
        }
        else
        {
            y[i] = beta*y[i] + y_tmp;
        }
    }
}
// Scalar (block size 1) SpMV dispatcher: y = alpha*A*x + beta*y.
// Uses the custom single-kernel csrmv for small row counts or when the row
// pointers are offset (rowOff > 0, which the cuSPARSE generic API does not
// accept directly); otherwise uses hipsparseSpMV.
// `alpha`/`beta` are host pointers; the caller has already set `stream` on
// `handle`, so both paths must execute on that stream.
template<class MatType, class VecType, class IndType>
inline void generic_SpMV(hipsparseHandle_t handle, hipsparseOperation_t trans,
                         int mb, int nb, int nnzb, int rowOff,
                         const MatType *alpha,
                         const MatType *val,
                         const IndType *rowPtr,
                         const IndType *colInd,
                         const VecType *x,
                         const VecType *beta,
                         VecType *y,
                         hipDataType matType,
                         hipDataType vecType,
                         const hipStream_t& stream)
{
    constexpr int cta_size = 128;
    const int sm_count = getSMCount();
    // Assuming that csrmv will be more efficient than cuSPARSE for row counts
    // that are lower than the 3 times the total number of threads
    // cuSPARSE does not like the offsetting required when latency hiding
    // it's possible to reverse the offsets, but requires extra kernel invocation
    // and usually the dependent part of the call is smaller
    if(rowOff > 0 || mb < cta_size * sm_count * 3)
    {
        // Custom single-kernel SpMV, we could actually determine unroll factor
        // more accurately here by checking non-zeros per row
        constexpr int unroll_factor = 16;
        int nblocks = mb / cta_size + 1;
        // Fix: launch on the caller-supplied stream (previously the default
        // stream), so this path obeys the same stream ordering as the
        // hipsparseSpMV path below, whose stream is set on the handle.
        hipLaunchKernelGGL(( csrmv<unroll_factor>), dim3(nblocks), dim3(cta_size), 0, stream, mb, *alpha, val, rowPtr, colInd, x, *beta, y);
    }
    else
    {
        // The generic API wants non-const pointers; the data is not modified.
        hipsparseSpMatDescr_t matA_descr;
        hipsparseDnVecDescr_t vecX_descr;
        hipsparseDnVecDescr_t vecY_descr;
        cusparseCheckError(hipsparseCreateDnVec(&vecX_descr, nb, const_cast<VecType*>(x), vecType));
        cusparseCheckError(hipsparseCreateDnVec(&vecY_descr, mb, const_cast<VecType*>(y), vecType));
        cusparseCheckError(
            hipsparseCreateCsr(&matA_descr, mb, nb, nnzb, const_cast<IndType*>(rowPtr), const_cast<IndType*>(colInd),
                               const_cast<MatType*>(val), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, matType));
        // Query and allocate the scratch buffer SpMV may need.
        size_t bufferSize = 0;
        cusparseCheckError(hipsparseSpMV_bufferSize(handle, trans, alpha, matA_descr, vecX_descr, beta, vecY_descr, matType, HIPSPARSE_CSRMV_ALG2, &bufferSize));
        void* dBuffer = NULL;
        if(bufferSize > 0)
        {
            amgx::memory::hipMalloc(&dBuffer, bufferSize);
        }
        cusparseCheckError(hipsparseSpMV(handle, trans, alpha, matA_descr, vecX_descr, beta, vecY_descr, matType, HIPSPARSE_CSRMV_ALG2, dBuffer) );
        cusparseCheckError(hipsparseDestroySpMat(matA_descr));
        cusparseCheckError(hipsparseDestroyDnVec(vecX_descr));
        cusparseCheckError(hipsparseDestroyDnVec(vecY_descr));
        if(bufferSize > 0)
        {
            amgx::memory::hipFreeAsync(dBuffer);
        }
    }
}
#endif
// Single-precision dispatch: scalar matrices (blockDim == 1) go through the
// generic/custom CSR SpMV path; block matrices go through hipsparseSbsrmv.
inline void Cusparse::bsrmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const float *alpha,
                             const hipsparseMatDescr_t descr,
                             const float *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const float *x,
                             const float *beta,
                             float *y,
                             const hipStream_t& stream)
{
    // Run cuSparse on selected stream
    hipsparseSetStream(handle, stream);
    if (blockDim == 1)
    {
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, HIP_R_32F, HIP_R_32F, stream);
#else
        cusparseCheckError(hipsparseScsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    else
    {
        cusparseCheckError(hipsparseSbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    // Reset cuSparse to default stream
    hipsparseSetStream(handle, 0);
}
// Double-precision dispatch: scalar matrices (blockDim == 1) go through the
// generic/custom CSR SpMV path; block matrices go through hipsparseDbsrmv.
inline void Cusparse::bsrmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const double *alpha,
                             const hipsparseMatDescr_t descr,
                             const double *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const double *x,
                             const double *beta,
                             double *y,
                             const hipStream_t& stream)
{
    // Run cuSparse on selected stream
    hipsparseSetStream(handle, stream);
    if (blockDim == 1)
    {
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, HIP_R_64F, HIP_R_64F, stream);
#else
        cusparseCheckError(hipsparseDcsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    else
    {
        cusparseCheckError(hipsparseDbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    // Reset cuSparse to default stream
    hipsparseSetStream(handle, 0);
}
inline void Cusparse::bsrmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans,
int mb, int nb, int nnzb,
const double *alpha,
const hipsparseMatDescr_t descr,
const float *bsrVal,
const int *bsrMaskPtr,
const int *bsrRowPtr,
const int *bsrColInd,
int rowOff,
int blockDim,
const double *x,
const double *beta,
double *y,
const hipStream_t& stream)
{
// Run cuSparse on selected stream
hipsparseSetStream(handle, stream);
#ifndef DISABLE_MIXED_PRECISION
const double *d_bsrVal = reinterpret_cast<const double *>(const_cast<float *>(bsrVal)); // this works due to private API call in the matrix initialization which sets cusparse matrix description in the half precision mode
cusparseCheckError(cusparseDbsrxmv(handle, dir, trans, mb, mb, nb, nnzb, alpha, descr, d_bsrVal, bsrMaskPtr, bsrRowPtr, bsrRowPtr + 1, bsrColInd, blockDim, x, beta, y));
#else
FatalError("Mixed precision modes not currently supported for CUDA 10.1 or later.", AMGX_ERR_NOT_IMPLEMENTED);
#endif
// Reset cuSparse to default stream
hipsparseSetStream(handle, 0);
}
// Custom implementation of matrix-vector product to replace the original bsrxmv,
// but with block size of 1.
// Computes y[row] = alpha * A[row,:] * x + beta * y[row] for every row listed
// in csrMask (sizeOfMask entries). Uses a grid-stride loop, so any grid size
// produces correct results; UNROLL controls manual unrolling of the inner
// dot-product loop. Assumes row extents are csrRow[row]..csrRow[row+1].
template<unsigned UNROLL, class T>
__global__ void csrxmv(
    int sizeOfMask,
    const T alpha,
    const T* __restrict__ csrVal,
    const int* __restrict__ csrMask,
    const int* __restrict__ csrRow,
    const int* __restrict__ csrCol,
    const T* __restrict__ x,
    const T beta,
    T* __restrict__ y)
{
    // Grid-stride over the masked rows only.
    for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < sizeOfMask; i += blockDim.x*gridDim.x)
    {
        int row = csrMask[i];
        T y_tmp = amgx::types::util<T>::get_zero();
        int row_b = csrRow[row];
        int row_e = csrRow[row+1];
        // Unrolling is important for performance here.
        // Possible to squeeze more performance out of the key kernels if we
        // measure the sparsity and use it to inform unrolling.
        for (int col = row_b; col < row_e; col += UNROLL)
        {
#pragma unroll UNROLL
            for(int off = 0; off < UNROLL; ++off)
            {
                int c = col + off;
                // Guard against running past the row end on the last chunk.
                if(c < row_e) y_tmp = alpha * csrVal[c] * x[csrCol[c]] + y_tmp;
            }
        }
        // Don't read y unnecessarily
        if(amgx::types::util<T>::is_zero(beta))
        {
            y[row] = y_tmp;
        }
        else
        {
            y[row] = beta*y[row] + y_tmp;
        }
    }
}
// Replaces the functionality of cusparse?bsrxmv for blockDim == 1 by
// launching the custom csrxmv kernel over the masked rows.
// Preconditions (fatal errors otherwise): scalar blocks (blockDim == 1),
// non-transposed operation, row-major direction.
// NOTE(review): the kernel reads csrRow[row+1] as the row end, i.e. it
// effectively assumes bsrEndPtr == bsrRowPtr + 1 — true for all call sites
// in this file, which substitute bsrRowPtr + 1 when bsrEndPtr is NULL.
template<class T>
inline void Xcsrxmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                     int mb, int nb, int nnzb,
                     const T *alpha,
                     const hipsparseMatDescr_t descr,
                     const T *bsrVal,
                     const int *bsrMaskPtr,
                     const int *bsrRowPtr,
                     const int *bsrEndPtr,
                     const int *bsrColInd,
                     int blockDim,
                     const T *x,
                     const T *beta,
                     T *y)
{
    if(blockDim != 1)
    {
        FatalError("Xcsrxmv only to be called with scalar matrices.", AMGX_ERR_INTERNAL);
    }
    if (trans != HIPSPARSE_OPERATION_NON_TRANSPOSE)
    {
        FatalError("Cannot currently latency hide a transposed matrix.", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if (dir != HIPSPARSE_DIRECTION_ROW)
    {
        FatalError("Cannot currently latency hide if matrix is not row major.", AMGX_ERR_NOT_IMPLEMENTED);
    }
    // Empty mask: nothing to do. Returning early avoids a useless kernel
    // launch (and a zero-block grid, which is an invalid launch config).
    if (sizeOfMask <= 0)
    {
        return;
    }
    constexpr int cta_size = 128;
    constexpr int unroll_factor = 16;
    // Ceil-divide so exactly enough blocks are launched. The previous
    // "sizeOfMask / cta_size + 1" launched one superfluous (empty) block
    // whenever sizeOfMask was a multiple of cta_size; the kernel's
    // grid-stride loop keeps results identical either way.
    int nblocks = (sizeOfMask + cta_size - 1) / cta_size;
    hipLaunchKernelGGL(( csrxmv<unroll_factor>), dim3(nblocks), dim3(cta_size), 0, 0, sizeOfMask, *alpha, bsrVal, bsrMaskPtr, bsrRowPtr, bsrColInd, x, *beta, y);
}
// overloaded C++ wrappers for cusparse?bsrxmv
// bsrxmv
// matrix - float
// vector - float
// Masked bsrmv (float/float): multiplies only the rows listed in bsrMaskPtr.
// At least one of bsrMaskPtr/bsrEndPtr must be provided; a NULL bsrEndPtr
// defaults to the standard CSR convention (bsrRowPtr + 1).
inline void Cusparse::bsrxmv_internal(hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                                      int mb, int nb, int nnzb,
                                      const float *alpha,
                                      const hipsparseMatDescr_t descr,
                                      const float *bsrVal,
                                      const int *bsrMaskPtr,
                                      const int *bsrRowPtr,
                                      const int *bsrEndPtr,
                                      const int *bsrColInd,
                                      int blockDim,
                                      const float *x,
                                      const float *beta,
                                      float *y)
{
    if (bsrEndPtr == NULL && bsrMaskPtr == NULL)
    {
        cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        if (bsrEndPtr == NULL)
        {
            bsrEndPtr = bsrRowPtr + 1;
        }
        if (blockDim == 1)
        {
            // Scalar blocks: use the custom kernel (cuSPARSE bsrxmv requires blockDim > 1).
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseSbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// bsrxmv
// matrix - float
// vector - double
// Mixed precision masked product is not supported: always reports an error.
inline void Cusparse::bsrxmv_internal( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const double *alpha,
                                       const hipsparseMatDescr_t descr,
                                       const float *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const double *x,
                                       const double *beta,
                                       double *y)
{
    cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// bsrxmv
// matrix - double
// vector - double
inline void Cusparse::bsrxmv_internal( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const double *alpha,
                                       const hipsparseMatDescr_t descr,
                                       const double *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const double *x,
                                       const double *beta,
                                       double *y)
{
    if (bsrEndPtr == NULL && bsrMaskPtr == NULL)
    {
        cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        if (bsrEndPtr == NULL)
        {
            bsrEndPtr = bsrRowPtr + 1;
        }
        if (blockDim == 1)
        {
            // Scalar blocks: use the custom kernel (cuSPARSE bsrxmv requires blockDim > 1).
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseDbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// Complex single-precision bsrmv: y = alpha * A * x + beta * y.
// Mirrors the real-valued overloads above.
inline void Cusparse::bsrmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const hipComplex *alpha,
                             const hipsparseMatDescr_t descr,
                             const hipComplex *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const hipComplex *x,
                             const hipComplex *beta,
                             hipComplex *y,
                             const hipStream_t& stream)
{
    // Run cuSparse on selected stream
    hipsparseSetStream(handle, stream);
    if (blockDim == 1)
    {
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, HIP_C_32F, HIP_C_32F, stream);
#else
        cusparseCheckError(hipsparseCcsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    else
    {
        cusparseCheckError(hipsparseCbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    // Reset cuSparse to default stream
    hipsparseSetStream(handle, 0);
}
// Complex double-precision bsrmv: y = alpha * A * x + beta * y.
inline void Cusparse::bsrmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const hipDoubleComplex *alpha,
                             const hipsparseMatDescr_t descr,
                             const hipDoubleComplex *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const hipDoubleComplex *x,
                             const hipDoubleComplex *beta,
                             hipDoubleComplex *y,
                             const hipStream_t& stream)
{
    // Run cuSparse on selected stream
    hipsparseSetStream(handle, stream);
    if (blockDim == 1)
    {
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, HIP_C_64F, HIP_C_64F, stream);
#else
        cusparseCheckError(hipsparseZcsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    else
    {
        cusparseCheckError(hipsparseZbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    // Reset cuSparse to default stream
    hipsparseSetStream(handle, 0);
}
// Mixed-precision complex bsrmv (hipComplex matrix, hipDoubleComplex vectors).
// Relies on the same private half-precision descriptor trick as the real
// mixed-precision overload above.
inline void Cusparse::bsrmv( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const hipDoubleComplex *alpha,
                             const hipsparseMatDescr_t descr,
                             const hipComplex *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const hipDoubleComplex *x,
                             const hipDoubleComplex *beta,
                             hipDoubleComplex *y,
                             const hipStream_t& stream)
{
    // Run cuSparse on selected stream
    hipsparseSetStream(handle, stream);
#ifndef DISABLE_MIXED_PRECISION
    const hipDoubleComplex *d_bsrVal = reinterpret_cast<hipDoubleComplex *>(const_cast<hipComplex *>(bsrVal));
    cusparseCheckError(cusparseZbsrxmv(handle, dir, trans, mb, mb, nb, nnzb, alpha, descr, d_bsrVal, bsrMaskPtr, bsrRowPtr, bsrRowPtr + 1, bsrColInd, blockDim, x, beta, y));
#else
    FatalError("Mixed precision modes not currently supported for CUDA 10.1 or later.", AMGX_ERR_NOT_IMPLEMENTED);
#endif
    // Reset cuSparse to default stream
    hipsparseSetStream(handle, 0);
}
// overloaded C++ wrappers for cusparse?bsrxmv
// bsrxmv
// matrix - hipComplex
// vector - hipComplex
// Masked bsrmv, complex single precision. Same dispatch as the real-valued
// version: custom kernel for scalar blocks, cuSPARSE bsrxmv otherwise.
inline void Cusparse::bsrxmv_internal( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const hipComplex *alpha,
                                       const hipsparseMatDescr_t descr,
                                       const hipComplex *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const hipComplex *x,
                                       const hipComplex *beta,
                                       hipComplex *y)
{
    if (bsrEndPtr == NULL && bsrMaskPtr == NULL)
    {
        cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        if (bsrEndPtr == NULL)
        {
            bsrEndPtr = bsrRowPtr + 1;
        }
        if (blockDim == 1)
        {
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseCbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// bsrxmv
// matrix - hipComplex
// vector - hipDoubleComplex
// Mixed precision masked product is not supported: always reports an error.
inline void Cusparse::bsrxmv_internal( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const hipDoubleComplex *alpha,
                                       const hipsparseMatDescr_t descr,
                                       const hipComplex *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const hipDoubleComplex *x,
                                       const hipDoubleComplex *beta,
                                       hipDoubleComplex *y)
{
    cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// bsrxmv
// matrix - hipDoubleComplex
// vector - hipDoubleComplex
inline void Cusparse::bsrxmv_internal( hipsparseHandle_t handle, hipsparseDirection_t dir, hipsparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const hipDoubleComplex *alpha,
                                       const hipsparseMatDescr_t descr,
                                       const hipDoubleComplex *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const hipDoubleComplex *x,
                                       const hipDoubleComplex *beta,
                                       hipDoubleComplex *y)
{
    if (bsrEndPtr == NULL && bsrMaskPtr == NULL)
    {
        cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        if (bsrEndPtr == NULL)
        {
            bsrEndPtr = bsrRowPtr + 1;
        }
        if (blockDim == 1)
        {
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseZbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// File-local helpers implementing sparse-matrix * dense-matrix (C = alpha*A*B
// + beta*C) for every supported precision, dispatched by overload resolution
// from Cusparse::csrmm below.
namespace
{
#ifdef CUSPARSE_GENERIC_INTERFACES
// SpMM through the cuSPARSE generic API: wraps the raw CSR/dense buffers in
// descriptors, sizes and allocates the scratch buffer, runs the product, and
// tears everything down again. B and C are column-major (ldb/ldc leading dims).
template<class MatType, class IndType>
inline void
generic_SpMM(hipsparseHandle_t handle, hipsparseOperation_t transA,
             int m, int n, int k, int nnz,
             int ldb, int ldc,
             const MatType *alpha,
             const MatType *Avals,
             const MatType *Bvals,
             MatType *Cvals,
             const IndType *rowPtr,
             const IndType *colInd,
             const MatType *beta,
             hipDataType matType)
{
    // Create the matrix descriptors
    hipsparseSpMatDescr_t matA_descr;
    hipsparseDnMatDescr_t matB_descr;
    hipsparseDnMatDescr_t matC_descr;
    cusparseCheckError(
        hipsparseCreateCsr(&matA_descr, m, k, nnz, const_cast<IndType*>(rowPtr), const_cast<IndType*>(colInd),
                           const_cast<MatType*>(Avals), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, matType));
    cusparseCheckError(
        hipsparseCreateDnMat(&matB_descr, k, n, ldb, const_cast<MatType*>(Bvals), matType, HIPSPARSE_ORDER_COL));
    cusparseCheckError(
        hipsparseCreateDnMat(&matC_descr, m, n, ldc, const_cast<MatType*>(Cvals), matType, HIPSPARSE_ORDER_COL));
    // Check if a buffer is required, and if so allocate it using caching allocator
    size_t bufferSize = 0;
    cusparseCheckError(
        hipsparseSpMM_bufferSize(handle, transA, HIPSPARSE_OPERATION_NON_TRANSPOSE, alpha, matA_descr, matB_descr,
                                 beta, matC_descr, matType, CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize));
    void* dBuffer = NULL;
    if(bufferSize > 0)
    {
        amgx::memory::hipMalloc(&dBuffer, bufferSize);
    }
    // Compute the sparse matrix - dense matrix product
    cusparseCheckError(
        hipsparseSpMM(handle, transA, HIPSPARSE_OPERATION_NON_TRANSPOSE, alpha, matA_descr, matB_descr, beta,
                      matC_descr, matType, CUSPARSE_SPMM_ALG_DEFAULT, dBuffer));
    // Clean up
    cusparseCheckError(hipsparseDestroySpMat(matA_descr));
    cusparseCheckError(hipsparseDestroyDnMat(matB_descr));
    cusparseCheckError(hipsparseDestroyDnMat(matC_descr));
    if(bufferSize > 0)
    {
        amgx::memory::hipFreeAsync(dBuffer);
    }
}
#endif
// csrmm, float matrix / float dense operands.
void
cusparse_csrmm(hipsparseHandle_t handle, hipsparseOperation_t transA,
               int m, int n, int k, int nnz,
               const float *alpha,
               const hipsparseMatDescr_t descrA,
               const float *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const float *B, int ldb,
               const float *beta, float *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, HIP_R_32F);
#else
    cusparseCheckError(hipsparseScsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
// csrmm, mixed float matrix / double operands: not supported.
void
cusparse_csrmm(hipsparseHandle_t handle, hipsparseOperation_t transA,
               int m, int n, int k, int nnz,
               const double *alpha,
               const hipsparseMatDescr_t descrA,
               const float *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const double *B, int ldb,
               const double *beta, double *C, int ldc)
{
    cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// csrmm, double matrix / double dense operands.
void
cusparse_csrmm(hipsparseHandle_t handle, hipsparseOperation_t transA,
               int m, int n, int k, int nnz,
               const double *alpha,
               const hipsparseMatDescr_t descrA,
               const double *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const double *B, int ldb,
               const double *beta, double *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, HIP_R_64F);
#else
    cusparseCheckError(hipsparseDcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
// csrmm, single-precision complex.
void
cusparse_csrmm(hipsparseHandle_t handle, hipsparseOperation_t transA,
               int m, int n, int k, int nnz,
               const hipComplex *alpha,
               const hipsparseMatDescr_t descrA,
               const hipComplex *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const hipComplex *B, int ldb,
               const hipComplex *beta, hipComplex *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, HIP_C_32F);
#else
    cusparseCheckError(hipsparseCcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
// csrmm, mixed complex matrix / double-complex operands: not supported.
void
cusparse_csrmm(hipsparseHandle_t handle, hipsparseOperation_t transA,
               int m, int n, int k, int nnz,
               const hipDoubleComplex *alpha,
               const hipsparseMatDescr_t descrA,
               const hipComplex *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const hipDoubleComplex *B, int ldb,
               const hipDoubleComplex *beta, hipDoubleComplex *C, int ldc)
{
    cusparseCheckError(HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// csrmm, double-precision complex.
void
cusparse_csrmm(hipsparseHandle_t handle, hipsparseOperation_t transA,
               int m, int n, int k, int nnz,
               const hipDoubleComplex *alpha,
               const hipsparseMatDescr_t descrA,
               const hipDoubleComplex *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const hipDoubleComplex *B, int ldb,
               const hipDoubleComplex *beta, hipDoubleComplex *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, HIP_C_64F);
#else
    cusparseCheckError(hipsparseZcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
}
// Res = alpha * A * V + beta * Res, where V and Res are dense multi-vectors.
// Exchanges the halo of V first on multi-GPU runs, validates dimensions, and
// dispatches to the precision-matched cusparse_csrmm overload. Marks Res
// dirty so later halo exchanges know it changed.
template <class TConfig>
void Cusparse::csrmm(typename TConfig::VecPrec alpha,
                     Matrix<TConfig> &A,
                     Vector<TConfig> &V,
                     typename TConfig::VecPrec beta,
                     Vector<TConfig> &Res)
{
    if (!A.is_matrix_singleGPU())
    {
        A.manager->exchange_halo(V, V.tag);
    }
    if (Res.get_num_rows() != A.get_num_rows() || Res.get_num_cols() != V.get_num_cols())
    {
        FatalError("Cusparse::csrmm error, dimensions of result matrix do not match input matrices.", AMGX_ERR_INTERNAL);
    }
    hipsparseHandle_t handle = Cusparse::get_instance().m_handle;
    cusparse_csrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                   A.get_num_rows(), V.get_num_cols(), A.get_num_cols(),
                   A.values.size(), &alpha, A.cuMatDescr,
                   A.values.raw(), A.row_offsets.raw(), A.col_indices.raw(),
                   V.raw(), V.get_lda(),
                   &beta, Res.raw(), Res.get_lda());
    Res.dirtybit = 1;
}
// CSR transpose via csr2csc: writes the transpose of (Avals, Arows, Acols)
// into the pre-allocated B arrays. Allocates/frees the cuSPARSE scratch
// buffer internally.
template <class T>
void transpose_internal(hipsparseHandle_t handle, int nRows, int nCols, int nNz, const T* Avals, const int* Arows, const int* Acols, T* Bvals, int* Brows, int* Bcols, hipDataType valType)
{
    size_t bufferSize;
    cusparseCheckError(hipsparseCsr2cscEx2_bufferSize(
                           handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, valType,
                           HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO, HIPSPARSE_CSR2CSC_ALG1, &bufferSize));
    void *buffer = nullptr;
    if (bufferSize > 0)
    {
        amgx::memory::hipMalloc(&buffer, bufferSize);
    }
    cusparseCheckError(hipsparseCsr2cscEx2(
                           handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, valType,
                           HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO, HIPSPARSE_CSR2CSC_ALG1, buffer));
    if(bufferSize > 0)
    {
        amgx::memory::hipFreeAsync(buffer);
    }
}
// Typed front-ends mapping each value type to its hipDataType tag.
void transpose_internal(hipsparseHandle_t handle, int nRows, int nCols, int nNz, const float* Avals, const int* Arows, const int* Acols, float* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, HIP_R_32F);
}
void transpose_internal(hipsparseHandle_t handle, int nRows, int nCols, int nNz, const double* Avals, const int* Arows, const int* Acols, double* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, HIP_R_64F);
}
void transpose_internal(hipsparseHandle_t handle, int nRows, int nCols, int nNz, const hipComplex* Avals, const int* Arows, const int* Acols, hipComplex* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, HIP_C_32F);
}
void transpose_internal(hipsparseHandle_t handle, int nRows, int nCols, int nNz, const hipDoubleComplex* Avals, const int* Arows, const int* Acols, hipDoubleComplex* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, HIP_C_64F);
}
// B = A^T restricted to the first nRows rows / nNz nonzeros of A.
// B's arrays must already be sized by the caller.
template <class TConfig>
void Cusparse::transpose(const Matrix<TConfig>& A, Matrix<TConfig>& B, const int nRows, const int nNz)
{
    hipsparseHandle_t handle = Cusparse::get_instance().m_handle;
    transpose_internal(handle, nRows, A.get_num_cols(), nNz,
                       A.values.raw(), A.row_offsets.raw(), A.col_indices.raw(),
                       B.values.raw(), B.row_offsets.raw(), B.col_indices.raw());
}
// B = A^T over the whole of A. B's arrays must already be sized by the caller.
template <class TConfig>
void Cusparse::transpose(const Matrix<TConfig>& A, Matrix<TConfig>& B)
{
    hipsparseHandle_t handle = Cusparse::get_instance().m_handle;
    transpose_internal(handle, A.get_num_rows(), A.get_num_cols(), A.get_num_nz(),
                       A.values.raw(), A.row_offsets.raw(), A.col_indices.raw(),
                       B.values.raw(), B.row_offsets.raw(), B.col_indices.raw());
}
// Explicit template instantiations for every configured build mode. Each
// AMGX_CASE_LINE expansion instantiates one public Cusparse entry point so
// callers in other translation units can link against them.
//#define AMGX_CASE_LINE(CASE) template class Cusparse<TemplateMode<CASE>::Type>;
// AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
//#undef AMGX_CASE_LINE
// bsrmv (basic y = alpha*A*x + beta*y)
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// bsrxmv (masked product)
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrxmv(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type>&, typename Matrix<TemplateMode<CASE>::Type>::IVector&, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// bsrmv_with_mask
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv_with_mask(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// bsrmv_with_mask_restriction
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv_with_mask_restriction(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, Matrix<TemplateMode<CASE>::Type>& );
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// bsrmv with a diagonal scaling vector E
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, const typename Matrix<TemplateMode<CASE>::Type>::MVector&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// color-restricted bsrmv (ColumnColorSelector)
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(ColumnColorSelector, const int, const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// color-restricted bsrmv with diagonal scaling
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(const int, const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, const typename Matrix<TemplateMode<CASE>::Type>::MVector &, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// csrmm (sparse * dense multi-vector)
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::csrmm(typename TemplateMode<CASE>::Type::VecPrec, Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type>&);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// transpose (full and row/nnz-restricted)
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::transpose(const Matrix<TemplateMode<CASE>::Type>& A, Matrix<TemplateMode<CASE>::Type>& B); \
template void Cusparse::transpose(const Matrix<TemplateMode<CASE>::Type>& A, Matrix<TemplateMode<CASE>::Type>& B, const int nRows, const int nNz);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Mixed-precision descriptor helpers, only when mixed precision is enabled.
#ifndef DISABLE_MIXED_PRECISION
#define AMGX_CASE_LINE(CASE) template struct CusparseMatPrec<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#endif
} // namespace amgx
| f93c5838fd2ebbd40878e8ebdfd9d0b1aeb23c33.cu | /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cusparse_v2.h>
#include <error.h>
#include <matrix.h>
#include <vector.h>
#include <types.h>
#include <thrust/sequence.h>
#include <util.h>
#include <sm_utils.inl>
#include <device_properties.h>
#include <amgx_cusparse.h>
#include <amgx_types/util.h>
#if CUDART_VERSION < 11000
#define CUSPARSE_SPMM_ALG_DEFAULT CUSPARSE_MM_ALG_DEFAULT
#endif
#if CUDART_VERSION >= 12000
#define CUSPARSE_CSRMV_ALG1 CUSPARSE_SPMV_CSR_ALG1
#define CUSPARSE_CSRMV_ALG2 CUSPARSE_SPMV_CSR_ALG2
#endif
namespace amgx
{
// Creates the process-wide cuSPARSE handle wrapped by this singleton.
Cusparse::Cusparse() : m_handle(0)
{
    cusparseCheckError( cusparseCreate(&m_handle) );
}
// Releases the cuSPARSE handle.
Cusparse::~Cusparse()
{
    destroy_handle();
}
// Meyers-style singleton accessor; re-creates the handle if it was destroyed.
Cusparse &Cusparse::get_instance()
{
    static Cusparse s_instance;
    s_instance.create_handle();
    return s_instance;
}
#ifndef DISABLE_MIXED_PRECISION
// Default: matrix values stored at full precision in the descriptor.
template <class T_Config>
cusparseStatus_t
CusparseMatPrec<T_Config>::set(cusparseMatDescr_t &cuMatDescr)
{
    return cusparseSetMatFullPrecision(cuMatDescr, true);
}
// Mixed-precision configs (float matrix, double vectors): flag the descriptor
// as half precision so cuSPARSE reinterprets the value buffer correctly.
template <AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indPrec>
cusparseStatus_t CusparseMatPrec< TemplateConfig<t_memSpace, AMGX_vecDouble, AMGX_matFloat, t_indPrec> >::set(cusparseMatDescr_t &cuMatDescr)
{
    return cusparseSetMatFullPrecision(cuMatDescr, false);
}
template <AMGX_MemorySpace t_memSpace, AMGX_IndPrecision t_indPrec>
cusparseStatus_t CusparseMatPrec< TemplateConfig<t_memSpace, AMGX_vecDoubleComplex, AMGX_matComplex, t_indPrec> >::set(cusparseMatDescr_t &cuMatDescr)
{
    return cusparseSetMatFullPrecision(cuMatDescr, false);
}
#endif
// High-level y = alpha*A*x + beta*y over a view of the matrix. On multi-GPU
// runs with distinct interior/exterior views this overlaps the halo exchange
// of x with the interior product (latency hiding); otherwise it exchanges
// first and multiplies the whole view.
template< class TConfig >
void Cusparse::bsrmv(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &A,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y,
    ViewType view )
{
    cudaStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.set_initialized(0);
        A.addProps(CSR);
        A.computeDiagonal();
        A.set_initialized(1);
    }
    // Handle cases where the view is set by the calling routine
    if(view != A.getViewExterior())
    {
        bsrmv_internal(alphaConst, A, x, betaConst, y, view, null_stream);
        return;
    }
    bool latencyHiding = (A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && x.dirtybit != 0);
    if (latencyHiding)
    {
        A.manager->exchange_halo_split_gather(x, x.tag);
        // Multiply interior rows
        bsrmv_internal(alphaConst, A, x, betaConst, y, A.getViewInterior(), null_stream);
        // Finish halo exchange
        A.manager->exchange_halo_split_finish(x, x.tag);
        // Multiply rows with halo dependencies
        ViewType bnd_view = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
        bsrmv_internal(alphaConst, A, x, betaConst, y, bnd_view, null_stream);
    }
    else
    {
        if (!A.is_matrix_singleGPU() && x.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(x, x.tag);
        }
        bsrmv_internal(alphaConst, A, x, betaConst, y, A.getViewExterior(), null_stream);
    }
    // Mark y as modified so downstream consumers re-exchange its halo.
    y.dirtybit = 1;
}
// y = alpha*A*x + beta*y using the matrix's row masks: interior rows are
// multiplied while the halo of x is in flight, then boundary rows afterwards.
// Falls back to a plain OWNED-view product when latency hiding isn't possible.
template< class TConfig >
void Cusparse::bsrmv_with_mask(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &A,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y)
{
    cudaStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.set_initialized(0);
        A.addProps(CSR);
        A.computeDiagonal();
        A.set_initialized(1);
    }
    bool latencyHiding = (A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && x.dirtybit != 0);
    if (latencyHiding)
    {
        A.manager->exchange_halo_split_gather(x, x.tag);
        // Multiply interior
        bsrmv_internal_with_mask(alphaConst, A, x, betaConst, y, INTERIOR, null_stream);
        A.manager->exchange_halo_split_finish(x, x.tag);
        // Multiply exterior
        bsrmv_internal_with_mask(alphaConst, A, x, betaConst, y, BOUNDARY, null_stream);
    }
    else
    {
        if (!A.is_matrix_singleGPU() && x.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(x, x.tag);
        }
        bsrmv_internal(alphaConst, A, x, betaConst, y, OWNED, null_stream);
    }
    // Mark y as modified so downstream consumers re-exchange its halo.
    y.dirtybit = 1;
}
// Restriction product y = alpha*R*x + beta*y. When latency hiding is
// possible, halo rows (HALO1) are computed first so their contribution can be
// communicated (via P's manager) while the owned rows are being multiplied;
// otherwise the whole owned view is multiplied and the halo added afterwards.
// P supplies the distributed manager used for the halo accumulation.
template< class TConfig >
void Cusparse::bsrmv_with_mask_restriction(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &R,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y,
    Matrix<TConfig> &P)
{
    // If only COO, add CSR since bsrmv doesn't support COO
    //if (A.hasProps(COO) && !A.hasProps(CSR))
    //{
    //    A.set_initialized(0);
    //    A.addProps(CSR);
    //    A.computeDiagonal();
    //    A.set_initialized(1);
    //}
    bool latencyHiding = (R.getViewInterior() != R.getViewExterior() && !P.is_matrix_singleGPU() && x.dirtybit != 0);
    if (latencyHiding)
    {
        cudaStream_t null_stream = 0;
        bsrmv_internal_with_mask_restriction(alphaConst, R, x, betaConst, y, HALO1, null_stream, P);
        P.manager->add_from_halo_split_gather(y, y.tag);
        cudaEventRecord(P.manager->get_comm_event());
        bsrmv_internal_with_mask_restriction(alphaConst, R, x, betaConst, y, OWNED, null_stream, P);
        if (P.manager->neighbors.size() != 0)
        {
            // Wait for the halo product before folding in neighbor contributions.
            cudaEventSynchronize(P.manager->get_comm_event());
            P.manager->add_from_halo_split_finish(y, y.tag, P.manager->get_bdy_stream());
            cudaStreamSynchronize(P.manager->get_bdy_stream());
        }
    }
    else
    {
        bsrmv_internal(alphaConst, R, x, betaConst, y, OWNED, 0);
        // Add contribution from neighbors
        P.manager->add_from_halo_v2(y, y.tag);
    }
    // Mark y as modified so downstream consumers re-exchange its halo.
    y.dirtybit = 1;
}
// Masked product y = alpha*A*x + beta*y over only the rows listed in `mask`.
// Only the OWNED view is supported; latency hiding is not implemented here.
template< class TConfig >
void Cusparse::bsrxmv(
    const typename TConfig::VecPrec alphaConst,
    Matrix<TConfig> &A,
    Vector<TConfig> &x,
    const typename TConfig::VecPrec betaConst,
    Vector<TConfig> &y,
    typename Matrix<TConfig>::IVector &mask,
    ViewType view )
{
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.set_initialized(0);
        A.addProps(CSR);
        A.computeDiagonal();
        A.set_initialized(1);
    }
    // Standard CSR convention: row r spans [row_offsets[r], row_offsets[r+1]).
    const int *start_offsets, *end_offsets;
    start_offsets = A.row_offsets.raw();
    end_offsets = A.row_offsets.raw() + 1;
    cusparseDirection_t direction = CUSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = CUSPARSE_DIRECTION_ROW;
    }
    if (view == OWNED) //This is already a view, thus do not even attempt to do latency hiding
    {
        bsrxmv_internal(Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        mask.size(),
                        A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), &alphaConst,
                        A.cuMatDescr,
                        A.values.raw(),
                        mask.raw(),
                        start_offsets, end_offsets,
                        A.col_indices.raw(),
                        A.get_block_dimx(),
                        x.raw(),
                        &betaConst,
                        y.raw());
    }
    else //Try and do latency hiding
    {
        // latency hiding?
        FatalError("Trying to do latency hiding in the bsrxmv", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// E is a vector that represents a diagonal matrix
// operate on all rows and columns
// y = alpha*E.x + beta*y
// When the requested view is the exterior view, overlaps the halo exchange of x
// with the interior portion of the product (latency hiding).
template< class TConfig >
void Cusparse::bsrmv( const typename TConfig::VecPrec alphaConst,
                      Matrix<TConfig> &A,
                      const typename Matrix<TConfig>::MVector &E,
                      Vector<TConfig> &x,
                      const typename TConfig::VecPrec betaConst,
                      Vector<TConfig> &y,
                      ViewType view )
{
    cudaStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.addProps(CSR);
    }
    if (view != A.getViewExterior()) //This is already a view, thus do not even attempt to do latency hiding
    {
        bsrmv_internal(alphaConst, A, E, x, betaConst, y, view, null_stream);
    }
    else //Try and do latency hiding
    {
        ViewType oldView = A.currentView();
        // Start the asynchronous halo exchange of x so communication can
        // overlap with the interior part of the product.
        if (!A.is_matrix_singleGPU())
        {
            A.manager->exchange_halo_async(x, x.tag);
        }
        if (A.getViewExterior() == A.getViewInterior())
        {
            // No distinct interior view: there is nothing to overlap with,
            // so wait for the exchange immediately.
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
        }
        ViewType flags;
        bool latencyHiding = true;
        //if (A.manager->num_neighbors() == 0 || (x.dirtybit == 0)) {
        if (A.is_matrix_singleGPU() || (x.dirtybit == 0))
        {
            // Halo data is already up to date: do everything in a single pass.
            latencyHiding = false;
            A.setViewExterior();
            flags = (ViewType)(A.getViewExterior());
        }
        else
        {
            flags = (ViewType)(A.getViewInterior());
            A.setViewInterior();
        }
        if (latencyHiding)
        {
            // Interior rows first, then wait for the halo and finish the rest.
            bsrmv_internal(alphaConst, A, E, x, betaConst, y, flags, null_stream);
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
            A.setViewExterior();
            // Rows in the exterior view but not in the interior view.
            flags = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
            if (flags != 0)
            {
                bsrmv_internal(alphaConst, A, E, x, betaConst, y, flags, null_stream);
            }
        }
        else
        {
            bsrmv_internal(alphaConst, A, E, x, betaConst, y, flags, null_stream);
        }
        y.dirtybit = 1; // y changed; its halo values are now stale
        //if (!A.is_matrix_singleGPU())
        //  if (y.size() == x.size() && y.delayed_send==0) A.manager->exchange_halo_async(y, y.tag);
        A.setView(oldView);
    }
}
// operate only on columns specified by columnColorSelector, see enum ColumnColorSelector above
// operate only on rows of specified color, given by A.offsets_rows_per_color, A.sorted_rows_by_color
// y = alpha*A.x + beta*y
// When the requested view is the exterior view, overlaps the halo exchange of x
// with the interior portion of the product (latency hiding).
template< class TConfig >
void Cusparse::bsrmv( ColumnColorSelector columnColorSelector,
                      const int color,
                      const typename TConfig::VecPrec alphaConst,
                      Matrix<TConfig> &A,
                      Vector<TConfig> &x,
                      const typename TConfig::VecPrec betaConst,
                      Vector<TConfig> &y,
                      ViewType view )
{
    cudaStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.addProps(CSR);
    }
    if (view != A.getViewExterior()) //This is already a view, thus do not even attempt to do latency hiding
    {
        // std::cerr << "exterior view with selector" << std::endl;
        bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, view, null_stream);
    }
    else
    {
        //Try and do latency hiding
        ViewType oldView = A.currentView();
        // Start the asynchronous halo exchange of x to overlap with the
        // interior part of the product.
        if (!A.is_matrix_singleGPU())
        {
            A.manager->exchange_halo_async(x, x.tag);
        }
        if (A.getViewExterior() == A.getViewInterior())
        {
            // No distinct interior view: nothing to overlap, wait right away.
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
        }
        ViewType flags;
        bool latencyHiding = true;
        if (A.is_matrix_singleGPU() || (x.dirtybit == 0))
        {
            // Halo data is already up to date: do everything in a single pass.
            latencyHiding = false;
            A.setViewExterior();
            flags = (ViewType)(A.getViewExterior());
        }
        else
        {
            flags = (ViewType)(A.getViewInterior());
            A.setViewInterior();
        }
        if (latencyHiding)
        {
            // Interior rows first, then wait for the halo and finish the rest.
            bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, flags, null_stream);
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
            A.setViewExterior();
            // Rows in the exterior view but not in the interior view.
            flags = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
            if (flags != 0)
            {
                bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, flags, null_stream);
            }
        }
        else
        {
            bsrmv_internal(columnColorSelector, color, alphaConst, A, x, betaConst, y, flags, null_stream);
        }
        y.dirtybit = 1; // y changed; its halo values are now stale
        //if (!A.is_matrix_singleGPU() && y.size() == x.size() && y.delayed_send==0)
        //    A.manager->exchange_halo_async(y, y.tag);
        A.setView(oldView);
    }
}
// E is a vector that represents a diagonal matrix
// operate only on rows of specified color, given by A.offsets_rows_per_color, A.sorted_rows_by_color
// y = alpha*E.x + beta*y
// When the requested view is the exterior view, overlaps the halo exchange of x
// with the interior portion of the product (latency hiding).
template< class TConfig >
void Cusparse::bsrmv( const int color,
                      const typename TConfig::VecPrec alphaConst,
                      Matrix<TConfig> &A,
                      const typename Matrix<TConfig>::MVector &E,
                      Vector<TConfig> &x,
                      const typename TConfig::VecPrec betaConst,
                      Vector<TConfig> &y,
                      ViewType view)
{
    cudaStream_t null_stream = 0;
    // If only COO, add CSR since bsrmv doesn't support COO
    if (A.hasProps(COO) && !A.hasProps(CSR))
    {
        A.addProps(CSR);
    }
    if (view != A.getViewExterior()) //This is already a view, thus do not even attempt to do latency hiding
    {
        bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, view, null_stream);
    }
    else //Try and do latency hiding
    {
        ViewType oldView = A.currentView();
        // Start the asynchronous halo exchange of x to overlap with the
        // interior part of the product.
        if (!A.is_matrix_singleGPU())
        {
            A.manager->exchange_halo_async(x, x.tag);
        }
        if (A.getViewExterior() == A.getViewInterior())
        {
            // No distinct interior view: nothing to overlap, wait right away.
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
        }
        ViewType flags;
        bool latencyHiding = true;
        if (A.is_matrix_singleGPU() || (x.dirtybit == 0))
        {
            // Halo data is already up to date: do everything in a single pass.
            latencyHiding = false;
            A.setViewExterior();
            flags = (ViewType)(A.getViewExterior());
        }
        else
        {
            flags = (ViewType)(A.getViewInterior());
            A.setViewInterior();
        }
        if (latencyHiding)
        {
            // Interior rows first, then wait for the halo and finish the rest.
            bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, flags, null_stream);
            if (!A.is_matrix_singleGPU())
            {
                A.manager->exchange_halo_wait(x, x.tag);
            }
            A.setViewExterior();
            // Rows in the exterior view but not in the interior view.
            flags = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
            if (flags != 0)
            {
                bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, flags, null_stream);
            }
        }
        else
        {
            bsrmv_internal(color, alphaConst, A, E, x, betaConst, y, flags, null_stream);
        }
        y.dirtybit = 1; // y changed; its halo values are now stale
        //if (!A.is_matrix_singleGPU() && y.size() == x.size() && y.delayed_send==0)
        //    A.manager->exchange_halo_async(y, y.tag);
        A.setView(oldView);
    }
}
// Rebases a row-offset array so it starts at zero: for each of the nrows+1
// entries, rows[i] = bsrRowPtr[i] - bsrRowPtr[0].
// Launch with at least nrows+1 threads in total; extra threads exit early.
__global__ void offset_by_col_off(int nrows, int* rows, const int* bsrRowPtr)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < nrows + 1)
    {
        rows[tid] = bsrRowPtr[tid] - bsrRowPtr[0];
    }
}
// Single-view SpMV: y[rowOff*bdim ...] = alpha * A(view) * x + beta * y[...].
// Handles the stored (off-diagonal) part of A and, for matrices with an
// external diagonal (DIAG property), the diagonal part, taking care not to
// apply beta twice.
template< class TConfig >
void Cusparse::bsrmv_internal( const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const cudaStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueTypeB;
    int rowOff, nrows, nnz;
    A.getOffsetAndSizeForView(view, &rowOff, &nrows);
    A.getNnzForView(view, &nnz);
    cusparseDirection_t direction = CUSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = CUSPARSE_DIRECTION_ROW;
    }
    bool has_offdiag = nnz != 0;
    if (has_offdiag )
    {
        bsrmv( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE,
               nrows, A.get_num_cols(), nnz, &alphaConst,
               A.cuMatDescr,
               A.values.raw(),
               A.m_seq_offsets.raw() + rowOff,
               A.row_offsets.raw() + rowOff,
               A.col_indices.raw(),
               rowOff,
               A.get_block_dimx(),
               x.raw(),
               &betaConst,
               y.raw() + rowOff * A.get_block_dimx(),
               stream);
    }
    if (A.hasProps(DIAG))
    {
        // If the off-diagonal product already ran, y has been scaled by
        // betaConst once; use beta = 1 so the diagonal contribution is just
        // accumulated on top.
        ValueTypeB beta;
        if (!has_offdiag)
        {
            beta = betaConst;
        }
        else
        {
            beta = types::util<ValueTypeB>::get_one();
        }
        // m_seq_offsets used as row pointers / column indices makes each row
        // contain exactly one entry: its diagonal block.
        bsrmv( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE,
               nrows, A.get_num_cols(), A.get_num_rows(), &alphaConst,
               A.cuMatDescr,
               A.values.raw() + A.diagOffset()*A.get_block_size(),
               A.m_seq_offsets.raw(),
               A.m_seq_offsets.raw() + rowOff,
               A.m_seq_offsets.raw(),
               rowOff,
               A.get_block_dimx(),
               x.raw(), &beta,
               y.raw() + rowOff * A.get_block_dimx(),
               stream);
    }
}
// Masked SpMV over either the INTERIOR or BOUNDARY row set of a multi-GPU
// matrix.  The row list for the requested view is obtained from the
// distributed manager and the multiply is issued on 'stream'.
template< class TConfig >
void Cusparse::bsrmv_internal_with_mask( const typename TConfig::VecPrec alphaConst,
                                         const Matrix<TConfig> &A,
                                         const Vector<TConfig> &x,
                                         const typename TConfig::VecPrec betaConst,
                                         Vector<TConfig> &y,
                                         ViewType view,
                                         const cudaStream_t &stream)
{
    if (A.is_matrix_singleGPU())
    {
        FatalError("Should not be here in bsrmv_internal_with_mask", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if(view != INTERIOR && view != BOUNDARY)
    {
        FatalError("Only INTERIOR and BOUNDARY views supported for bsrmv_internal_with_mask", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename TConfig::VecPrec ValueType;
    cusparseDirection_t direction = CUSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = CUSPARSE_DIRECTION_ROW;
    }
    // Row i of A spans [start_offsets[i], end_offsets[i]) in values/col_indices.
    const int *start_offsets, *end_offsets;
    start_offsets = A.row_offsets.raw();
    end_offsets = A.row_offsets.raw() + 1;
    typedef typename Matrix<TConfig>::index_type index_type;
    int offset, nrows, nnz;
    A.getOffsetAndSizeForView(view, &offset, &nrows);
    A.getNnzForView(view, &nnz);
    if (nrows <= 0)
    {
        return; // nothing to do, early exit
    }
    bool has_offdiag = nnz != 0;
    if (has_offdiag)
    {
        // Run the masked multiply on the caller's stream.
        cusparseSetStream(Cusparse::get_instance().m_handle, stream);
        bsrxmv_internal( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE, nrows,
                         nrows, A.get_num_cols(), nnz, &alphaConst,
                         A.cuMatDescr,
                         A.values.raw(),
                         A.manager->getRowsListForView(view).raw(),
                         start_offsets, end_offsets, A.col_indices.raw(),
                         A.get_block_dimx(),
                         x.raw(), &betaConst,
                         y.raw() );
        // Reset to default stream
        cusparseSetStream(Cusparse::get_instance().m_handle, 0);
    }
    if (A.hasProps(DIAG))
    {
        FatalError("Diag not supported in multiply with mask\n", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Restriction-side SpMV over either the OWNED or HALO1 rows of R, used by
// bsrmv_with_mask_restriction to split the product for communication overlap.
// P supplies the distributed layout of the result vector y.
template< class TConfig >
void Cusparse::bsrmv_internal_with_mask_restriction( const typename TConfig::VecPrec alphaConst,
        const Matrix<TConfig> &R,
        const Vector<TConfig> &x,
        const typename TConfig::VecPrec betaConst,
        Vector<TConfig> &y,
        ViewType view,
        const cudaStream_t &stream,
        Matrix<TConfig> &P)
{
    if (P.is_matrix_singleGPU())
    {
        FatalError("Should not be here in bsrmv_internal_with_mask_with_restriction", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if(view != OWNED && view != HALO1)
    {
        FatalError("View not supported in restriction operation", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename TConfig::VecPrec ValueType;
    cusparseDirection_t direction = CUSPARSE_DIRECTION_COLUMN;
    if ( R.getBlockFormat() == ROW_MAJOR )
    {
        direction = CUSPARSE_DIRECTION_ROW;
    }
    int rowOff, nrows, nnz;
    R.getFixedSizesForView(view, &rowOff, &nrows, &nnz);
    bool has_offdiag = nnz != 0;
    typedef typename Matrix<TConfig>::index_type index_type;
    if (nrows <= 0)
    {
        return; // nothing to do, early exit
    }
    if (has_offdiag)
    {
        bsrmv( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE,
               nrows, R.get_num_cols(), nnz, &alphaConst,
               R.cuMatDescr,
               R.values.raw(),
               R.m_seq_offsets.raw() + rowOff,
               R.row_offsets.raw() + rowOff,
               R.col_indices.raw(),
               rowOff,
               R.get_block_dimx(),
               x.raw(),
               &betaConst,
               y.raw() + rowOff * R.get_block_dimx(),
               stream);
    }
    if (R.hasProps(DIAG))
    {
        FatalError("Diag not supported in multiply with mask\n", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Single-view diagonal multiply: y[rowOff*bdim ...] = alpha * E . x + beta * y,
// where E is a vector of diagonal blocks.  Using m_seq_offsets as row pointers
// and column indices makes each row contain exactly one entry (its diagonal),
// so the generic bsrmv path can be reused.
template< class TConfig >
void Cusparse::bsrmv_internal( const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const typename Matrix<TConfig>::MVector &E,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const cudaStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueType;
    int rowOff, nrows, nnz;
    A.getFixedSizesForView(view, &rowOff, &nrows, &nnz);
    cusparseDirection_t direction = A.getBlockFormat() == ROW_MAJOR ? CUSPARSE_DIRECTION_ROW : CUSPARSE_DIRECTION_COLUMN;
    bsrmv( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE,
           nrows, A.get_num_cols(), nnz, &alphaConst,
           A.cuMatDescr,
           E.raw(),
           A.m_seq_offsets.raw(),
           A.m_seq_offsets.raw() + rowOff,
           A.m_seq_offsets.raw(),
           rowOff,
           A.get_block_dimx(),
           x.raw(), &betaConst,
           y.raw() + rowOff * A.get_block_dimx(),
           stream);
}
// Colored masked SpMV: multiplies only the rows of the given color (taken from
// the matrix coloring) and only the columns selected by columnColorSelector,
// on the requested view.  Issued on 'stream' through the cuSPARSE handle.
template< class TConfig >
void Cusparse::bsrmv_internal( ColumnColorSelector columnColorSelector,
                               const int color,
                               const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const cudaStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueType;
    if (!A.hasProps(COLORING))
    {
        FatalError("Matrix is not colored, exiting", AMGX_ERR_BAD_PARAMETERS);
    }
    if (color < 0 || color >= A.getMatrixColoring().getNumColors())
    {
        FatalError("Unknown color", AMGX_ERR_BAD_PARAMETERS);
    }
    typedef typename Matrix<TConfig>::index_type index_type;
    // Pick the slice of sorted_rows_by_color for this color.  The separation
    // offsets split each color's rows into interior/boundary parts.
    index_type colorStart = 0;
    if ( !(view & INTERIOR) )
    {
        colorStart = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorStart = A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    }
    index_type colorNum = 0;
    if ( view == A.getViewInterior() )
    {
        colorNum = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorNum = A.getMatrixColoring().getOffsetsRowsPerColor()[color + 1];
    }
    colorNum -= colorStart;
    if (colorNum <= 0)
    {
        return; // nothing to do, early exit
    }
    if (columnColorSelector == DIAG_COL)
    {
        FatalError("DIAG_COL has been disabled to avoid allocating diag_offsets", AMGX_ERR_NOT_IMPLEMENTED);
    }
    // Choose per-row start/end offsets so only the requested columns
    // (all / smaller colors / larger colors / diagonal) are traversed.
    const int *start_offsets, *end_offsets;
    switch (columnColorSelector)
    {
        case ALL_COLORS:
            start_offsets = A.row_offsets.raw();
            end_offsets = A.row_offsets.raw() + 1;
            break;
        case SMALLER_COLORS:
            start_offsets = A.row_offsets.raw();
            end_offsets = A.m_smaller_color_offsets.raw();
            break;
        case LARGER_COLORS:
            start_offsets = A.m_larger_color_offsets.raw();
            end_offsets = A.row_offsets.raw() + 1;
            break;
        case DIAG_COL:
            start_offsets = A.diag.raw();
            end_offsets = A.m_diag_end_offsets.raw();
            break;
        default:
            FatalError("Unknown color selector", AMGX_ERR_CORE);
    }
    cusparseDirection_t direction = CUSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = CUSPARSE_DIRECTION_ROW;
    }
    bool has_offdiag = A.get_num_nz() != 0;
    // Run both multiplies on the caller's stream.
    cusparseSetStream(Cusparse::get_instance().m_handle, stream);
    if (has_offdiag)
    {
        bsrxmv_internal( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE, colorNum,
                         A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), &alphaConst,
                         A.cuMatDescr,
                         A.values.raw(),
                         A.getMatrixColoring().getSortedRowsByColor().raw() + colorStart,
                         start_offsets, end_offsets, A.col_indices.raw(),
                         A.get_block_dimx(),
                         x.raw(), &betaConst,
                         y.raw() );
    }
    if (A.hasProps(DIAG) && columnColorSelector == ALL_COLORS)
    {
        // If the off-diagonal product already ran, y was scaled by betaConst
        // once; use beta = 1 so the diagonal part is just accumulated.
        typename TConfig::VecPrec beta = betaConst;
        if (has_offdiag)
        {
            beta = types::util<typename TConfig::VecPrec>::get_one();
        }
        bsrxmv_internal( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE, colorNum,
                         A.get_num_rows(), A.get_num_cols(), A.get_num_rows(), &alphaConst,
                         A.cuMatDescr,
                         A.values.raw() + A.diagOffset()*A.get_block_size(),
                         A.getMatrixColoring().getSortedRowsByColor().raw() + colorStart,
                         A.m_seq_offsets.raw(),
                         NULL,
                         A.m_seq_offsets.raw(),
                         A.get_block_dimx(),
                         x.raw(), &beta,
                         y.raw() );
    }
    // Reset to default stream
    cusparseSetStream(Cusparse::get_instance().m_handle, 0);
}
// Colored diagonal multiply: y = alpha * E . x + beta * y, restricted to the
// rows of the given color on the requested view.  E holds the diagonal blocks;
// m_seq_offsets as row pointers / column indices gives one entry per row.
template< class TConfig >
void Cusparse::bsrmv_internal( const int color,
                               const typename TConfig::VecPrec alphaConst,
                               const Matrix<TConfig> &A,
                               const typename Matrix<TConfig>::MVector &E,
                               const Vector<TConfig> &x,
                               const typename TConfig::VecPrec betaConst,
                               Vector<TConfig> &y,
                               ViewType view,
                               const cudaStream_t &stream)
{
    typedef typename TConfig::VecPrec ValueType;
    if ( !A.hasProps(COLORING) )
    {
        FatalError("Matrix is not colored, exiting", AMGX_ERR_BAD_PARAMETERS);
    }
    if ( color < 0 || color >= A.getMatrixColoring().getNumColors() )
    {
        FatalError("Unknown color", AMGX_ERR_BAD_PARAMETERS);
    }
    typedef typename Matrix<TConfig>::index_type index_type;
    // Pick the slice of sorted_rows_by_color for this color; the separation
    // offsets split each color's rows into interior/boundary parts.
    //const index_type colorStart = ((view & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color] : A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    //const index_type colorNum = ((view == A.getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color] : A.getMatrixColoring().getOffsetsRowsPerColor()[color+1]) - colorStart;
    //const index_type colorStart= A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    //const index_type colorNum= A.getMatrixColoring().getOffsetsRowsPerColor()[color+1] - colorStart;
    index_type colorStart = 0;
    if ( !(view & INTERIOR) )
    {
        colorStart = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorStart = A.getMatrixColoring().getOffsetsRowsPerColor()[color];
    }
    index_type colorNum = 0;
    if ( view == A.getViewInterior() )
    {
        colorNum = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[color];
    }
    else
    {
        colorNum = A.getMatrixColoring().getOffsetsRowsPerColor()[color + 1];
    }
    colorNum -= colorStart;
    if (colorNum <= 0)
    {
        return; // nothing to do, early exit
    }
    cusparseDirection_t direction = CUSPARSE_DIRECTION_COLUMN;
    if ( A.getBlockFormat() == ROW_MAJOR )
    {
        direction = CUSPARSE_DIRECTION_ROW;
    }
    // Run the multiply on the caller's stream.
    cusparseSetStream(Cusparse::get_instance().m_handle, stream);
    bsrxmv_internal( Cusparse::get_instance().m_handle, direction, CUSPARSE_OPERATION_NON_TRANSPOSE, colorNum,
                     A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), &alphaConst,
                     A.cuMatDescr,
                     E.raw(),
                     A.getMatrixColoring().getSortedRowsByColor().raw() + colorStart,
                     A.m_seq_offsets.raw(),
                     NULL,
                     A.m_seq_offsets.raw(),
                     A.get_block_dimx(),
                     x.raw(), &betaConst,
                     y.raw() );
    // Reset to default stream
    cusparseSetStream(Cusparse::get_instance().m_handle, 0);
}
#ifdef CUSPARSE_GENERIC_INTERFACES
// Simple custom implementation of matrix-vector product that has only 1 kernel.
// Computes y = alpha * A * x + beta * y for a scalar CSR matrix with nrows rows.
// Grid-stride loop: one thread per row, any launch configuration is valid.
template<unsigned UNROLL, class T>
__global__ void csrmv(
    int nrows,
    const T alpha,
    const T* __restrict__ csrVal,
    const int* __restrict__ csrRow,
    const int* __restrict__ csrCol,
    const T* __restrict__ x,
    const T beta,
    T* __restrict__ y)
{
    for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < nrows; i += blockDim.x*gridDim.x)
    {
        T y_tmp = amgx::types::util<T>::get_zero();
        int row_b = csrRow[i];
        int row_e = csrRow[i+1];
        // Unrolling is important for performance here.
        // Possible to squeeze more performance out of the key kernels if we
        // measure the sparsity and use it to inform unrolling.
        for (int col = row_b; col < row_e; col += UNROLL)
        {
#pragma unroll UNROLL
            for(int off = 0; off < UNROLL; ++off)
            {
                // Guard against running past the end of the row.
                int c = col + off;
                if(c < row_e) y_tmp = alpha * csrVal[c] * x[csrCol[c]] + y_tmp;
            }
        }
        // Don't read y unnecessarily
        if(amgx::types::util<T>::is_zero(beta))
        {
            y[i] = y_tmp;
        }
        else
        {
            y[i] = beta*y[i] + y_tmp;
        }
    }
}
// Scalar (blockDim == 1) SpMV dispatcher: y = alpha * A * x + beta * y.
// Uses the custom single-kernel csrmv when the row pointer is offset
// (rowOff > 0, which cuSPARSE's generic API cannot handle directly) or when
// the matrix is small; otherwise defers to cusparseSpMV.
// All work is issued on 'stream' (the caller is expected to have set the same
// stream on 'handle' via cusparseSetStream for the cuSPARSE path).
template<class MatType, class VecType, class IndType>
inline void generic_SpMV(cusparseHandle_t handle, cusparseOperation_t trans,
                         int mb, int nb, int nnzb, int rowOff,
                         const MatType *alpha,
                         const MatType *val,
                         const IndType *rowPtr,
                         const IndType *colInd,
                         const VecType *x,
                         const VecType *beta,
                         VecType *y,
                         cudaDataType matType,
                         cudaDataType vecType,
                         const cudaStream_t& stream)
{
    constexpr int cta_size = 128;
    const int sm_count = getSMCount();
    // Assuming that csrmv will be more efficient than cuSPARSE for row counts
    // that are lower than the 3 times the total number of threads
    // cuSPARSE does not like the offsetting required when latency hiding
    // it's possible to reverse the offsets, but requires extra kernel invocation
    // and usually the dependent part of the call is smaller
    if(rowOff > 0 || mb < cta_size * sm_count * 3)
    {
        // Custom single-kernel SpMV, we could actually determine unroll factor
        // more accurately here by checking non-zeros per row
        constexpr int unroll_factor = 16;
        int nblocks = mb / cta_size + 1;
        // Fix: launch on the caller's stream. Previously this launched on the
        // default stream, ignoring 'stream' and breaking the stream ordering
        // the caller set up for latency hiding.
        csrmv<unroll_factor><<<nblocks, cta_size, 0, stream>>>(mb, *alpha, val, rowPtr, colInd, x, *beta, y);
    }
    else
    {
        // The generic API takes non-const pointers; the data is not modified.
        cusparseSpMatDescr_t matA_descr;
        cusparseDnVecDescr_t vecX_descr;
        cusparseDnVecDescr_t vecY_descr;
        cusparseCheckError(cusparseCreateDnVec(&vecX_descr, nb, const_cast<VecType*>(x), vecType));
        cusparseCheckError(cusparseCreateDnVec(&vecY_descr, mb, const_cast<VecType*>(y), vecType));
        cusparseCheckError(
            cusparseCreateCsr(&matA_descr, mb, nb, nnzb, const_cast<IndType*>(rowPtr), const_cast<IndType*>(colInd),
                              const_cast<MatType*>(val), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, matType));
        // Query and allocate the external workspace cusparseSpMV may need.
        size_t bufferSize = 0;
        cusparseCheckError(cusparseSpMV_bufferSize(handle, trans, alpha, matA_descr, vecX_descr, beta, vecY_descr, matType, CUSPARSE_CSRMV_ALG2, &bufferSize));
        void* dBuffer = NULL;
        if(bufferSize > 0)
        {
            amgx::memory::cudaMalloc(&dBuffer, bufferSize);
        }
        cusparseCheckError(cusparseSpMV(handle, trans, alpha, matA_descr, vecX_descr, beta, vecY_descr, matType, CUSPARSE_CSRMV_ALG2, dBuffer) );
        cusparseCheckError(cusparseDestroySpMat(matA_descr));
        cusparseCheckError(cusparseDestroyDnVec(vecX_descr));
        cusparseCheckError(cusparseDestroyDnVec(vecY_descr));
        if(bufferSize > 0)
        {
            amgx::memory::cudaFreeAsync(dBuffer);
        }
    }
}
#endif
// bsrmv overload: float matrix, float vectors.
// Runs on the caller's stream; dispatches to the scalar CSR path when the
// block dimension is 1, and to cuSPARSE's BSR routine otherwise.
inline void Cusparse::bsrmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const float *alpha,
                             const cusparseMatDescr_t descr,
                             const float *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const float *x,
                             const float *beta,
                             float *y,
                             const cudaStream_t& stream)
{
    // All cuSPARSE work below must be ordered on the caller's stream.
    cusparseSetStream(handle, stream);
    if (blockDim != 1)
    {
        // True block matrix: use the BSR matrix-vector routine.
        cusparseCheckError(cusparseSbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    else
    {
        // Scalar (CSR) case.
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, CUDA_R_32F, CUDA_R_32F, stream);
#else
        cusparseCheckError(cusparseScsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    // Restore the handle to the default stream.
    cusparseSetStream(handle, 0);
}
// bsrmv overload: double matrix, double vectors.
// Runs on the caller's stream; dispatches to the scalar CSR path when the
// block dimension is 1, and to cuSPARSE's BSR routine otherwise.
inline void Cusparse::bsrmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const double *alpha,
                             const cusparseMatDescr_t descr,
                             const double *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const double *x,
                             const double *beta,
                             double *y,
                             const cudaStream_t& stream)
{
    // All cuSPARSE work below must be ordered on the caller's stream.
    cusparseSetStream(handle, stream);
    if (blockDim != 1)
    {
        // True block matrix: use the BSR matrix-vector routine.
        cusparseCheckError(cusparseDbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    else
    {
        // Scalar (CSR) case.
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, CUDA_R_64F, CUDA_R_64F, stream);
#else
        cusparseCheckError(cusparseDcsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    // Restore the handle to the default stream.
    cusparseSetStream(handle, 0);
}
// bsrmv overload: float matrix values with double vectors (mixed precision).
// The float values are passed through cusparseDbsrxmv by reinterpreting the
// pointer; see the inline comment about the private API used at matrix
// initialization. Runs on the caller's stream.
inline void Cusparse::bsrmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const double *alpha,
                             const cusparseMatDescr_t descr,
                             const float *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const double *x,
                             const double *beta,
                             double *y,
                             const cudaStream_t& stream)
{
    // Run cuSparse on selected stream
    cusparseSetStream(handle, stream);
#ifndef DISABLE_MIXED_PRECISION
    const double *d_bsrVal = reinterpret_cast<const double *>(const_cast<float *>(bsrVal)); // this works due to private API call in the matrix initialization which sets cusparse matrix description in the half precision mode
    cusparseCheckError(cusparseDbsrxmv(handle, dir, trans, mb, mb, nb, nnzb, alpha, descr, d_bsrVal, bsrMaskPtr, bsrRowPtr, bsrRowPtr + 1, bsrColInd, blockDim, x, beta, y));
#else
    FatalError("Mixed precision modes not currently supported for CUDA 10.1 or later.", AMGX_ERR_NOT_IMPLEMENTED);
#endif
    // Reset cuSparse to default stream
    cusparseSetStream(handle, 0);
}
// Custom implementation of matrix-vector product to replace the original bsrxmv,
// but with block size of 1.
// Computes y[row] = alpha * A[row,:] * x + beta * y[row] for every row listed
// in csrMask (sizeOfMask entries). Grid-stride loop: any launch config works.
template<unsigned UNROLL, class T>
__global__ void csrxmv(
    int sizeOfMask,
    const T alpha,
    const T* __restrict__ csrVal,
    const int* __restrict__ csrMask,
    const int* __restrict__ csrRow,
    const int* __restrict__ csrCol,
    const T* __restrict__ x,
    const T beta,
    T* __restrict__ y)
{
    for(int i = threadIdx.x + blockIdx.x*blockDim.x; i < sizeOfMask; i += blockDim.x*gridDim.x)
    {
        int row = csrMask[i]; // masked indirection: only listed rows are touched
        T y_tmp = amgx::types::util<T>::get_zero();
        int row_b = csrRow[row];
        int row_e = csrRow[row+1];
        // Unrolling is important for performance here.
        // Possible to squeeze more performance out of the key kernels if we
        // measure the sparsity and use it to inform unrolling.
        for (int col = row_b; col < row_e; col += UNROLL)
        {
#pragma unroll UNROLL
            for(int off = 0; off < UNROLL; ++off)
            {
                // Guard against running past the end of the row.
                int c = col + off;
                if(c < row_e) y_tmp = alpha * csrVal[c] * x[csrCol[c]] + y_tmp;
            }
        }
        // Don't read y unnecessarily
        if(amgx::types::util<T>::is_zero(beta))
        {
            y[row] = y_tmp;
        }
        else
        {
            y[row] = beta*y[row] + y_tmp;
        }
    }
}
// Replaces the functionality of cusparse?bsrxmv for blockDim == 1.
// Launches the custom csrxmv kernel over the masked row set.
// Note: bsrEndPtr is accepted for interface parity but the kernel derives row
// ends from bsrRowPtr[row+1]; callers pass bsrRowPtr+1 here.
template<class T>
inline void Xcsrxmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                     int mb, int nb, int nnzb,
                     const T *alpha,
                     const cusparseMatDescr_t descr,
                     const T *bsrVal,
                     const int *bsrMaskPtr,
                     const int *bsrRowPtr,
                     const int *bsrEndPtr,
                     const int *bsrColInd,
                     int blockDim,
                     const T *x,
                     const T *beta,
                     T *y)
{
    if(blockDim != 1)
    {
        FatalError("Xcsrxmv only to be called with scalar matrices.", AMGX_ERR_INTERNAL);
    }
    if (trans != CUSPARSE_OPERATION_NON_TRANSPOSE)
    {
        FatalError("Cannot currently latency hide a transposed matrix.", AMGX_ERR_NOT_IMPLEMENTED);
    }
    if (dir != CUSPARSE_DIRECTION_ROW)
    {
        FatalError("Cannot currently latency hide if matrix is not row major.", AMGX_ERR_NOT_IMPLEMENTED);
    }
    constexpr int cta_size = 128;
    constexpr int unroll_factor = 16;
    int nblocks = sizeOfMask / cta_size + 1;
    // Fix: callers set their stream on the cuSPARSE handle before calling the
    // bsrxmv wrappers; previously the kernel was launched on the default
    // stream, ignoring that and breaking the intended stream ordering.
    cudaStream_t stream = 0;
    cusparseCheckError(cusparseGetStream(handle, &stream));
    csrxmv<unroll_factor><<<nblocks, cta_size, 0, stream>>>(sizeOfMask, *alpha, bsrVal, bsrMaskPtr, bsrRowPtr, bsrColInd, x, *beta, y);
}
// overloaded C++ wrappers for cusparse?bsrxmv
// bsrxmv
// matrix - float
// vector - float
// bsrxmv overload: float matrix, float vectors.
// Requires at least one of bsrMaskPtr / bsrEndPtr; when bsrEndPtr is absent,
// row ends default to bsrRowPtr + 1 (standard CSR layout).
inline void Cusparse::bsrxmv_internal(cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                                      int mb, int nb, int nnzb,
                                      const float *alpha,
                                      const cusparseMatDescr_t descr,
                                      const float *bsrVal,
                                      const int *bsrMaskPtr,
                                      const int *bsrRowPtr,
                                      const int *bsrEndPtr,
                                      const int *bsrColInd,
                                      int blockDim,
                                      const float *x,
                                      const float *beta,
                                      float *y)
{
    if (bsrMaskPtr == NULL && bsrEndPtr == NULL)
    {
        // Neither a mask nor explicit row ends: unsupported configuration.
        cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        const int *rowEnd = (bsrEndPtr != NULL) ? bsrEndPtr : (bsrRowPtr + 1);
        if (blockDim == 1)
        {
            // Scalar matrix: use the custom masked CSR kernel.
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, rowEnd, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseSbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, rowEnd, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// bsrxmv
// matrix - float
// vector - double
// This mixed-precision combination has no cuSPARSE bsrxmv implementation:
// unconditionally report CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED.
inline void Cusparse::bsrxmv_internal( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const double *alpha,
                                       const cusparseMatDescr_t descr,
                                       const float *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const double *x,
                                       const double *beta,
                                       double *y)
{
    cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// bsrxmv
// matrix - double
// vector - double
// bsrxmv overload: double matrix, double vectors.
// Requires at least one of bsrMaskPtr / bsrEndPtr; when bsrEndPtr is absent,
// row ends default to bsrRowPtr + 1 (standard CSR layout).
inline void Cusparse::bsrxmv_internal( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const double *alpha,
                                       const cusparseMatDescr_t descr,
                                       const double *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const double *x,
                                       const double *beta,
                                       double *y)
{
    if (bsrMaskPtr == NULL && bsrEndPtr == NULL)
    {
        // Neither a mask nor explicit row ends: unsupported configuration.
        cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        const int *rowEnd = (bsrEndPtr != NULL) ? bsrEndPtr : (bsrRowPtr + 1);
        if (blockDim == 1)
        {
            // Scalar matrix: use the custom masked CSR kernel.
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, rowEnd, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseDbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, rowEnd, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// bsrmv overload: cuComplex matrix, cuComplex vectors.
// Runs on the caller's stream; dispatches to the scalar CSR path when the
// block dimension is 1, and to cuSPARSE's BSR routine otherwise.
inline void Cusparse::bsrmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const cuComplex *alpha,
                             const cusparseMatDescr_t descr,
                             const cuComplex *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const cuComplex *x,
                             const cuComplex *beta,
                             cuComplex *y,
                             const cudaStream_t& stream)
{
    // All cuSPARSE work below must be ordered on the caller's stream.
    cusparseSetStream(handle, stream);
    if (blockDim != 1)
    {
        // True block matrix: use the BSR matrix-vector routine.
        cusparseCheckError(cusparseCbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    else
    {
        // Scalar (CSR) case.
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, CUDA_C_32F, CUDA_C_32F, stream);
#else
        cusparseCheckError(cusparseCcsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    // Restore the handle to the default stream.
    cusparseSetStream(handle, 0);
}
// bsrmv overload: cuDoubleComplex matrix, cuDoubleComplex vectors.
// Runs on the caller's stream; dispatches to the scalar CSR path when the
// block dimension is 1, and to cuSPARSE's BSR routine otherwise.
inline void Cusparse::bsrmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const cuDoubleComplex *alpha,
                             const cusparseMatDescr_t descr,
                             const cuDoubleComplex *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const cuDoubleComplex *x,
                             const cuDoubleComplex *beta,
                             cuDoubleComplex *y,
                             const cudaStream_t& stream)
{
    // All cuSPARSE work below must be ordered on the caller's stream.
    cusparseSetStream(handle, stream);
    if (blockDim != 1)
    {
        // True block matrix: use the BSR matrix-vector routine.
        cusparseCheckError(cusparseZbsrmv(handle, dir, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, blockDim, x, beta, y));
    }
    else
    {
        // Scalar (CSR) case.
#ifdef CUSPARSE_GENERIC_INTERFACES
        generic_SpMV(handle, trans, mb, nb, nnzb, rowOff, alpha, bsrVal, bsrRowPtr, bsrColInd, x, beta, y, CUDA_C_64F, CUDA_C_64F, stream);
#else
        cusparseCheckError(cusparseZcsrmv(handle, trans, mb, nb, nnzb, alpha, descr, bsrVal, bsrRowPtr, bsrColInd, x, beta, y));
#endif
    }
    // Restore the handle to the default stream.
    cusparseSetStream(handle, 0);
}
// bsrmv overload: cuComplex matrix values with cuDoubleComplex vectors (mixed
// precision).  The cuComplex values are passed through cusparseZbsrxmv by
// reinterpreting the pointer (same private-API hack as the float/double
// overload above). Runs on the caller's stream.
inline void Cusparse::bsrmv( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans,
                             int mb, int nb, int nnzb,
                             const cuDoubleComplex *alpha,
                             const cusparseMatDescr_t descr,
                             const cuComplex *bsrVal,
                             const int *bsrMaskPtr,
                             const int *bsrRowPtr,
                             const int *bsrColInd,
                             int rowOff,
                             int blockDim,
                             const cuDoubleComplex *x,
                             const cuDoubleComplex *beta,
                             cuDoubleComplex *y,
                             const cudaStream_t& stream)
{
    // Run cuSparse on selected stream
    cusparseSetStream(handle, stream);
#ifndef DISABLE_MIXED_PRECISION
    const cuDoubleComplex *d_bsrVal = reinterpret_cast<cuDoubleComplex *>(const_cast<cuComplex *>(bsrVal));
    cusparseCheckError(cusparseZbsrxmv(handle, dir, trans, mb, mb, nb, nnzb, alpha, descr, d_bsrVal, bsrMaskPtr, bsrRowPtr, bsrRowPtr + 1, bsrColInd, blockDim, x, beta, y));
#else
    FatalError("Mixed precision modes not currently supported for CUDA 10.1 or later.", AMGX_ERR_NOT_IMPLEMENTED);
#endif
    // Reset cuSparse to default stream
    cusparseSetStream(handle, 0);
}
// overloaded C++ wrappers for cusparse?bsrxmv
// bsrxmv
// matrix - cuComplex
// vector - cuComplex
// Masked BSR SpMV (single-complex matrix and vectors): updates only the block
// rows listed in bsrMaskPtr (sizeOfMask entries). A NULL bsrEndPtr defaults to
// the contiguous-CSR convention (row i ends where row i+1 starts). Calling
// with neither a mask nor an end pointer is rejected — use bsrmv for that.
inline void Cusparse::bsrxmv_internal( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const cuComplex *alpha,
                                       const cusparseMatDescr_t descr,
                                       const cuComplex *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const cuComplex *x,
                                       const cuComplex *beta,
                                       cuComplex *y)
{
    if (bsrEndPtr == NULL && bsrMaskPtr == NULL)
    {
        cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        if (bsrEndPtr == NULL)
        {
            bsrEndPtr = bsrRowPtr + 1;
        }
        if (blockDim == 1)
        {
            // cuSPARSE has no masked csrmv; fall back to the hand-rolled kernel.
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseCbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// bsrxmv
// matrix - cuComplex
// vector - cuDoubleComplex
// Masked BSR SpMV, mixed precision (single-complex matrix, double-complex
// vectors): no cuSPARSE routine exists for this combination, so it always
// reports CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED through the error macro.
inline void Cusparse::bsrxmv_internal( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const cuDoubleComplex *alpha,
                                       const cusparseMatDescr_t descr,
                                       const cuComplex *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const cuDoubleComplex *x,
                                       const cuDoubleComplex *beta,
                                       cuDoubleComplex *y)
{
    cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// bsrxmv
// matrix - cuDoubleComplex
// vector - cuDoubleComplex
// Masked BSR SpMV (double-complex matrix and vectors): updates only the block
// rows listed in bsrMaskPtr. NULL bsrEndPtr defaults to contiguous-CSR rows;
// calling with neither a mask nor an end pointer is rejected.
inline void Cusparse::bsrxmv_internal( cusparseHandle_t handle, cusparseDirection_t dir, cusparseOperation_t trans, int sizeOfMask,
                                       int mb, int nb, int nnzb,
                                       const cuDoubleComplex *alpha,
                                       const cusparseMatDescr_t descr,
                                       const cuDoubleComplex *bsrVal,
                                       const int *bsrMaskPtr,
                                       const int *bsrRowPtr,
                                       const int *bsrEndPtr,
                                       const int *bsrColInd,
                                       int blockDim,
                                       const cuDoubleComplex *x,
                                       const cuDoubleComplex *beta,
                                       cuDoubleComplex *y)
{
    if (bsrEndPtr == NULL && bsrMaskPtr == NULL)
    {
        cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    }
    else
    {
        if (bsrEndPtr == NULL)
        {
            bsrEndPtr = bsrRowPtr + 1;
        }
        if (blockDim == 1)
        {
            // cuSPARSE has no masked csrmv; fall back to the hand-rolled kernel.
            Xcsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y);
        }
        else
        {
            cusparseCheckError(cusparseZbsrxmv(handle, dir, trans, sizeOfMask, mb, nb, nnzb, alpha, descr, bsrVal, bsrMaskPtr, bsrRowPtr, bsrEndPtr, bsrColInd, blockDim, x, beta, y));
        }
    }
}
// Anonymous namespace: overload set implementing csrmm (sparse * dense ->
// dense) for each precision, dispatching to the cuSPARSE generic API when
// available and to the legacy csrmm entry points otherwise.
namespace
{
#ifdef CUSPARSE_GENERIC_INTERFACES
// Generic-API sparse * dense product: C = alpha * op(A) * B + beta * C.
// A is an m x k CSR matrix with nnz non-zeros; B (k x n) and C (m x n) are
// dense, column-major, with leading dimensions ldb / ldc. matType is the
// cudaDataType shared by all three operands; indices are 32-bit, 0-based.
template<class MatType, class IndType>
inline void
generic_SpMM(cusparseHandle_t handle, cusparseOperation_t transA,
             int m, int n, int k, int nnz,
             int ldb, int ldc,
             const MatType *alpha,
             const MatType *Avals,
             const MatType *Bvals,
             MatType *Cvals,
             const IndType *rowPtr,
             const IndType *colInd,
             const MatType *beta,
             cudaDataType matType)
{
    // Create the matrix descriptors
    cusparseSpMatDescr_t matA_descr;
    cusparseDnMatDescr_t matB_descr;
    cusparseDnMatDescr_t matC_descr;
    cusparseCheckError(
        cusparseCreateCsr(&matA_descr, m, k, nnz, const_cast<IndType*>(rowPtr), const_cast<IndType*>(colInd),
                          const_cast<MatType*>(Avals), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, matType));
    cusparseCheckError(
        cusparseCreateDnMat(&matB_descr, k, n, ldb, const_cast<MatType*>(Bvals), matType, CUSPARSE_ORDER_COL));
    cusparseCheckError(
        cusparseCreateDnMat(&matC_descr, m, n, ldc, const_cast<MatType*>(Cvals), matType, CUSPARSE_ORDER_COL));
    // Check if a buffer is required, and if so allocate it using caching allocator
    size_t bufferSize = 0;
    cusparseCheckError(
        cusparseSpMM_bufferSize(handle, transA, CUSPARSE_OPERATION_NON_TRANSPOSE, alpha, matA_descr, matB_descr,
                                beta, matC_descr, matType, CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize));
    void* dBuffer = NULL;
    if(bufferSize > 0)
    {
        amgx::memory::cudaMalloc(&dBuffer, bufferSize);
    }
    // Compute the sparse matrix - dense matrix product
    cusparseCheckError(
        cusparseSpMM(handle, transA, CUSPARSE_OPERATION_NON_TRANSPOSE, alpha, matA_descr, matB_descr, beta,
                     matC_descr, matType, CUSPARSE_SPMM_ALG_DEFAULT, dBuffer));
    // Clean up
    cusparseCheckError(cusparseDestroySpMat(matA_descr));
    cusparseCheckError(cusparseDestroyDnMat(matB_descr));
    cusparseCheckError(cusparseDestroyDnMat(matC_descr));
    if(bufferSize > 0)
    {
        amgx::memory::cudaFreeAsync(dBuffer);
    }
}
#endif
// float matrix, float dense operands.
void
cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t transA,
               int m, int n, int k, int nnz,
               const float *alpha,
               const cusparseMatDescr_t descrA,
               const float *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const float *B, int ldb,
               const float *beta, float *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, CUDA_R_32F);
#else
    cusparseCheckError(cusparseScsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
// Mixed float matrix / double operands: not supported by cuSPARSE.
void
cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t transA,
               int m, int n, int k, int nnz,
               const double *alpha,
               const cusparseMatDescr_t descrA,
               const float *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const double *B, int ldb,
               const double *beta, double *C, int ldc)
{
    cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// double matrix, double dense operands.
void
cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t transA,
               int m, int n, int k, int nnz,
               const double *alpha,
               const cusparseMatDescr_t descrA,
               const double *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const double *B, int ldb,
               const double *beta, double *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, CUDA_R_64F);
#else
    cusparseCheckError(cusparseDcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
// single-complex matrix and operands.
void
cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t transA,
               int m, int n, int k, int nnz,
               const cuComplex *alpha,
               const cusparseMatDescr_t descrA,
               const cuComplex *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const cuComplex *B, int ldb,
               const cuComplex *beta, cuComplex *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, CUDA_C_32F);
#else
    cusparseCheckError(cusparseCcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
// Mixed single-complex matrix / double-complex operands: not supported.
void
cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t transA,
               int m, int n, int k, int nnz,
               const cuDoubleComplex *alpha,
               const cusparseMatDescr_t descrA,
               const cuComplex *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const cuDoubleComplex *B, int ldb,
               const cuDoubleComplex *beta, cuDoubleComplex *C, int ldc)
{
    cusparseCheckError(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
}
// double-complex matrix and operands.
void
cusparse_csrmm(cusparseHandle_t handle, cusparseOperation_t transA,
               int m, int n, int k, int nnz,
               const cuDoubleComplex *alpha,
               const cusparseMatDescr_t descrA,
               const cuDoubleComplex *csrValA,
               const int *csrRowPtrA, const int *csrColIndA,
               const cuDoubleComplex *B, int ldb,
               const cuDoubleComplex *beta, cuDoubleComplex *C, int ldc)
{
#ifdef CUSPARSE_GENERIC_INTERFACES
    generic_SpMM(handle, transA, m, n, k, nnz, ldb, ldc, alpha, csrValA, B, C, csrRowPtrA, csrColIndA, beta, CUDA_C_64F);
#else
    cusparseCheckError(cusparseZcsrmm(handle, transA, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc));
#endif
}
}
// Res = alpha * A * V + beta * Res for a CSR matrix A and multi-vector V.
// Exchanges the halo of V first on multi-GPU runs, then dispatches to the
// precision-specific cusparse_csrmm overloads above.
// NOTE(review): A.values.size() is passed as nnz, which equals the non-zero
// count only for 1x1 (scalar) blocks — confirm callers never pass block
// matrices here.
template <class TConfig>
void Cusparse::csrmm(typename TConfig::VecPrec alpha,
                     Matrix<TConfig> &A,
                     Vector<TConfig> &V,
                     typename TConfig::VecPrec beta,
                     Vector<TConfig> &Res)
{
    if (!A.is_matrix_singleGPU())
    {
        A.manager->exchange_halo(V, V.tag);
    }
    if (Res.get_num_rows() != A.get_num_rows() || Res.get_num_cols() != V.get_num_cols())
    {
        FatalError("Cusparse::csrmm error, dimensions of result matrix do not match input matrices.", AMGX_ERR_INTERNAL);
    }
    cusparseHandle_t handle = Cusparse::get_instance().m_handle;
    cusparse_csrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                   A.get_num_rows(), V.get_num_cols(), A.get_num_cols(),
                   A.values.size(), &alpha, A.cuMatDescr,
                   A.values.raw(), A.row_offsets.raw(), A.col_indices.raw(),
                   V.raw(), V.get_lda(),
                   &beta, Res.raw(), Res.get_lda());
    // Mark the result dirty so halo data is re-exchanged before next use.
    Res.dirtybit = 1;
}
// CSR -> CSC conversion (i.e. an explicit transpose, since CUSPARSE_ACTION_NUMERIC
// moves the values as well). Uses the two-phase buffer-size / execute pattern of
// cusparseCsr2cscEx2, with the scratch buffer taken from the caching allocator.
template <class T>
void transpose_internal(cusparseHandle_t handle, int nRows, int nCols, int nNz, const T* Avals, const int* Arows, const int* Acols, T* Bvals, int* Brows, int* Bcols, cudaDataType valType)
{
    size_t bufferSize;
    cusparseCheckError(cusparseCsr2cscEx2_bufferSize(
                           handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, valType,
                           CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, &bufferSize));
    void *buffer = nullptr;
    if (bufferSize > 0)
    {
        amgx::memory::cudaMalloc(&buffer, bufferSize);
    }
    cusparseCheckError(cusparseCsr2cscEx2(
                           handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, valType,
                           CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, buffer));
    if(bufferSize > 0)
    {
        amgx::memory::cudaFreeAsync(buffer);
    }
}
// Precision-dispatch wrappers: bind each value type to its cudaDataType tag
// and forward to the templated csr2csc transpose above.
void transpose_internal(cusparseHandle_t handle, int nRows, int nCols, int nNz, const float* Avals, const int* Arows, const int* Acols, float* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, CUDA_R_32F);
}
void transpose_internal(cusparseHandle_t handle, int nRows, int nCols, int nNz, const double* Avals, const int* Arows, const int* Acols, double* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, CUDA_R_64F);
}
void transpose_internal(cusparseHandle_t handle, int nRows, int nCols, int nNz, const cuComplex* Avals, const int* Arows, const int* Acols, cuComplex* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, CUDA_C_32F);
}
void transpose_internal(cusparseHandle_t handle, int nRows, int nCols, int nNz, const cuDoubleComplex* Avals, const int* Arows, const int* Acols, cuDoubleComplex* Bvals, int* Brows, int* Bcols)
{
    transpose_internal(handle, nRows, nCols, nNz, Avals, Arows, Acols, Bvals, Brows, Bcols, CUDA_C_64F);
}
// Transpose a leading nRows x A.get_num_cols() / nNz portion of A into B.
// NOTE(review): nRows/nNz override A's own sizes but the column count is still
// taken from A — callers presumably transpose a row-truncated view; confirm.
// B's arrays must already be sized to hold the result.
template <class TConfig>
void Cusparse::transpose(const Matrix<TConfig>& A, Matrix<TConfig>& B, const int nRows, const int nNz)
{
    cusparseHandle_t handle = Cusparse::get_instance().m_handle;
    transpose_internal(handle, nRows, A.get_num_cols(), nNz,
                       A.values.raw(), A.row_offsets.raw(), A.col_indices.raw(),
                       B.values.raw(), B.row_offsets.raw(), B.col_indices.raw());
}
// Full transpose of A into B (B = A^T) using cuSPARSE csr2csc.
// B's arrays must already be sized to hold the result.
template <class TConfig>
void Cusparse::transpose(const Matrix<TConfig>& A, Matrix<TConfig>& B)
{
    cusparseHandle_t handle = Cusparse::get_instance().m_handle;
    transpose_internal(handle, A.get_num_rows(), A.get_num_cols(), A.get_num_nz(),
                       A.values.raw(), A.row_offsets.raw(), A.col_indices.raw(),
                       B.values.raw(), B.row_offsets.raw(), B.col_indices.raw());
}
//#define AMGX_CASE_LINE(CASE) template class Cusparse<TemplateMode<CASE>::Type>;
// AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
//#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrxmv(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type>&, typename Matrix<TemplateMode<CASE>::Type>::IVector&, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv_with_mask(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv_with_mask_restriction(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, Matrix<TemplateMode<CASE>::Type>& );
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, const typename Matrix<TemplateMode<CASE>::Type>::MVector&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(ColumnColorSelector, const int, const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::bsrmv(const int, const typename TemplateMode<CASE>::Type::VecPrec , Matrix<TemplateMode<CASE>::Type>&, const typename Matrix<TemplateMode<CASE>::Type>::MVector &, Vector<TemplateMode<CASE>::Type>&, const typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::csrmm(typename TemplateMode<CASE>::Type::VecPrec, Matrix<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type>&, typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type>&);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) \
template void Cusparse::transpose(const Matrix<TemplateMode<CASE>::Type>& A, Matrix<TemplateMode<CASE>::Type>& B); \
template void Cusparse::transpose(const Matrix<TemplateMode<CASE>::Type>& A, Matrix<TemplateMode<CASE>::Type>& B, const int nRows, const int nNz);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#ifndef DISABLE_MIXED_PRECISION
#define AMGX_CASE_LINE(CASE) template struct CusparseMatPrec<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#endif
} // namespace amgx
|
7b4cd285d4e01ed36eaa55ede3200de5004d1269.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan.h"
#define MAX_BLOCK_SIZE 1024
#define MAX_GRID_SIZE 65535
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define NO_BANK_CONFLICTS
#ifdef NO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
(((n) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(a) (0)
#endif
// Round x up to the nearest power of two.
// Matches the bit-smearing implementation exactly: negative inputs and 0
// both yield 0; x in (2^k-1, 2^k] yields 2^k.
inline int pow2roundup (int x)
{
    if (x <= 0)
        return 0;
    int p = 1;
    while (p < x)
        p <<= 1;
    return p;
}
// Work-efficient (Blelloch) exclusive prefix sum, one row per block.
// Launch: gridDim.x == height, blockDim.x == pow2roundup(width)/2 (each
// thread loads/stores two elements). Dynamic shared memory must hold
// 2*blockDim.x floats plus CONFLICT_FREE_OFFSET padding (see host wrapper).
// NOTE(review): with LOG_NUM_BANKS == 5 the padding macro is n >> 10, which
// is zero for all indices below 1024 — the padding is effectively inert at
// the supported row widths.
__global__ void exclusive_scan_kernel(float* dev_in, float* dev_out, int width, int height)
{
    extern __shared__ float temp[];
    //Offset pointers to this block's row. Avoids the need for more complex indexing
    dev_in += width*blockIdx.x;
    dev_out += width*blockIdx.x;
    //Now each row is working with it's own row like a normal exclusive scan of an array length width.
    int index = threadIdx.x;
    int offset = 1;
    int n = 2*blockDim.x;//get actual temp padding
    // Each thread owns elements ai (first half) and bi (second half).
    int ai = index;
    int bi = index + n/2;
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    //Bounds checking, load shared mem; pad out-of-range elements with 0 so the
    //scan over the power-of-two sized buffer is unaffected.
    temp[ai+bankOffsetA] = (ai < width)?dev_in[ai]:0;
    temp[bi+bankOffsetB] = (bi < width)?dev_in[bi]:0;
    //Reduction step: up-sweep builds partial sums in a binary tree.
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads(); //Make sure previous step has completed
        if (index < d)
        {
            int ai2 = offset*(2*index+1)-1;
            int bi2 = offset*(2*index+2)-1;
            ai2 += CONFLICT_FREE_OFFSET(ai2);
            bi2 += CONFLICT_FREE_OFFSET(bi2);
            temp[bi2] += temp[ai2];
        }
        offset *= 2; //Adjust offset
    }
    //Reduction complete
    //Clear last element (identity for the exclusive scan)
    if(index == 0)
        temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)] = 0;
    //Sweep down
    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads(); //wait for previous step to finish
        if (index < d)
        {
            int ai2 = offset*(2*index+1)-1;
            int bi2 = offset*(2*index+2)-1;
            ai2 += CONFLICT_FREE_OFFSET(ai2);
            bi2 += CONFLICT_FREE_OFFSET(bi2);
            //Swap left child into parent slot, accumulate into right child
            float t = temp[ai2];
            temp[ai2] = temp[bi2];
            temp[bi2] += t;
        }
    }
    //Sweep complete
    __syncthreads();
    //Writeback (only the first `width` results are meaningful)
    if(ai < width)
        dev_out[ai] = temp[ai+bankOffsetA];
    if(bi < width)
        dev_out[bi] = temp[bi+bankOffsetB];
}
// Host launcher: exclusive scan of each of `height` rows of length `width`.
// One block per row, two elements per thread, shared buffer of
// (pow2(width)+2) floats — the +2 covers the (inert, see kernel) bank-conflict
// padding. Limits are fixed by the single-block-per-row design.
// NOTE(review): width == 1 yields blockArraySize == 1 and a 0-thread launch,
// which CUDA rejects — confirm callers always pass width >= 2.
__host__ void exclusiveScanRows(float* dev_in, float* dev_out, int width, int height)
{
    //Make sure matrix is limits of this kernel.
    //Other algorithms can get around these limits, but this algorithm is simplified for the expected size of 640*480
    assert(width <= 1024);
    assert(height <= MAX_GRID_SIZE);
    //Nearest power of two below width
    int blockArraySize = pow2roundup(width);
    dim3 threads(blockArraySize >> 1);//2 elements per thread
    dim3 blocks(height);
    int sharedCount = (blockArraySize+2)*sizeof(float);
    hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(blocks),dim3(threads),sharedCount, 0, dev_in, dev_out, width, height);
} | 7b4cd285d4e01ed36eaa55ede3200de5004d1269.cu | #include "scan.h"
#define MAX_BLOCK_SIZE 1024
#define MAX_GRID_SIZE 65535
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define NO_BANK_CONFLICTS
#ifdef NO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
(((n) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(a) (0)
#endif
// Round x up to the nearest power of two; non-positive x maps to 0
// (the smear of 0 wraps around to 0 after the final increment).
inline int pow2roundup (int x)
{
    if (x < 0)
        return 0;
    unsigned int v = (unsigned int)x;
    --v;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return (int)(v + 1u);
}
// Work-efficient (Blelloch) exclusive prefix sum, one row per block.
// Launch: gridDim.x == height, blockDim.x == pow2roundup(width)/2 (two
// elements per thread). Dynamic shared memory: 2*blockDim.x floats plus
// CONFLICT_FREE_OFFSET padding. NOTE(review): with LOG_NUM_BANKS == 5 the
// padding macro is n >> 10 and therefore zero for indices < 1024.
__global__ void exclusive_scan_kernel(float* dev_in, float* dev_out, int width, int height)
{
    extern __shared__ float temp[];
    //Offset pointers to this block's row. Avoids the need for more complex indexing
    dev_in += width*blockIdx.x;
    dev_out += width*blockIdx.x;
    //Now each row is working with it's own row like a normal exclusive scan of an array length width.
    int index = threadIdx.x;
    int offset = 1;
    int n = 2*blockDim.x;//get actual temp padding
    // Each thread owns elements ai (first half) and bi (second half).
    int ai = index;
    int bi = index + n/2;
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    //Bounds checking, load shared mem; zero-pad past the row end.
    temp[ai+bankOffsetA] = (ai < width)?dev_in[ai]:0;
    temp[bi+bankOffsetB] = (bi < width)?dev_in[bi]:0;
    //Reduction step: up-sweep builds partial sums in a binary tree.
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads(); //Make sure previous step has completed
        if (index < d)
        {
            int ai2 = offset*(2*index+1)-1;
            int bi2 = offset*(2*index+2)-1;
            ai2 += CONFLICT_FREE_OFFSET(ai2);
            bi2 += CONFLICT_FREE_OFFSET(bi2);
            temp[bi2] += temp[ai2];
        }
        offset *= 2; //Adjust offset
    }
    //Reduction complete
    //Clear last element (identity for the exclusive scan)
    if(index == 0)
        temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)] = 0;
    //Sweep down
    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads(); //wait for previous step to finish
        if (index < d)
        {
            int ai2 = offset*(2*index+1)-1;
            int bi2 = offset*(2*index+2)-1;
            ai2 += CONFLICT_FREE_OFFSET(ai2);
            bi2 += CONFLICT_FREE_OFFSET(bi2);
            //Swap left child into parent slot, accumulate into right child
            float t = temp[ai2];
            temp[ai2] = temp[bi2];
            temp[bi2] += t;
        }
    }
    //Sweep complete
    __syncthreads();
    //Writeback (only the first `width` results are meaningful)
    if(ai < width)
        dev_out[ai] = temp[ai+bankOffsetA];
    if(bi < width)
        dev_out[bi] = temp[bi+bankOffsetB];
}
// Host launcher: exclusive scan of each of `height` rows of length `width`.
// One block per row, two elements per thread, (pow2(width)+2)-float shared
// buffer. NOTE(review): width == 1 produces a 0-thread launch, which CUDA
// rejects — confirm callers always pass width >= 2.
__host__ void exclusiveScanRows(float* dev_in, float* dev_out, int width, int height)
{
    //Make sure matrix is limits of this kernel.
    //Other algorithms can get around these limits, but this algorithm is simplified for the expected size of 640*480
    assert(width <= 1024);
    assert(height <= MAX_GRID_SIZE);
    //Nearest power of two below width
    int blockArraySize = pow2roundup(width);
    dim3 threads(blockArraySize >> 1);//2 elements per thread
    dim3 blocks(height);
    int sharedCount = (blockArraySize+2)*sizeof(float);
    exclusive_scan_kernel<<<blocks,threads,sharedCount>>>(dev_in, dev_out, width, height);
} |
c21e0168f2fd79694d103aad1c9b3d1cb4028fc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Per-row scaling: data_out[y][x] = data_in[y][x] * scale[y].
// Expects a 2-D launch with one thread per element of the width x height
// image; threads outside the image bounds exit immediately.
__global__ void multi_scale_kernel(const float *data_in, const float *scale, float *data_out, int width, int height) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= width || row >= height) {
        return;
    }
    const int idx = row * width + col;
    data_out[idx] = data_in[idx] * scale[row];
}
// Per-row scaling: data_out[y][x] = data_in[y][x] * scale[y].
// Expects a 2-D launch with one thread per element; out-of-range threads
// do nothing.
__global__ void multi_scale_kernel(const float *data_in, const float *scale, float *data_out, int width, int height) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x < width && y < height) {
        int index = y * width + x;
        data_out[index] = data_in[index] * scale[y];
    }
}
597ad4bacaae5825049a9939c6715f689ff955e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Logistic sigmoid 1 / (1 + e^{-x}); the literal 1. keeps the
// add/divide in double precision, exactly as the original expression.
template <typename Dtype>
__device__ Dtype cuda_sigmoid(Dtype x) {
  const Dtype e = exp(-x);
  return 1. / (1. + e);
}
// Sigmoid derivative expressed through the forward output x: x * (1 - x).
template <typename Dtype>
__device__ Dtype cuda_sigmoid_diff(Dtype x) {
  return (1. - x) * x;
}
// tanh via (e^{2x} - 1) / (e^{2x} + 1), saturating to +/-1 for |x| >= 5
// to sidestep overflow of exp(2x) for large arguments.
template <typename Dtype>
__device__ Dtype cuda_tanh(Dtype x) {
  Dtype exp2x = exp(2 * x);
  return fabs(x) < Dtype(5) ? ((exp2x - Dtype(1)) / (exp2x + Dtype(1))) :
      (x > 0 ? Dtype(1) : Dtype(-1));
}
// tanh derivative expressed through the forward output x: 1 - x^2.
template <typename Dtype>
__device__ Dtype cuda_tanh_diff(Dtype x) {
  const Dtype sq = x * x;
  return (1. - sq);
}
// Element-wise LSTM forward pass over n = batch * channels entries.
// The gate buffers arrive holding pre-activations and are overwritten in
// place with their activations (sigmoid for i/f/o, tanh for g), then:
//   c_t = c_{t-1} * f + i * g
//   h_t = o * tanh(c_t)        (or o * c_t when tanh_hidden is false)
// tanh_next_memory_state keeps the (possibly identity) squashed cell state
// for reuse in the backward pass.
template <typename Dtype>
__global__ void ForwardCombineGates(
    int n,
    bool tanh_hidden,
    const Dtype* prev_state_data,
    Dtype* input_gates,
    Dtype* forget_gates,
    Dtype* output_gates,
    Dtype* input_values,
    Dtype* tanh_next_memory_state,
    Dtype* next_memory_state,
    Dtype* next_hidden_state) {
  CUDA_KERNEL_LOOP(idx, n) {
    input_gates[idx] = cuda_sigmoid(input_gates[idx]);
    forget_gates[idx] = cuda_sigmoid(forget_gates[idx]);
    output_gates[idx] = cuda_sigmoid(output_gates[idx]);
    input_values[idx] = cuda_tanh(input_values[idx]);
    next_memory_state[idx] = prev_state_data[idx] * forget_gates[idx] +
        input_gates[idx] * input_values[idx];
    if (tanh_hidden) {
      tanh_next_memory_state[idx] = cuda_tanh(next_memory_state[idx]);
    } else {
      tanh_next_memory_state[idx] = next_memory_state[idx];
    }
    next_hidden_state[idx] = tanh_next_memory_state[idx] * output_gates[idx];
  }
}
// Element-wise local derivatives for the LSTM backward pass. Each input
// buffer holds the *post-activation* value from the forward pass; each
// output receives d(activation)/d(pre-activation) expressed through that
// value (sigmoid': a*(1-a); tanh': 1-a^2). These local factors are chained
// with the upstream gradients later, in Backward_gpu.
template <typename Dtype>
__global__ void BackwardGates(
    int n,
    bool tanh_hidden,
    const Dtype* input_gates,
    const Dtype* forget_gates,
    const Dtype* output_gates,
    const Dtype* input_values,
    const Dtype* tanh_next_memory_state,
    Dtype* input_gates_diff,
    Dtype* forget_gates_diff,
    Dtype* output_gates_diff,
    Dtype* input_values_diff,
    Dtype* tanh_next_memory_diff) {
  CUDA_KERNEL_LOOP(idx, n) {
    input_gates_diff[idx] = cuda_sigmoid_diff(input_gates[idx]);
    forget_gates_diff[idx] = cuda_sigmoid_diff(forget_gates[idx]);
    output_gates_diff[idx] = cuda_sigmoid_diff(output_gates[idx]);
    input_values_diff[idx] = cuda_tanh_diff(input_values[idx]);
    if (tanh_hidden) {
      tanh_next_memory_diff[idx] = cuda_tanh_diff(tanh_next_memory_state[idx]);
    } else {
      // Identity squashing in the forward pass: derivative is 1.
      tanh_next_memory_diff[idx] = Dtype(1.);
    }
  }
}
// LSTM unit forward pass.
// bottom[0]: timestep input x (num_ x input_data_size_); bottom[1]: previous
// memory state c_{t-1} (used as prev_state_data below).
// top[0]: next hidden state h_t; top[1]: next memory state c_t.
// Four GEMMs (x * W^T, note CblasTrans on the weights) produce the gate
// pre-activations; the combine kernel then applies the nonlinearities and
// the cell recurrences element-wise. No bias terms are applied here.
template <typename Dtype>
void LstmUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* input_data = bottom[0]->gpu_data();
  const Dtype* prev_state_data = bottom[1]->gpu_data();
  const Dtype* input_weight = this->blobs_[0]->gpu_data();
  const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
  const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
  const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
  Dtype* next_hidden_state = top[0]->mutable_gpu_data();
  Dtype* next_memory_state = top[1]->mutable_gpu_data();
  // Scratch buffers for the four gate pre-activations and tanh(c_t).
  Dtype* input_gates = input_gates_data_buffer_->mutable_gpu_data();
  Dtype* forget_gates = forget_gates_data_buffer_->mutable_gpu_data();
  Dtype* output_gates = output_gates_data_buffer_->mutable_gpu_data();
  Dtype* input_values = input_values_data_buffer_->mutable_gpu_data();
  Dtype* tanh_next_memory_state = tanh_mem_buffer_->mutable_gpu_data();
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
                        (Dtype)1., input_data, input_weight,
                        (Dtype)0., input_values);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
                        (Dtype)1., input_data, input_gate_weight,
                        (Dtype)0., input_gates);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
                        (Dtype)1., input_data, forget_gate_weight,
                        (Dtype)0., forget_gates);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
                        (Dtype)1., input_data, output_gate_weight,
                        (Dtype)0., output_gates);
  if (this->layer_param_.lstm_unit_param().tie_output_forget()) {
    // Tie f = 1 - o: overwrite the forget pre-activation with the negated
    // output pre-activation (sigmoid(-z) == 1 - sigmoid(z)).
    caffe_gpu_set(channels_ * num_, Dtype(0), forget_gates);
    caffe_gpu_sub(channels_ * num_, forget_gates, output_gates, forget_gates);
  }
  const int count = num_ * channels_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ForwardCombineGates<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count,
      this->layer_param_.lstm_unit_param().tanh_hidden(),
      prev_state_data,
      input_gates,
      forget_gates,
      output_gates,
      input_values,
      tanh_next_memory_state,
      next_memory_state,
      next_hidden_state);
  CUDA_POST_KERNEL_CHECK;
}
// LSTM unit backward pass. Consumes the activations cached by Forward_gpu
// plus top diffs (top[0]: dL/dh_t, top[1]: dL/dc_t) and accumulates
// gradients into the four weight blobs, bottom[0] (input) and bottom[1]
// (previous memory state). Weight/input grads use beta = 1 GEMMs, i.e.
// they accumulate — callers are expected to have zeroed the diffs.
template <typename Dtype>
void LstmUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* input_data = bottom[0]->gpu_data();
  const Dtype* prev_state_data = bottom[1]->gpu_data();
  const Dtype* input_weight = this->blobs_[0]->gpu_data();
  const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
  const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
  const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
  const Dtype* input_gates = input_gates_data_buffer_->gpu_data();
  const Dtype* forget_gates = forget_gates_data_buffer_->gpu_data();
  const Dtype* output_gates = output_gates_data_buffer_->gpu_data();
  const Dtype* input_values = input_values_data_buffer_->gpu_data();
  const Dtype* tanh_next_memory_state = tanh_mem_buffer_->gpu_data();
  // Four contiguous slices of one scratch blob hold the local derivatives.
  Dtype* gates_diff = gates_diff_buffer_->mutable_gpu_data();
  Dtype* input_gates_diff = gates_diff + channels_ * num_ * 0;
  Dtype* forget_gates_diff = gates_diff + channels_ * num_ * 1;
  Dtype* output_gates_diff = gates_diff + channels_ * num_ * 2;
  Dtype* input_values_diff = gates_diff + channels_ * num_ * 3;
  Dtype* tanh_next_memory_diff = tanh_mem_buffer_->mutable_gpu_diff();
  const int count = num_ * channels_;
  // Step 1: local d(activation)/d(pre-activation) for every gate.
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( BackwardGates<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count,
      this->layer_param_.lstm_unit_param().tanh_hidden(),
      input_gates,
      forget_gates,
      output_gates,
      input_values,
      tanh_next_memory_state,
      input_gates_diff,
      forget_gates_diff,
      output_gates_diff,
      input_values_diff,
      tanh_next_memory_diff);
  CUDA_POST_KERNEL_CHECK;
  Dtype* input_weight_diff = this->blobs_[0]->mutable_gpu_diff();
  Dtype* input_gate_weight_diff = this->blobs_[1]->mutable_gpu_diff();
  Dtype* forget_gate_weight_diff = this->blobs_[2]->mutable_gpu_diff();
  Dtype* output_gate_weight_diff = this->blobs_[3]->mutable_gpu_diff();
  Dtype* input_diff = bottom[0]->mutable_gpu_diff();
  Dtype* prev_state_diff = bottom[1]->mutable_gpu_diff();
  const Dtype* next_hidden_state_diff = top[0]->gpu_diff();
  const Dtype* next_memory_state_diff = top[1]->gpu_diff();
  // Step 2: total gradient w.r.t. the cell state,
  //   dL/dc_t = dL/dh_t * o * tanh'(c_t) + dL/dc_t(from above),
  // then dL/dc_{t-1} = dL/dc_t * f.
  Dtype* next_state_tot_diff = next_state_tot_diff_buffer_->mutable_gpu_data();
  caffe_gpu_mul(num_ * channels_, output_gates,
                next_hidden_state_diff, next_state_tot_diff);
  caffe_gpu_mul(num_ * channels_, tanh_next_memory_diff,
                next_state_tot_diff, next_state_tot_diff);
  caffe_gpu_add(num_ * channels_, next_memory_state_diff,
                next_state_tot_diff, next_state_tot_diff);
  caffe_gpu_mul(num_ * channels_, next_state_tot_diff,
                forget_gates, prev_state_diff);
  // Step 3: per-gate pre-activation gradients (dldg_data), each followed by
  // two GEMMs: one accumulating the weight gradient (dldg^T * x) and one
  // accumulating the input gradient (dldg * W).
  Dtype* dldg_data = dldg_buffer_->mutable_gpu_data();
  // Candidate input g: dL/dz_g = dL/dc_t * i * tanh'(g).
  caffe_gpu_mul(num_ * channels_, input_gates, input_values_diff, dldg_data);
  caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
  caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                        channels_, input_data_size_, num_,
                        (Dtype)1., dldg_data, input_data,
                        (Dtype)1., input_weight_diff);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                        num_, input_data_size_, channels_,
                        (Dtype)1., dldg_data, input_weight,
                        (Dtype)1., input_diff);
  // Input gate i: dL/dz_i = dL/dc_t * g * sigmoid'(i).
  caffe_gpu_mul(num_ * channels_, input_gates_diff, input_values, dldg_data)
;
  caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
  caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                        channels_, input_data_size_, num_,
                        (Dtype)1., dldg_data, input_data,
                        (Dtype)1., input_gate_weight_diff);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                        num_, input_data_size_, channels_,
                        (Dtype)1., dldg_data, input_gate_weight,
                        (Dtype)1., input_diff);
  if (this->layer_param_.lstm_unit_param().tie_output_forget()) {
    // Tied f = 1 - o: the forget-gate contribution flows into the output
    // gate's weights/input with a negative sign (z_f == -z_o in forward).
    caffe_gpu_mul(num_ * channels_, forget_gates_diff, prev_state_data,
                  dldg_data);
    caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                          channels_, input_data_size_, num_,
                          (Dtype)-1., dldg_data, input_data,
                          (Dtype)1., output_gate_weight_diff);
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                          num_, input_data_size_, channels_,
                          (Dtype)-1., dldg_data, output_gate_weight,
                          (Dtype)1., input_diff);
  } else {
    // Forget gate f: dL/dz_f = dL/dc_t * c_{t-1} * sigmoid'(f).
    caffe_gpu_mul(num_ * channels_, forget_gates_diff, prev_state_data,
                  dldg_data);
    caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                          channels_, input_data_size_, num_,
                          (Dtype)1., dldg_data, input_data,
                          (Dtype)1., forget_gate_weight_diff);
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                          num_, input_data_size_, channels_,
                          (Dtype)1., dldg_data, forget_gate_weight,
                          (Dtype)1., input_diff);
  }
  // Output gate o: dL/dz_o = dL/dh_t * tanh(c_t) * sigmoid'(o).
  caffe_gpu_mul(num_ * channels_, output_gates_diff, tanh_next_memory_state,
                dldg_data);
  caffe_gpu_mul(num_ * channels_, next_hidden_state_diff, dldg_data, dldg_data);
  caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
                        channels_, input_data_size_, num_,
                        (Dtype)1., dldg_data, input_data,
                        (Dtype)1., output_gate_weight_diff);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
                        num_, input_data_size_, channels_,
                        (Dtype)1., dldg_data, output_gate_weight,
                        (Dtype)1., input_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmUnitLayer);
} // namespace caffe
| 597ad4bacaae5825049a9939c6715f689ff955e7.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Logistic sigmoid 1 / (1 + e^{-x}) (arithmetic promoted to double by the 1. literals).
template <typename Dtype>
__device__ Dtype cuda_sigmoid(Dtype x) {
  return 1. / (1. + exp(-x));
}
// Sigmoid derivative expressed through the forward output x: x * (1 - x).
template <typename Dtype>
__device__ Dtype cuda_sigmoid_diff(Dtype x) {
  return x * (1. - x);
}
// tanh via (e^{2x} - 1) / (e^{2x} + 1), saturating to +/-1 for |x| >= 5
// to sidestep overflow of exp(2x) for large arguments.
template <typename Dtype>
__device__ Dtype cuda_tanh(Dtype x) {
  Dtype exp2x = exp(2 * x);
  return fabs(x) < Dtype(5) ? ((exp2x - Dtype(1)) / (exp2x + Dtype(1))) :
      (x > 0 ? Dtype(1) : Dtype(-1));
}
// tanh derivative expressed through the forward output x: 1 - x^2.
template <typename Dtype>
__device__ Dtype cuda_tanh_diff(Dtype x) {
  return (1. - x * x);
}
// Element-wise LSTM forward pass over n = batch * channels entries.
// Gate buffers arrive with pre-activations and are overwritten in place
// with activations (sigmoid for i/f/o, tanh for g), then:
//   c_t = c_{t-1} * f + i * g;  h_t = o * tanh(c_t)  (identity if !tanh_hidden).
template <typename Dtype>
__global__ void ForwardCombineGates(
    int n,
    bool tanh_hidden,
    const Dtype* prev_state_data,
    Dtype* input_gates,
    Dtype* forget_gates,
    Dtype* output_gates,
    Dtype* input_values,
    Dtype* tanh_next_memory_state,
    Dtype* next_memory_state,
    Dtype* next_hidden_state) {
  CUDA_KERNEL_LOOP(idx, n) {
    input_gates[idx] = cuda_sigmoid(input_gates[idx]);
    forget_gates[idx] = cuda_sigmoid(forget_gates[idx]);
    output_gates[idx] = cuda_sigmoid(output_gates[idx]);
    input_values[idx] = cuda_tanh(input_values[idx]);
    next_memory_state[idx] = prev_state_data[idx] * forget_gates[idx] +
        input_gates[idx] * input_values[idx];
    if (tanh_hidden) {
      tanh_next_memory_state[idx] = cuda_tanh(next_memory_state[idx]);
    } else {
      tanh_next_memory_state[idx] = next_memory_state[idx];
    }
    next_hidden_state[idx] = tanh_next_memory_state[idx] * output_gates[idx];
  }
}
template <typename Dtype>
__global__ void BackwardGates(
int n,
bool tanh_hidden,
const Dtype* input_gates,
const Dtype* forget_gates,
const Dtype* output_gates,
const Dtype* input_values,
const Dtype* tanh_next_memory_state,
Dtype* input_gates_diff,
Dtype* forget_gates_diff,
Dtype* output_gates_diff,
Dtype* input_values_diff,
Dtype* tanh_next_memory_diff) {
CUDA_KERNEL_LOOP(idx, n) {
input_gates_diff[idx] = cuda_sigmoid_diff(input_gates[idx]);
forget_gates_diff[idx] = cuda_sigmoid_diff(forget_gates[idx]);
output_gates_diff[idx] = cuda_sigmoid_diff(output_gates[idx]);
input_values_diff[idx] = cuda_tanh_diff(input_values[idx]);
if (tanh_hidden) {
tanh_next_memory_diff[idx] = cuda_tanh_diff(tanh_next_memory_state[idx]);
} else {
tanh_next_memory_diff[idx] = Dtype(1.);
}
}
}
template <typename Dtype>
void LstmUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* prev_state_data = bottom[1]->gpu_data();
const Dtype* input_weight = this->blobs_[0]->gpu_data();
const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
Dtype* next_hidden_state = top[0]->mutable_gpu_data();
Dtype* next_memory_state = top[1]->mutable_gpu_data();
Dtype* input_gates = input_gates_data_buffer_->mutable_gpu_data();
Dtype* forget_gates = forget_gates_data_buffer_->mutable_gpu_data();
Dtype* output_gates = output_gates_data_buffer_->mutable_gpu_data();
Dtype* input_values = input_values_data_buffer_->mutable_gpu_data();
Dtype* tanh_next_memory_state = tanh_mem_buffer_->mutable_gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, input_weight,
(Dtype)0., input_values);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, input_gate_weight,
(Dtype)0., input_gates);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, forget_gate_weight,
(Dtype)0., forget_gates);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, output_gate_weight,
(Dtype)0., output_gates);
if (this->layer_param_.lstm_unit_param().tie_output_forget()) {
caffe_gpu_set(channels_ * num_, Dtype(0), forget_gates);
caffe_gpu_sub(channels_ * num_, forget_gates, output_gates, forget_gates);
}
const int count = num_ * channels_;
// NOLINT_NEXT_LINE(whitespace/operators)
ForwardCombineGates<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count,
this->layer_param_.lstm_unit_param().tanh_hidden(),
prev_state_data,
input_gates,
forget_gates,
output_gates,
input_values,
tanh_next_memory_state,
next_memory_state,
next_hidden_state);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void LstmUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* prev_state_data = bottom[1]->gpu_data();
const Dtype* input_weight = this->blobs_[0]->gpu_data();
const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
const Dtype* input_gates = input_gates_data_buffer_->gpu_data();
const Dtype* forget_gates = forget_gates_data_buffer_->gpu_data();
const Dtype* output_gates = output_gates_data_buffer_->gpu_data();
const Dtype* input_values = input_values_data_buffer_->gpu_data();
const Dtype* tanh_next_memory_state = tanh_mem_buffer_->gpu_data();
Dtype* gates_diff = gates_diff_buffer_->mutable_gpu_data();
Dtype* input_gates_diff = gates_diff + channels_ * num_ * 0;
Dtype* forget_gates_diff = gates_diff + channels_ * num_ * 1;
Dtype* output_gates_diff = gates_diff + channels_ * num_ * 2;
Dtype* input_values_diff = gates_diff + channels_ * num_ * 3;
Dtype* tanh_next_memory_diff = tanh_mem_buffer_->mutable_gpu_diff();
const int count = num_ * channels_;
// NOLINT_NEXT_LINE(whitespace/operators)
BackwardGates<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count,
this->layer_param_.lstm_unit_param().tanh_hidden(),
input_gates,
forget_gates,
output_gates,
input_values,
tanh_next_memory_state,
input_gates_diff,
forget_gates_diff,
output_gates_diff,
input_values_diff,
tanh_next_memory_diff);
CUDA_POST_KERNEL_CHECK;
Dtype* input_weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* input_gate_weight_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* forget_gate_weight_diff = this->blobs_[2]->mutable_gpu_diff();
Dtype* output_gate_weight_diff = this->blobs_[3]->mutable_gpu_diff();
Dtype* input_diff = bottom[0]->mutable_gpu_diff();
Dtype* prev_state_diff = bottom[1]->mutable_gpu_diff();
const Dtype* next_hidden_state_diff = top[0]->gpu_diff();
const Dtype* next_memory_state_diff = top[1]->gpu_diff();
Dtype* next_state_tot_diff = next_state_tot_diff_buffer_->mutable_gpu_data();
caffe_gpu_mul(num_ * channels_, output_gates,
next_hidden_state_diff, next_state_tot_diff);
caffe_gpu_mul(num_ * channels_, tanh_next_memory_diff,
next_state_tot_diff, next_state_tot_diff);
caffe_gpu_add(num_ * channels_, next_memory_state_diff,
next_state_tot_diff, next_state_tot_diff);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff,
forget_gates, prev_state_diff);
Dtype* dldg_data = dldg_buffer_->mutable_gpu_data();
caffe_gpu_mul(num_ * channels_, input_gates, input_values_diff, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_size_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., input_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_size_, channels_,
(Dtype)1., dldg_data, input_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, input_gates_diff, input_values, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_size_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., input_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_size_, channels_,
(Dtype)1., dldg_data, input_gate_weight,
(Dtype)1., input_diff);
if (this->layer_param_.lstm_unit_param().tie_output_forget()) {
caffe_gpu_mul(num_ * channels_, forget_gates_diff, prev_state_data,
dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_size_, num_,
(Dtype)-1., dldg_data, input_data,
(Dtype)1., output_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_size_, channels_,
(Dtype)-1., dldg_data, output_gate_weight,
(Dtype)1., input_diff);
} else {
caffe_gpu_mul(num_ * channels_, forget_gates_diff, prev_state_data,
dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_size_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., forget_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_size_, channels_,
(Dtype)1., dldg_data, forget_gate_weight,
(Dtype)1., input_diff);
}
caffe_gpu_mul(num_ * channels_, output_gates_diff, tanh_next_memory_state,
dldg_data);
caffe_gpu_mul(num_ * channels_, next_hidden_state_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_size_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., output_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_size_, channels_,
(Dtype)1., dldg_data, output_gate_weight,
(Dtype)1., input_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmUnitLayer);
} // namespace caffe
|
a6196cf3ca70178718ecd2091a8e1511f7e6cf4d.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc -arch=sm_11 -m64 -O3 main.cu -o stream.bin
#include<iostream>
#include<cstdlib>
#include <hip/hip_runtime.h>
#include <cassert>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#include <ctime>
#define DOMINO 4096
#define BLOCOS 8
#define STREAM 4
#define CHECK_ERROR(call) do { \
if( hipSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
hipGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
/*
*************************************************************************
unsigned int width = gridDim.x * blockDim.x;
unsigned int height = gridDim.y * blockDim.y;
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int kn = y * width + x;
*************************************************************************
N blocks x M threads <---- IPC
*/
__global__ void setup_kernel(const unsigned long long seed, hiprandState_t *state){
int x = blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init(seed, x, 0, &state[x]);
}
__global__ void uniform_kernel(hiprandState_t *state, float *h_y){
int x = blockDim.x * blockIdx.x + threadIdx.x;
h_y[x] = hiprand_uniform(&state[x]);
}
using namespace std;
int main (int argc, char **argv){
hipEvent_t e_Start,
e_Stop;
hipEvent_t e_StartS,
e_StopS;
hiprandState_t *mStates = NULL;
hipStream_t *mStreams = NULL;
float *h_Mem = NULL,
*d_Mem = NULL;
float elapsedTime = 0.0f,
elapsedTimeStream = 0.0f,
acc = 0.0f;
int dominio = DOMINO,
subdominio = DOMINO / STREAM;
unsigned int qtdeDados = DOMINO * sizeof(float);
size_t free = 0,
total = 0;
hipDeviceProp_t deviceProp; //Levantar a capacidade do device
hipGetDeviceProperties(&deviceProp, 0);
cout << "\nStream de numeros aleatorios\n";
CHECK_ERROR(hipMemGetInfo(&free, &total));
cout << "Memoria livre: " << (free / 1024 / 1024) << " MB\n";
cout << "Memoria total: " << (total / 1024 / 1024) << " MB\n";
cout << "Dominio: " << dominio << endl;
//Reset no device
CHECK_ERROR(hipDeviceReset());
//Criando eventos
CHECK_ERROR(hipEventCreate(&e_Start));
CHECK_ERROR(hipEventCreate(&e_Stop));
CHECK_ERROR(hipEventCreate(&e_StartS));
CHECK_ERROR(hipEventCreate(&e_StopS));
mStreams = new hipStream_t[STREAM];
for (int i = 0; i < STREAM; i++)
CHECK_ERROR(hipStreamCreate(&mStreams[i]));
//Alocando memoria CPU "no-swap"
CHECK_ERROR(hipHostMalloc(reinterpret_cast<void**> (&h_Mem), qtdeDados));
//alocando memria em GPU
CHECK_ERROR(hipMalloc(reinterpret_cast<void**> (&d_Mem), qtdeDados));
CHECK_ERROR(hipMalloc(reinterpret_cast<void**> (&mStates), dominio * sizeof(hiprandState_t)));
int blocos = BLOCOS,
threads = subdominio / BLOCOS;
cout << "Blocos: " << blocos << endl;
cout << "Threads: " << threads << endl;
assert( threads <= deviceProp.maxThreadsDim[0]);
CHECK_ERROR(hipEventRecord(e_Start, hipEventDefault));
for (int i = 0; i < STREAM; i++){
CHECK_ERROR(hipEventRecord(e_StartS, mStreams[i]));
int offset = subdominio * i;
hipLaunchKernelGGL(( setup_kernel), dim3(blocos), dim3(threads),0, mStreams[i] , time (NULL) + offset, mStates+offset);
hipLaunchKernelGGL(( uniform_kernel), dim3(blocos), dim3(threads),0, mStreams[i], mStates+offset, d_Mem + offset);
CHECK_ERROR(hipMemcpyAsync(h_Mem + offset, d_Mem + offset, subdominio * sizeof(float), hipMemcpyDeviceToHost, mStreams[i] ));
}
for (int i = 0; i < STREAM; i++){
CHECK_ERROR( hipStreamSynchronize(mStreams[i]) );
CHECK_ERROR(hipEventRecord(e_StopS, mStreams[i]));
CHECK_ERROR(hipEventSynchronize(e_StopS));
CHECK_ERROR(hipEventElapsedTime(&elapsedTimeStream, e_StartS, e_StopS));
cout << "Stream: " << i << " tempo: " << (elapsedTimeStream / 1000.0f) << " (s) \n";
acc += elapsedTimeStream;
}
cout << "Acumulado: " << acc / 1000.0f << " (s) \n";
CHECK_ERROR(hipEventRecord(e_Stop, hipEventDefault));
CHECK_ERROR(hipEventSynchronize(e_Stop));
CHECK_ERROR(hipEventElapsedTime(&elapsedTime, e_Start, e_Stop));
cout << "Tempo: " << elapsedTime / 1000.0f << " (s) \n";
cout << endl << "Resultado: "<< endl;
for (int i = 0; i < dominio; i++)
cerr << h_Mem[i]<< endl;
cerr << endl;
CHECK_ERROR( hipFree(mStates) );
CHECK_ERROR( hipHostFree(h_Mem) ); //Liberando memorias GPU e CPU
CHECK_ERROR( hipFree(d_Mem) ); //Liberando memorias GPU e CPU
// for (int i = 0; i < STREAM; i++)
// CHECK_ERROR( hipStreamDestroy(mStreams[i]) );
delete mStreams;
CHECK_ERROR( hipEventDestroy (e_Start) );
CHECK_ERROR( hipEventDestroy (e_Stop) );
CHECK_ERROR( hipEventDestroy (e_StartS) );
CHECK_ERROR( hipEventDestroy (e_StopS) );
cout << "\nFIM\n";
return EXIT_SUCCESS;
}
| a6196cf3ca70178718ecd2091a8e1511f7e6cf4d.cu | //nvcc -arch=sm_11 -m64 -O3 main.cu -o stream.bin
#include<iostream>
#include<cstdlib>
#include <cuda_runtime.h>
#include <cassert>
#include <curand_kernel.h>
#include <curand.h>
#include <ctime>
#define DOMINO 4096
#define BLOCOS 8
#define STREAM 4
#define CHECK_ERROR(call) do { \
if( cudaSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
cudaGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
/*
*************************************************************************
unsigned int width = gridDim.x * blockDim.x;
unsigned int height = gridDim.y * blockDim.y;
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int kn = y * width + x;
*************************************************************************
N blocks x M threads <---- IPC
*/
__global__ void setup_kernel(const unsigned long long seed, curandState *state){
int x = blockDim.x * blockIdx.x + threadIdx.x;
curand_init(seed, x, 0, &state[x]);
}
__global__ void uniform_kernel(curandState *state, float *h_y){
int x = blockDim.x * blockIdx.x + threadIdx.x;
h_y[x] = curand_uniform(&state[x]);
}
using namespace std;
int main (int argc, char **argv){
cudaEvent_t e_Start,
e_Stop;
cudaEvent_t e_StartS,
e_StopS;
curandState *mStates = NULL;
cudaStream_t *mStreams = NULL;
float *h_Mem = NULL,
*d_Mem = NULL;
float elapsedTime = 0.0f,
elapsedTimeStream = 0.0f,
acc = 0.0f;
int dominio = DOMINO,
subdominio = DOMINO / STREAM;
unsigned int qtdeDados = DOMINO * sizeof(float);
size_t free = 0,
total = 0;
cudaDeviceProp deviceProp; //Levantar a capacidade do device
cudaGetDeviceProperties(&deviceProp, 0);
cout << "\nStream de numeros aleatorios\n";
CHECK_ERROR(cudaMemGetInfo(&free, &total));
cout << "Memoria livre: " << (free / 1024 / 1024) << " MB\n";
cout << "Memoria total: " << (total / 1024 / 1024) << " MB\n";
cout << "Dominio: " << dominio << endl;
//Reset no device
CHECK_ERROR(cudaDeviceReset());
//Criando eventos
CHECK_ERROR(cudaEventCreate(&e_Start));
CHECK_ERROR(cudaEventCreate(&e_Stop));
CHECK_ERROR(cudaEventCreate(&e_StartS));
CHECK_ERROR(cudaEventCreate(&e_StopS));
mStreams = new cudaStream_t[STREAM];
for (int i = 0; i < STREAM; i++)
CHECK_ERROR(cudaStreamCreate(&mStreams[i]));
//Alocando memoria CPU "no-swap"
CHECK_ERROR(cudaMallocHost(reinterpret_cast<void**> (&h_Mem), qtdeDados));
//alocando memória em GPU
CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&d_Mem), qtdeDados));
CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&mStates), dominio * sizeof(curandState)));
int blocos = BLOCOS,
threads = subdominio / BLOCOS;
cout << "Blocos: " << blocos << endl;
cout << "Threads: " << threads << endl;
assert( threads <= deviceProp.maxThreadsDim[0]);
CHECK_ERROR(cudaEventRecord(e_Start, cudaEventDefault));
for (int i = 0; i < STREAM; i++){
CHECK_ERROR(cudaEventRecord(e_StartS, mStreams[i]));
int offset = subdominio * i;
setup_kernel<<<blocos, threads,0, mStreams[i] >>>(time (NULL) + offset, mStates+offset);
uniform_kernel<<<blocos, threads,0, mStreams[i]>>>(mStates+offset, d_Mem + offset);
CHECK_ERROR(cudaMemcpyAsync(h_Mem + offset, d_Mem + offset, subdominio * sizeof(float), cudaMemcpyDeviceToHost, mStreams[i] ));
}
for (int i = 0; i < STREAM; i++){
CHECK_ERROR( cudaStreamSynchronize(mStreams[i]) );
CHECK_ERROR(cudaEventRecord(e_StopS, mStreams[i]));
CHECK_ERROR(cudaEventSynchronize(e_StopS));
CHECK_ERROR(cudaEventElapsedTime(&elapsedTimeStream, e_StartS, e_StopS));
cout << "Stream: " << i << " tempo: " << (elapsedTimeStream / 1000.0f) << " (s) \n";
acc += elapsedTimeStream;
}
cout << "Acumulado: " << acc / 1000.0f << " (s) \n";
CHECK_ERROR(cudaEventRecord(e_Stop, cudaEventDefault));
CHECK_ERROR(cudaEventSynchronize(e_Stop));
CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, e_Start, e_Stop));
cout << "Tempo: " << elapsedTime / 1000.0f << " (s) \n";
cout << endl << "Resultado: "<< endl;
for (int i = 0; i < dominio; i++)
cerr << h_Mem[i]<< endl;
cerr << endl;
CHECK_ERROR( cudaFree(mStates) );
CHECK_ERROR( cudaFreeHost(h_Mem) ); //Liberando memorias GPU e CPU
CHECK_ERROR( cudaFree(d_Mem) ); //Liberando memorias GPU e CPU
// for (int i = 0; i < STREAM; i++)
// CHECK_ERROR( cudaStreamDestroy(mStreams[i]) );
delete mStreams;
CHECK_ERROR( cudaEventDestroy (e_Start) );
CHECK_ERROR( cudaEventDestroy (e_Stop) );
CHECK_ERROR( cudaEventDestroy (e_StartS) );
CHECK_ERROR( cudaEventDestroy (e_StopS) );
cout << "\nFIM\n";
return EXIT_SUCCESS;
}
|
317d531efd1747f4ab7c2a5f4ec7b93362b91e82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//***************************************************************************************/
//
// Based on Pointnet2 Library (MIT License):
// https://github.com/sshaoshuai/Pointnet2.PyTorch
//
// Copyright (c) 2019 Shaoshuai Shi
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
//***************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "ATen/hip/HIPContext.h"
#include "open3d/ml/contrib/GroupPoints.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pointnet/GroupPointsKernel.h"
using namespace open3d::ml::contrib;
void group_points_grad_launcher(int b,
int c,
int n,
int npoints,
int nsample,
const float *grad_out,
const int *idx,
float *grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
hipError_t err;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( group_points_grad_kernel), dim3(blocks), dim3(threads), 0, stream,
b, c, n, npoints, nsample, grad_out, idx, grad_points);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
void group_points_launcher(int b,
int c,
int n,
int npoints,
int nsample,
const float *points,
const int *idx,
float *out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
hipError_t err;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( group_points_kernel), dim3(blocks), dim3(threads), 0, stream,
b, c, n, npoints, nsample, points, idx, out);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 317d531efd1747f4ab7c2a5f4ec7b93362b91e82.cu | //***************************************************************************************/
//
// Based on Pointnet2 Library (MIT License):
// https://github.com/sshaoshuai/Pointnet2.PyTorch
//
// Copyright (c) 2019 Shaoshuai Shi
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
//***************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "ATen/cuda/CUDAContext.h"
#include "open3d/ml/contrib/GroupPoints.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pointnet/GroupPointsKernel.h"
using namespace open3d::ml::contrib;
void group_points_grad_launcher(int b,
int c,
int n,
int npoints,
int nsample,
const float *grad_out,
const int *idx,
float *grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
cudaError_t err;
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
group_points_grad_kernel<<<blocks, threads, 0, stream>>>(
b, c, n, npoints, nsample, grad_out, idx, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void group_points_launcher(int b,
int c,
int n,
int npoints,
int nsample,
const float *points,
const int *idx,
float *out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
cudaError_t err;
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
group_points_kernel<<<blocks, threads, 0, stream>>>(
b, c, n, npoints, nsample, points, idx, out);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
60f04b9bac8fbccb3463d1aa388d99bfa8ffb443.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar.hpp>
#include <rmm/cuda_stream_view.hpp>
template <typename T>
struct CopyTest : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
#define wrapper cudf::test::fixed_width_column_wrapper
TYPED_TEST(CopyTest, CopyIfElseTestShort)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestManyNulls)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
struct copy_if_else_tiny_grid_functor {
template <typename T, typename Filter, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// output
std::unique_ptr<cudf::column> out =
cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr);
// device views
auto lhs_view = cudf::column_device_view::create(lhs);
auto rhs_view = cudf::column_device_view::create(rhs);
auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view);
auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view);
auto out_dv = cudf::mutable_column_device_view::create(*out);
// call the kernel with an artificially small grid
hipLaunchKernelGGL(( cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false>)
, dim3(1), dim3(32), 0, stream.value(), lhs_iter, rhs_iter, filter, *out_dv, nullptr);
return out;
}
template <typename T, typename Filter, std::enable_if_t<not cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Unexpected test execution");
}
};
std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& boolean_mask)
{
auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask);
cudf::column_device_view bool_mask_device = *bool_mask_device_p;
auto filter = [bool_mask_device] __device__(cudf::size_type i) {
return bool_mask_device.element<bool>(i);
};
return cudf::type_dispatcher(lhs.type(),
copy_if_else_tiny_grid_functor{},
lhs,
rhs,
filter,
rmm::cuda_stream_default,
rmm::mr::get_current_device_resource());
}
TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestLong)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
lhs_v);
bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6},
rhs_v);
bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
exp_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{};
wrapper<T> lhs_w{};
wrapper<T> rhs_w{};
wrapper<T> expected_w{};
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseBadInputLength)
{
using T = TypeParam;
// mask length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
// column length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
}
template <typename T>
struct CopyTestNumeric : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes);
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6});
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
bool mask_v[] = {1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v);
const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5});
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v);
cudf::numeric_scalar<T> rhs_w(6);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6});
wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
cudf::numeric_scalar<T> rhs_w(6, false);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
template <typename T>
struct create_chrono_scalar {
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value,
cudf::timestamp_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::timestamp_scalar<T>(std::forward<Args>(args)...);
}
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value,
cudf::duration_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::duration_scalar<T>(std::forward<Args>(args)...);
}
};
template <typename T>
struct CopyTestChrono : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes);
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
struct CopyTestUntyped : public cudf::test::BaseFixture {
};
TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch)
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<float> lhs_w{5, 5, 5, 5};
wrapper<int32_t> rhs_w{6, 6, 6, 6};
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
struct StringsCopyIfElseTest : public cudf::test::BaseFixture {
};
TEST_F(StringsCopyIfElseTest, CopyIfElse)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", ""};
cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids);
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 1, 0, 1, 0, 1};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) {
if (mask[idx] and mask_v[idx])
h_expected.push_back(h_strings1[idx]);
else
h_expected.push_back(h_strings2[idx]);
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 0, 1, 0, 1, 0};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx] and mask_v[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_strings2[idx]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {0, 1, 1, 1, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6);
auto results = cudf::copy_if_else(strings2, strings1, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_strings2[idx]);
} else {
h_expected.push_back(h_string1[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar string1{h_string1[0]};
std::vector<const char*> h_string2{"aaa"};
cudf::string_scalar string2{h_string2[0], false};
constexpr cudf::size_type mask_size = 6;
bool mask[] = {1, 0, 1, 0, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size);
auto results = cudf::copy_if_else(string1, string2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_string2[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
template <typename T>
struct FixedPointTypes : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(FixedPointTypes, cudf::test::FixedPointTypes);
TYPED_TEST(FixedPointTypes, FixedPointSimple)
{
using namespace numeric;
using decimalXX = TypeParam;
using RepType = cudf::device_storage_type_t<decimalXX>;
using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>;
auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0};
auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}};
auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-2}};
auto const expected = fp_wrapper{{0, 220, 330, 440, 0, 0}, scale_type{-2}};
auto const result = cudf::copy_if_else(a, b, mask);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}
TYPED_TEST(FixedPointTypes, FixedPointLarge)
{
using namespace numeric;
using namespace cudf::test;
using decimalXX = TypeParam;
using RepType = cudf::device_storage_type_t<decimalXX>;
using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>;
auto a = thrust::make_counting_iterator(-1000);
auto b = thrust::make_constant_iterator(0);
auto m = cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return i > 0; });
auto e =
cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return ::max(0, i); });
auto const mask = cudf::test::fixed_width_column_wrapper<bool>(m, m + 2000);
auto const A = fp_wrapper{a, a + 2000, scale_type{-3}};
auto const B = fp_wrapper{b, b + 2000, scale_type{-3}};
auto const expected = fp_wrapper{e, e + 2000, scale_type{-3}};
auto const result = cudf::copy_if_else(A, B, mask);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}
TYPED_TEST(FixedPointTypes, FixedPointScaleMismatch)
{
using namespace numeric;
using decimalXX = TypeParam;
using RepType = cudf::device_storage_type_t<decimalXX>;
using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>;
auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0};
auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}};
auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-1}};
EXPECT_THROW(cudf::copy_if_else(a, b, mask), cudf::logic_error);
}
| 60f04b9bac8fbccb3463d1aa388d99bfa8ffb443.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar.hpp>
#include <rmm/cuda_stream_view.hpp>
template <typename T>
struct CopyTest : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
#define wrapper cudf::test::fixed_width_column_wrapper
TYPED_TEST(CopyTest, CopyIfElseTestShort)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestManyNulls)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
struct copy_if_else_tiny_grid_functor {
template <typename T, typename Filter, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// output
std::unique_ptr<cudf::column> out =
cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr);
// device views
auto lhs_view = cudf::column_device_view::create(lhs);
auto rhs_view = cudf::column_device_view::create(rhs);
auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view);
auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view);
auto out_dv = cudf::mutable_column_device_view::create(*out);
// call the kernel with an artificially small grid
cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false>
<<<1, 32, 0, stream.value()>>>(lhs_iter, rhs_iter, filter, *out_dv, nullptr);
return out;
}
template <typename T, typename Filter, std::enable_if_t<not cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Unexpected test execution");
}
};
std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs,
cudf::column_view const& rhs,
cudf::column_view const& boolean_mask)
{
auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask);
cudf::column_device_view bool_mask_device = *bool_mask_device_p;
auto filter = [bool_mask_device] __device__(cudf::size_type i) {
return bool_mask_device.element<bool>(i);
};
return cudf::type_dispatcher(lhs.type(),
copy_if_else_tiny_grid_functor{},
lhs,
rhs,
filter,
rmm::cuda_stream_default,
rmm::mr::get_current_device_resource());
}
TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5});
auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestLong)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
lhs_v);
bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6},
rhs_v);
bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
exp_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{};
wrapper<T> lhs_w{};
wrapper<T> rhs_w{};
wrapper<T> expected_w{};
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1});
wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4)
{
using T = TypeParam;
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
wrapper<T, int32_t> expected_w({5, 6, 5, 5});
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseBadInputLength)
{
using T = TypeParam;
// mask length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
// column length mismatch
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5});
wrapper<T, int32_t> rhs_w({6, 6, 6, 6});
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
}
template <typename T>
struct CopyTestNumeric : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes);
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6});
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
bool mask_v[] = {1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v);
const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5});
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v);
cudf::numeric_scalar<T> rhs_w(6);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6});
wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
cudf::numeric_scalar<T> lhs_w(5);
cudf::numeric_scalar<T> rhs_w(6, false);
const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5});
wrapper<T> expected_w(expected.begin(), expected.end(), mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
template <typename T>
struct create_chrono_scalar {
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value,
cudf::timestamp_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::timestamp_scalar<T>(std::forward<Args>(args)...);
}
template <typename ChronoT = T, typename... Args>
typename std::enable_if_t<
std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value,
cudf::duration_scalar<ChronoT>>
operator()(Args&&... args) const
{
return cudf::duration_scalar<T>(std::forward<Args>(args)...);
}
};
template <typename T>
struct CopyTestChrono : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes);
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
bool rhs_v[] = {1, 0, 1, 1};
wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
bool lhs_v[] = {0, 1, 1, 1};
wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = {1, 0, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els);
auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true);
auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false);
wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask);
auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w);
}
struct CopyTestUntyped : public cudf::test::BaseFixture {
};
TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch)
{
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1};
wrapper<float> lhs_w{5, 5, 5, 5};
wrapper<int32_t> rhs_w{6, 6, 6, 6};
EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error);
}
struct StringsCopyIfElseTest : public cudf::test::BaseFixture {
};
TEST_F(StringsCopyIfElseTest, CopyIfElse)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", "ééé"};
cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids);
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 1, 0, 1, 0, 1};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) {
if (mask[idx] and mask_v[idx])
h_expected.push_back(h_strings1[idx]);
else
h_expected.push_back(h_strings2[idx]);
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {1, 0, 1, 0, 1, 0};
bool mask_v[] = {1, 1, 1, 1, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v);
auto results = cudf::copy_if_else(strings1, strings2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx] and mask_v[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_strings2[idx]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar strings1{h_string1[0]};
std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids);
bool mask[] = {0, 1, 1, 1, 0, 1};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6);
auto results = cudf::copy_if_else(strings2, strings1, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_strings2[idx]);
} else {
h_expected.push_back(h_string1[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar)
{
auto valids = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? true : false; });
std::vector<const char*> h_string1{"eee"};
cudf::string_scalar string1{h_string1[0]};
std::vector<const char*> h_string2{"aaa"};
cudf::string_scalar string2{h_string2[0], false};
constexpr cudf::size_type mask_size = 6;
bool mask[] = {1, 0, 1, 0, 1, 0};
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size);
auto results = cudf::copy_if_else(string1, string2, mask_w);
std::vector<const char*> h_expected;
for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) {
if (mask[idx]) {
h_expected.push_back(h_string1[0]);
} else {
h_expected.push_back(h_string2[0]);
}
}
cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
template <typename T>
struct FixedPointTypes : public cudf::test::BaseFixture {
};
TYPED_TEST_CASE(FixedPointTypes, cudf::test::FixedPointTypes);
TYPED_TEST(FixedPointTypes, FixedPointSimple)
{
using namespace numeric;
using decimalXX = TypeParam;
using RepType = cudf::device_storage_type_t<decimalXX>;
using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>;
auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0};
auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}};
auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-2}};
auto const expected = fp_wrapper{{0, 220, 330, 440, 0, 0}, scale_type{-2}};
auto const result = cudf::copy_if_else(a, b, mask);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}
// copy_if_else on a 2000-row fixed-point column built from iterators:
// values count up from -1000; the mask keeps positive values from A and
// substitutes 0 (from B) elsewhere, i.e. expected[i] = max(0, i - 1000).
TYPED_TEST(FixedPointTypes, FixedPointLarge)
{
  using namespace numeric;
  using namespace cudf::test;
  using decimalXX  = TypeParam;
  using RepType    = cudf::device_storage_type_t<decimalXX>;
  using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>;
  auto a = thrust::make_counting_iterator(-1000);
  auto b = thrust::make_constant_iterator(0);
  auto m = cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return i > 0; });
  auto e =
    cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return std::max(0, i); });
  auto const mask     = cudf::test::fixed_width_column_wrapper<bool>(m, m + 2000);
  auto const A        = fp_wrapper{a, a + 2000, scale_type{-3}};
  auto const B        = fp_wrapper{b, b + 2000, scale_type{-3}};
  auto const expected = fp_wrapper{e, e + 2000, scale_type{-3}};
  auto const result   = cudf::copy_if_else(A, B, mask);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view());
}
// copy_if_else must reject fixed-point inputs whose scales differ
// (-2 vs -1) by throwing cudf::logic_error.
TYPED_TEST(FixedPointTypes, FixedPointScaleMismatch)
{
  using namespace numeric;
  using decimalXX  = TypeParam;
  using RepType    = cudf::device_storage_type_t<decimalXX>;
  using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>;
  auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0};
  auto const a    = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}};
  auto const b    = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-1}};
  EXPECT_THROW(cudf::copy_if_else(a, b, mask), cudf::logic_error);
}
|
28e6f1b85d07f19c1289d8d20aca32698e7059f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
// Reports (through *has_debug) whether this kernel was built with device
// debug information.
// NOTE(review): __CUDACC_DEBUG__ is an nvcc macro; under a pure HIP/clang
// build it is presumably never defined, so this hipified check may always
// yield false — TODO confirm against the HIP toolchain's debug macros.
static __global__ void debug_kernel(bool* has_debug)
{
// Verify using the return code if we have GPU debug flag enabled
#if defined(__HIPCC__) && defined(__CUDACC_DEBUG__)
  *has_debug = true;
#else
  *has_debug = false;
#endif
}
// Scans all visible devices and makes the first one with compute
// capability >= 3.0 the current device.
// Returns 0 on success, 1 if enumeration fails or no suitable device exists.
int choose_cuda_device()
{
  int deviceCount = 0;
  if (hipGetDeviceCount(&deviceCount) != hipSuccess) {
    std::cerr << "Failed to retrieve the number of CUDA enabled devices"
              << std::endl;
    return 1;
  }
  for (int dev = 0; dev < deviceCount; ++dev) {
    hipDeviceProp_t properties;
    if (hipGetDeviceProperties(&properties, dev) != hipSuccess) {
      std::cerr << "Could not retrieve properties from CUDA device " << dev
                << std::endl;
      return 1;
    }
    if (properties.major < 3) {
      continue;  // device too old, keep scanning
    }
    if (hipSetDevice(dev) == hipSuccess) {
      return 0;  // device selected successfully
    }
    std::cout << "Could not select CUDA device " << dev << std::endl;
  }
  std::cout << "Could not find a CUDA enabled card supporting compute >=3.0"
            << std::endl;
  return 1;
}
// Launches debug_kernel and uses the process exit status to report whether
// the device code carries debug info: 0 = debug enabled, 1 = not (or error).
int main(int argc, char** argv)
{
  bool* has_debug = NULL;
  hipError_t err = hipMallocManaged(&has_debug, sizeof(bool));
  if (err != hipSuccess) {
    std::cerr << "hipMallocManaged failed:\n"
              << "  " << hipGetErrorString(err) << std::endl;
    return 1;
  }
  int status = 0;
  hipLaunchKernelGGL(( debug_kernel), dim3(1), dim3(1), 0, 0, has_debug);
  // Synchronize both to surface launch errors and so the managed flag is
  // safe to read from the host.
  err = hipDeviceSynchronize();
  if (err != hipSuccess) {
    std::cerr << "debug_kernel: kernel launch shouldn't have failed\n"
              << "reason:\t" << hipGetErrorString(err) << std::endl;
    status = 1;
  } else if (*has_debug == false) {
    std::cerr << "debug_kernel: kernel not compiled with device debug"
              << std::endl;
    status = 1;
  }
  hipFree(has_debug);  // fix: managed allocation was previously leaked on every path
  return status;
}
| 28e6f1b85d07f19c1289d8d20aca32698e7059f4.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Sets *has_debug to true iff this kernel was compiled by nvcc with device
// debug info (-G defines __CUDACC_DEBUG__ for device compilation).
static __global__ void debug_kernel(bool* has_debug)
{
// Verify using the return code if we have GPU debug flag enabled
#if defined(__CUDACC__) && defined(__CUDACC_DEBUG__)
  *has_debug = true;
#else
  *has_debug = false;
#endif
}
// Scans all visible devices and makes the first one with compute
// capability >= 3.0 the current device.
// Returns 0 on success, 1 if enumeration fails or no suitable device exists.
int choose_cuda_device()
{
  int deviceCount = 0;
  if (cudaGetDeviceCount(&deviceCount) != cudaSuccess) {
    std::cerr << "Failed to retrieve the number of CUDA enabled devices"
              << std::endl;
    return 1;
  }
  for (int dev = 0; dev < deviceCount; ++dev) {
    cudaDeviceProp properties;
    if (cudaGetDeviceProperties(&properties, dev) != cudaSuccess) {
      std::cerr << "Could not retrieve properties from CUDA device " << dev
                << std::endl;
      return 1;
    }
    if (properties.major < 3) {
      continue;  // device too old, keep scanning
    }
    if (cudaSetDevice(dev) == cudaSuccess) {
      return 0;  // device selected successfully
    }
    std::cout << "Could not select CUDA device " << dev << std::endl;
  }
  std::cout << "Could not find a CUDA enabled card supporting compute >=3.0"
            << std::endl;
  return 1;
}
// Launches debug_kernel and uses the process exit status to report whether
// the device code carries debug info: 0 = debug enabled, 1 = not (or error).
int main(int argc, char** argv)
{
  bool* has_debug = NULL;
  cudaError_t err = cudaMallocManaged(&has_debug, sizeof(bool));
  if (err != cudaSuccess) {
    std::cerr << "cudaMallocManaged failed:\n"
              << "  " << cudaGetErrorString(err) << std::endl;
    return 1;
  }
  int status = 0;
  debug_kernel<<<1, 1>>>(has_debug);
  // Synchronize both to surface launch errors and so the managed flag is
  // safe to read from the host.
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    std::cerr << "debug_kernel: kernel launch shouldn't have failed\n"
              << "reason:\t" << cudaGetErrorString(err) << std::endl;
    status = 1;
  } else if (*has_debug == false) {
    std::cerr << "debug_kernel: kernel not compiled with device debug"
              << std::endl;
    status = 1;
  }
  cudaFree(has_debug);  // fix: managed allocation was previously leaked on every path
  return status;
}
|
f8a096dc317350f7626f886390628b5824cb515c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define SIZE 1024
// Element-wise vector addition: c[i] = a[i] + b[i] for the first n elements.
// Uses threadIdx.x only, so it assumes a single-block launch with >= n threads.
__global__ void VectorAdd( int * a, int *b, int* c, int n)
{
    int idx = threadIdx.x;
    if (idx < n) {
        c[idx] = a[idx] + b[idx];
    }
}
// Adds two SIZE-element vectors on the GPU and prints the first ten results.
int main()
{
    int *a, *b, *c;        // host buffers
    int *d_a, *d_b, *d_c;  // device buffers
    //allocate space for all gpu and cpu data
    a = (int *)malloc(SIZE * sizeof(int));
    b = (int *)malloc(SIZE * sizeof(int));
    c = (int *)malloc(SIZE * sizeof(int));
    hipMalloc(&d_a, SIZE * sizeof(int));
    hipMalloc(&d_b, SIZE * sizeof(int));
    hipMalloc(&d_c, SIZE * sizeof(int));
    //init all gpu and cpu data
    for (int i = 0; i < SIZE; ++i) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    hipMemcpy(d_a, a, SIZE*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, SIZE*sizeof(int), hipMemcpyHostToDevice);
    // fix: previously uploaded `b` into d_c; upload the zero-initialized `c`
    // so the output buffer starts in the intended state.
    hipMemcpy(d_c, c, SIZE*sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( VectorAdd), dim3(1), dim3(SIZE), 0, 0, d_a,d_b,d_c,SIZE);
    //get the gpu data out to cpu (blocking copy also synchronizes the kernel)
    hipMemcpy(c, d_c, SIZE*sizeof(int), hipMemcpyDeviceToHost);
    for (int i = 0; i < 10; ++i) {
        printf("c[%d] = %d \n", i, c[i]);
    }
    free(a);
    free(b);
    free(c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
} | f8a096dc317350f7626f886390628b5824cb515c.cu | #include <stdio.h>
#define SIZE 1024
// Element-wise vector addition: c[i] = a[i] + b[i] for the first n elements.
// Uses threadIdx.x only, so it assumes a single-block launch with >= n threads.
__global__ void VectorAdd( int * a, int *b, int* c, int n)
{
    int idx = threadIdx.x;
    if (idx < n) {
        c[idx] = a[idx] + b[idx];
    }
}
// Adds two SIZE-element vectors on the GPU and prints the first ten results.
int main()
{
    int *a, *b, *c;        // host buffers
    int *d_a, *d_b, *d_c;  // device buffers
    //allocate space for all gpu and cpu data
    a = (int *)malloc(SIZE * sizeof(int));
    b = (int *)malloc(SIZE * sizeof(int));
    c = (int *)malloc(SIZE * sizeof(int));
    cudaMalloc(&d_a, SIZE * sizeof(int));
    cudaMalloc(&d_b, SIZE * sizeof(int));
    cudaMalloc(&d_c, SIZE * sizeof(int));
    //init all gpu and cpu data
    for (int i = 0; i < SIZE; ++i) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    // fix: previously uploaded `b` into d_c; upload the zero-initialized `c`
    // so the output buffer starts in the intended state.
    cudaMemcpy(d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    VectorAdd<<< 1, SIZE>>>(d_a,d_b,d_c,SIZE);
    //get the gpu data out to cpu (blocking copy also synchronizes the kernel)
    cudaMemcpy(c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; ++i) {
        printf("c[%d] = %d \n", i, c[i]);
    }
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
} |
cffc929796347e70d3e668d6824d7196a3697b01.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime_api.h>
#include "dali/pipeline/operator/builtin/copy.h"
namespace dali {
// GPU specialization of the Copy operator: forwards the input batch to the
// output unchanged via an asynchronous device-to-device memcpy issued on the
// workspace stream.
template<>
void Copy<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
  auto &input = ws.Input<GPUBackend>(0);
  auto &output = ws.Output<GPUBackend>(0);
  // Mirror the input's type, layout and shape before copying raw bytes.
  output.set_type(input.type());
  output.SetLayout(input.GetLayout());
  output.ResizeLike(input);
  CUDA_CALL(hipMemcpyAsync(
      output.raw_mutable_data(),
      input.raw_data(),
      input.nbytes(),
      hipMemcpyDeviceToDevice,
      ws.stream()));
}
DALI_REGISTER_OPERATOR(Copy, Copy<GPUBackend>, GPU);
} // namespace dali
| cffc929796347e70d3e668d6824d7196a3697b01.cu | // Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime_api.h>
#include "dali/pipeline/operator/builtin/copy.h"
namespace dali {
// GPU specialization of the Copy operator: forwards the input batch to the
// output unchanged via an asynchronous device-to-device memcpy issued on the
// workspace stream.
template<>
void Copy<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
  auto &input = ws.Input<GPUBackend>(0);
  auto &output = ws.Output<GPUBackend>(0);
  // Mirror the input's type, layout and shape before copying raw bytes.
  output.set_type(input.type());
  output.SetLayout(input.GetLayout());
  output.ResizeLike(input);
  CUDA_CALL(cudaMemcpyAsync(
      output.raw_mutable_data(),
      input.raw_data(),
      input.nbytes(),
      cudaMemcpyDeviceToDevice,
      ws.stream()));
}
DALI_REGISTER_OPERATOR(Copy, Copy<GPUBackend>, GPU);
} // namespace dali
|
6bcb40500404d3803df26552713b2107dc253781.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/GaborOrientationFilter.cu"
#else
// Expands a batch of Gabor Orientation Filters: modulates the 5-D learned
// weights [nOutputPlane][nInputPlane][nChannel][kH][kW] by the Gabor filter
// bank and writes 4-D convolution weights of shape
// [nOutputPlane*nChannel][nInputPlane*nChannel][kH][kW] into `output`.
// Returns 1 on success (Torch C convention).
// NOTE(review): nOutputPlane is `short` while the other extents are uint8/16
// — presumably a truncation hazard for large layer widths; confirm intent.
int cugcn_(GOF_Producing)(
    THCTensor *weight,
    THCTensor *gaborFilterBank,
    THCTensor *output)
{
    THCUNN_assertSameGPU(state, 3, weight, gaborFilterBank, output);
    THArgCheck(weight->nDimension == 5, 1, "only supports a batch of GOFs.");
    const short nOutputPlane = weight->size[0];
    const uint16 nInputPlane = weight->size[1];
    const uint8 nChannel = weight->size[2];
    const uint8 kH = weight->size[3];
    const uint8 kW = weight->size[4];
    THCTensor_(resize4d)(state, output, nOutputPlane * nChannel, nInputPlane * nChannel, kH, kW);
    real *weightData = THCTensor_(data)(state, weight);
    real *gaborFilterBankData = THCTensor_(data)(state, gaborFilterBank);
    real *outputData = THCTensor_(data)(state, output);
    // One kernel thread per (output plane, input plane, channel, kh, kw) entry.
    const uint16 nEntry = nChannel * kH * kW;
    const uint32 count = nOutputPlane * nInputPlane * nEntry;
    kernel_(GaborProducing)(
        THCState_getCurrentStream(state),
        count,
        weightData,
        gaborFilterBankData,
        nInputPlane,
        nOutputPlane,
        nChannel,
        nEntry,
        outputData);
    THCudaCheck(hipGetLastError());
    return 1;
}
// Backward pass companion of GOF_Producing: folds the 4-D gradient
// [nOutputPlane*nChannel][nInputPlane*nChannel][kH][kW] back into the 5-D
// learned-weight gradient, aligning it against the Gabor filter bank.
// Returns 1 on success (Torch C convention).
int cugcn_(GOF_BPAlign)(
    THCTensor *weight,
    THCTensor *gaborFilterBank,
    THCTensor *gradWeight)
{
    THCUNN_assertSameGPU(state, 3, weight, gaborFilterBank, gradWeight);
    const uint8 nChannel = gaborFilterBank->size[0];
    const uint16 kH = gradWeight->size[2];;
    const uint16 kW = gradWeight->size[3];;
    // Plane counts are recovered by dividing out the channel expansion.
    const uint16 nOutputPlane = gradWeight->size[0] / nChannel;
    const uint16 nInputPlane = gradWeight->size[1] / nChannel;
    THCTensor_(resize5d)(state, weight, nOutputPlane, nInputPlane, nChannel, kH, kW);
    real *weightData = THCTensor_(data)(state, weight);
    real *gaborFilterBankData = THCTensor_(data)(state, gaborFilterBank);
    real *gradWeightData = THCTensor_(data)(state, gradWeight);
    const uint16 nEntry = nChannel * kH * kW;
    const uint32 count = nOutputPlane * nInputPlane * nEntry;
    kernel_(BPAlign)(
        THCState_getCurrentStream(state),
        count,
        gradWeightData,
        nInputPlane,
        nOutputPlane,
        nChannel,
        kH,
        kW,
        weightData,
        gaborFilterBankData);
    THCudaCheck(hipGetLastError());
    return 1;
}
#endif | 6bcb40500404d3803df26552713b2107dc253781.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/GaborOrientationFilter.cu"
#else
// Expands a batch of Gabor Orientation Filters: modulates the 5-D learned
// weights [nOutputPlane][nInputPlane][nChannel][kH][kW] by the Gabor filter
// bank and writes 4-D convolution weights of shape
// [nOutputPlane*nChannel][nInputPlane*nChannel][kH][kW] into `output`.
// Returns 1 on success (Torch C convention).
// NOTE(review): nOutputPlane is `short` while the other extents are uint8/16
// — presumably a truncation hazard for large layer widths; confirm intent.
int cugcn_(GOF_Producing)(
    THCTensor *weight,
    THCTensor *gaborFilterBank,
    THCTensor *output)
{
    THCUNN_assertSameGPU(state, 3, weight, gaborFilterBank, output);
    THArgCheck(weight->nDimension == 5, 1, "only supports a batch of GOFs.");
    const short nOutputPlane = weight->size[0];
    const uint16 nInputPlane = weight->size[1];
    const uint8 nChannel = weight->size[2];
    const uint8 kH = weight->size[3];
    const uint8 kW = weight->size[4];
    THCTensor_(resize4d)(state, output, nOutputPlane * nChannel, nInputPlane * nChannel, kH, kW);
    real *weightData = THCTensor_(data)(state, weight);
    real *gaborFilterBankData = THCTensor_(data)(state, gaborFilterBank);
    real *outputData = THCTensor_(data)(state, output);
    // One kernel thread per (output plane, input plane, channel, kh, kw) entry.
    const uint16 nEntry = nChannel * kH * kW;
    const uint32 count = nOutputPlane * nInputPlane * nEntry;
    kernel_(GaborProducing)(
        THCState_getCurrentStream(state),
        count,
        weightData,
        gaborFilterBankData,
        nInputPlane,
        nOutputPlane,
        nChannel,
        nEntry,
        outputData);
    THCudaCheck(cudaGetLastError());
    return 1;
}
// Backward pass companion of GOF_Producing: folds the 4-D gradient
// [nOutputPlane*nChannel][nInputPlane*nChannel][kH][kW] back into the 5-D
// learned-weight gradient, aligning it against the Gabor filter bank.
// Returns 1 on success (Torch C convention).
int cugcn_(GOF_BPAlign)(
    THCTensor *weight,
    THCTensor *gaborFilterBank,
    THCTensor *gradWeight)
{
    THCUNN_assertSameGPU(state, 3, weight, gaborFilterBank, gradWeight);
    const uint8 nChannel = gaborFilterBank->size[0];
    const uint16 kH = gradWeight->size[2];;
    const uint16 kW = gradWeight->size[3];;
    // Plane counts are recovered by dividing out the channel expansion.
    const uint16 nOutputPlane = gradWeight->size[0] / nChannel;
    const uint16 nInputPlane = gradWeight->size[1] / nChannel;
    THCTensor_(resize5d)(state, weight, nOutputPlane, nInputPlane, nChannel, kH, kW);
    real *weightData = THCTensor_(data)(state, weight);
    real *gaborFilterBankData = THCTensor_(data)(state, gaborFilterBank);
    real *gradWeightData = THCTensor_(data)(state, gradWeight);
    const uint16 nEntry = nChannel * kH * kW;
    const uint32 count = nOutputPlane * nInputPlane * nEntry;
    kernel_(BPAlign)(
        THCState_getCurrentStream(state),
        count,
        gradWeightData,
        nInputPlane,
        nOutputPlane,
        nChannel,
        kH,
        kW,
        weightData,
        gaborFilterBankData);
    THCudaCheck(cudaGetLastError());
    return 1;
}
#endif |
944b116195f2c7f7fb5f34b09f3370b41047b576.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "merge_without_blend_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations (block dims) and problem sizes swept below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: times merge_without_blend_kernel for the
// first argv[1] problem sizes x all 20 block shapes and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
    int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
    // NOTE(review): buffers are allocated as XSIZE*YSIZE *bytes* yet typed
    // float* — presumably should be XSIZE*YSIZE*sizeof(float); confirm the
    // kernel's indexing before trusting these timings.
    float *srcimg = NULL;
    hipMalloc(&srcimg, XSIZE*YSIZE);
    float *targetimg = NULL;
    hipMalloc(&targetimg, XSIZE*YSIZE);
    float *outimg = NULL;
    hipMalloc(&outimg, XSIZE*YSIZE);
    int *boundary_array = NULL;
    hipMalloc(&boundary_array, XSIZE*YSIZE);
    int source_nchannel = 1;
    int source_width = XSIZE;
    int source_height = YSIZE;
    // Round the grid up so it covers the whole image.
    int iXSIZE= XSIZE;
    int iYSIZE= YSIZE;
    while(iXSIZE%BLOCKX!=0)
    {
    iXSIZE++;
    }
    while(iYSIZE%BLOCKY!=0)
    {
    iYSIZE++;
    }
    dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    // hipFree(0) forces context creation; then one cold launch + sync.
    hipFree(0);hipLaunchKernelGGL((
    merge_without_blend_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, srcimg,targetimg,outimg,boundary_array,source_nchannel,source_width,source_height);
    hipDeviceSynchronize();
    // 10 warmup launches (not synchronized individually).
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
    merge_without_blend_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, srcimg,targetimg,outimg,boundary_array,source_nchannel,source_width,source_height);
    }
    // Timed region: 1000 asynchronous launches measured with wall-clock time.
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
    merge_without_blend_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, srcimg,targetimg,outimg,boundary_array,source_nchannel,source_width,source_height);
    }
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
}} | 944b116195f2c7f7fb5f34b09f3370b41047b576.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "merge_without_blend_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations (block dims) and problem sizes swept below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: times merge_without_blend_kernel for the
// first argv[1] problem sizes x all 20 block shapes and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
    int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
    // NOTE(review): buffers are allocated as XSIZE*YSIZE *bytes* yet typed
    // float* — presumably should be XSIZE*YSIZE*sizeof(float); confirm the
    // kernel's indexing before trusting these timings.
    float *srcimg = NULL;
    cudaMalloc(&srcimg, XSIZE*YSIZE);
    float *targetimg = NULL;
    cudaMalloc(&targetimg, XSIZE*YSIZE);
    float *outimg = NULL;
    cudaMalloc(&outimg, XSIZE*YSIZE);
    int *boundary_array = NULL;
    cudaMalloc(&boundary_array, XSIZE*YSIZE);
    int source_nchannel = 1;
    int source_width = XSIZE;
    int source_height = YSIZE;
    // Round the grid up so it covers the whole image.
    int iXSIZE= XSIZE;
    int iYSIZE= YSIZE;
    while(iXSIZE%BLOCKX!=0)
    {
    iXSIZE++;
    }
    while(iYSIZE%BLOCKY!=0)
    {
    iYSIZE++;
    }
    dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    // cudaFree(0) forces context creation; then one cold launch + sync.
    cudaFree(0);
    merge_without_blend_kernel<<<gridBlock,threadBlock>>>(srcimg,targetimg,outimg,boundary_array,source_nchannel,source_width,source_height);
    cudaDeviceSynchronize();
    // 10 warmup launches (not synchronized individually).
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    merge_without_blend_kernel<<<gridBlock,threadBlock>>>(srcimg,targetimg,outimg,boundary_array,source_nchannel,source_width,source_height);
    }
    // Timed region: 1000 asynchronous launches measured with wall-clock time.
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    merge_without_blend_kernel<<<gridBlock,threadBlock>>>(srcimg,targetimg,outimg,boundary_array,source_nchannel,source_width,source_height);
    }
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
    }}
8052de3108484d6d61b839759aa3860a9ca6e3e8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
//#include "../stream_compaction/efficient.h"
#include <thrust/partition.h>
#include <thrust/execution_policy.h>
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Checks for a pending CUDA error (after forcing completion of outstanding
// work) and exits the process with a diagnostic if one is found.
// Compiled to a no-op when ERRORCHECK is 0. Normally invoked through the
// checkCUDAError(msg) macro, which supplies file and line.
void checkCUDAErrorFn(const char* msg, const char* file, int line) {
#if ERRORCHECK
    // Synchronize first so asynchronous kernel faults surface here.
    hipDeviceSynchronize();
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
#  ifdef _WIN32
    getchar();  // keep the console window open on Windows
#  endif
    exit(EXIT_FAILURE);
#endif
}
// Builds a deterministic per-(iteration, pixel, depth) RNG by mixing the
// three values through utilhash, so every thread gets an independent,
// reproducible stream.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
    return thrust::default_random_engine(h);
}
// Kernel that writes the image to the OpenGL PBO directly.
// One thread per pixel: averages the accumulated radiance over `iter`
// samples, clamps to [0,255] and stores RGBA8 into the PBO.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
    int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < resolution.x && y < resolution.y) {
        int index = x + (y * resolution.x);
        glm::vec3 pix = image[index];

        glm::ivec3 color;
        // Divide by iteration count: image[] holds a running radiance sum.
        color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
        color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
        color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);

        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
// Host-side handle to the scene plus persistent device buffers, sized once
// per scene in pathtraceInit and released in pathtraceFree.
static Scene* hst_scene = NULL;
static glm::vec3* dev_image = NULL;                       // accumulated radiance per pixel
static Geom* dev_geoms = NULL;                            // scene geometry
static Material* dev_materials = NULL;                    // scene materials
static PathSegment* dev_paths = NULL;                     // one path per pixel
static ShadeableIntersection* dev_intersections = NULL;   // per-path hit records
static int* dev_Stencil;                                  // 0/1 flags for stream compaction
static PathSegment* dev_cache_paths = NULL;               // cached first-bounce paths
static ShadeableIntersection* dev_cache_intersections = NULL;  // cached first-bounce hits
static bool cacheAvailable = false;  // set once the first-bounce cache is filled
int cacheNumPaths = 0;
// TODO: static variables for device memory, any extra info you need, etc
// ...
// Feature toggles: first-bounce caching (disables AA jitter), depth of
// field, and bounding-volume culling for OBJ meshes.
bool usingCache = true;
bool usingDOF = false;
bool useBVH = true;
// Allocates and initializes all per-scene device buffers (image film, path
// pool, geometry/material tables, intersection buffers, compaction stencil
// and first-bounce caches). Also uploads per-mesh triangle data and bounding
// volumes for OBJ geometry. Must be paired with pathtraceFree.
void pathtraceInit(Scene* scene) {
    hst_scene = scene;
    const Camera& cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));

    hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));

    // Upload triangle soup + bounding volume for every OBJ mesh. The device
    // pointers are stored back into the host-side Geom records before those
    // records are copied to dev_geoms below.
    for (int i = 0; i < scene->geoms.size(); i++)
    {
        if (scene->geoms[i].type == OBJ)
        {
            Geom& currGeom = scene->geoms[i];
            // 6 vec4s per triangle (3 positions + 3 normals).
            hipMalloc(&currGeom.Device_Triangle_points_normals, 6 * currGeom.triangleCount * sizeof(glm::vec4));
            hipMemcpy(currGeom.Device_Triangle_points_normals, currGeom.Host_Triangle_points_normals, 6 * currGeom.triangleCount * sizeof(glm::vec4), hipMemcpyHostToDevice);
            //Copy Bound Volume Data
            // NOTE(review): leftover debug print of the 14 BVH floats on
            // every init — consider removing for production builds.
            for (int i = 0; i < 14; i++)
            {
                std::cout << currGeom.Host_BVH[i] << "\n";
            }
            hipMalloc(&currGeom.Device_BVH, 14 * sizeof(float));
            hipMemcpy(currGeom.Device_BVH, currGeom.Host_BVH, 14 * sizeof(float), hipMemcpyHostToDevice);
        }
    }

    hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);

    hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);

    hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));

    // TODO: initialize any extra device memeory you need
    hipMalloc(&dev_Stencil, pixelcount * sizeof(int));
    hipMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment));
    hipMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_cache_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // Only Handles for one mesh Structore right now need to add functionality for more afterwards
    checkCUDAError("pathtraceInit");
}
// Releases every device buffer allocated by pathtraceInit, including the
// per-mesh triangle/BVH buffers attached to OBJ geometry.
void pathtraceFree() {
    hipFree(dev_image);  // no-op if dev_image is null
    hipFree(dev_paths);
    hipFree(dev_geoms);
    hipFree(dev_materials);
    hipFree(dev_intersections);
    hipFree(dev_Stencil);
    // TODO: clean up any extra device memory you created
    hipFree(dev_cache_paths);
    hipFree(dev_cache_intersections);
    // Fix: the per-mesh buffers allocated in pathtraceInit (triangle data and
    // bounding volumes) were previously leaked. Their device pointers live in
    // the host-side Geom records of hst_scene.
    if (hst_scene != NULL) {
        for (size_t i = 0; i < hst_scene->geoms.size(); i++) {
            Geom& currGeom = hst_scene->geoms[i];
            if (currGeom.type == OBJ) {
                hipFree(currGeom.Device_Triangle_points_normals);
                currGeom.Device_Triangle_points_normals = NULL;
                hipFree(currGeom.Device_BVH);
                currGeom.Device_BVH = NULL;
            }
        }
    }
    checkCUDAError("pathtraceFree");
}
// Uniformly samples a point inside the unit disk in the z = 0 plane
// (used as the lens sample for depth of field).
// Fix: the previous version normalized a vector with components in [0,1),
// which produced points ON the unit circle restricted to the first quadrant
// — not a uniform disk sample — and could normalize a near-zero vector.
__device__ glm::vec3 random_in_unit_disk(thrust::default_random_engine& rng) {
    thrust::uniform_real_distribution<float> u01(0, 1);
    float r = sqrtf(u01(rng));            // sqrt makes the radius area-uniform
    float theta = 6.2831853f * u01(rng);  // angle in [0, 2*pi)
    return glm::vec3(r * cosf(theta), r * sinf(theta), 0.0f);
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - when the first-bounce cache is disabled, the ray direction is
*   jittered by up to one pixel for sub-pixel sampling; with the cache enabled
*   the ray must be deterministic, so no jitter is applied.
* lens effect - when usingDOF is set, the ray origin is jittered on a thin
*   lens and re-aimed at the focal plane.
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, bool usingCache, bool usingDOF)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment& segment = pathSegments[index];

        segment.ray.origin = cam.position;
        // Throughput starts at the multiplicative identity (white).
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);

        if (!usingCache)
        {
            /// Using Anti ALiasing with Jittering
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
            thrust::uniform_real_distribution<float> u01(0, 1);
            // TODO: implement antialiasing by jittering the ray
            // (note: the `+ +u01(rng)` below is a unary plus — harmless)
            segment.ray.direction = glm::normalize(cam.view
                - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng))
                - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + +u01(rng))
            );
        }
        else
        {
            // Deterministic (unjittered) ray so cached first bounces stay valid.
            segment.ray.direction = glm::normalize(cam.view
                - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
                - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
            );
        }

        //AA plus DOF
        if (usingDOF)
        {
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
            thrust::uniform_real_distribution<float> u01(0, 1);
            // Thin-lens model: sample a point on the aperture disk, then aim
            // the ray at the original direction's focal-plane intersection.
            double lens_radius = cam.aperture / 2;
            glm::vec3 rd = float(lens_radius) * random_in_unit_disk(rng);
            glm::vec3 offset = cam.up * rd.y + cam.right * rd.x;
            glm::vec3 rayOrigin = cam.position + offset; // NEw origin
            //Focal Point
            glm::vec3 focalPoint = segment.ray.origin + (float)cam.focus_dist * segment.ray.direction;
            segment.ray.origin = rayOrigin;
            segment.ray.direction = glm::normalize(focalPoint - rayOrigin);
        }

        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// One thread per active path: finds the nearest geometry hit along the
// path's ray and records {t, materialId, surfaceNormal}, or t = -1 on miss.
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment* pathSegments
    , Geom* geoms
    , int geoms_size
    , ShadeableIntersection* intersections
    , bool useBVH)
{
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;

    if (path_index < num_paths)
    {
        PathSegment pathSegment = pathSegments[path_index];

        float t;
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;

        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;

        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++)
        {
            Geom& geom = geoms[i];

            // Fix: reset t for every geometry. Previously, when the BVH test
            // rejected a mesh, t kept the previous geometry's value and could
            // be misread as a fresh hit for this geometry.
            t = -1.0f;

            if (geom.type == CUBE)
            {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE)
            {
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == OBJ)
            {
                // Optionally cull with the mesh's bounding volume first.
                if (!useBVH || intersect(pathSegment.ray, geom))
                {
                    t = MeshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
                }
            }
            // TODO: add more intersection tests here... triangle? metaball? CSG?

            // Compute the minimum t from the intersection tests to determine what
            // scene geometry object was hit first.
            if (t > 0.0f && t_min > t)
            {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }

        if (hit_geom_index == -1)
        {
            intersections[path_index].t = -1.0f;
        }
        else
        {
            //The ray hits something
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
// Behavior: lights terminate the path and multiply in emission; everything
// else multiplies in the albedo and decrements the bounce budget; misses go
// black and terminate.
__global__ void shadeFakeMaterial(
    int iter
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials
)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths)
    {
        ShadeableIntersection intersection = shadeableIntersections[idx];
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG
            // LOOK: this is how you use thrust's RNG! Please look at
            // makeSeededRandomEngine as well.
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
            thrust::uniform_real_distribution<float> u01(0, 1);

            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;

            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = 0;
            }
            // Otherwise, do some pseudo-lighting computation. This is actually more
            // like what you would expect from shading in a rasterizer like OpenGL.
            // TODO: replace this! you should be able to start with basically a one-liner
            else {
                pathSegments[idx].remainingBounces -= 1;
                glm::vec3 intersectPt = getPointOnRay(pathSegments[idx].ray, intersection.t);
                //scatterRay(pathSegments[idx], intersectPt, intersection.surfaceNormal, material, rng);
                /*    if (material.hasRefractive > 0.0f)
                {
                    pathSegments[idx].color = pathSegments[idx].color;
                }
                else
                {
                    pathSegments[idx].color *= materialColor;
                }*/
                pathSegments[idx].color *= materialColor;
                //float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
                //pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
                //pathSegments[idx].color *= u01(rng); // apply some noise because why not
            }
            // If there was no intersection, color the ray black.
            // Lots of renderers use 4 channel color, RGBA, where A = alpha, often
            // used for opacity, in which case they can indicate "no opacity".
            // This can be useful for post-processing and image compositing.
        }
        else {
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = 0;
        }
    }
}
// Real shading kernel: lights terminate the path with emission; other hits
// decrement the bounce budget and delegate the BSDF sample + new ray to
// scatterRay; misses terminate with black.
__global__ void shadeBSDFMaterial(
    int iter
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials, int depth, Camera cam
)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths)
    {
        ShadeableIntersection intersection = shadeableIntersections[idx];
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG
            // LOOK: this is how you use thrust's RNG! Please look at
            // makeSeededRandomEngine as well.
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
            thrust::uniform_real_distribution<float> u01(0, 1);

            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;

            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = 0;
            }
            // Otherwise scatter a new ray off the surface via the material's BSDF.
            else {
                pathSegments[idx].remainingBounces -= 1;
                glm::vec3 intersectPt = getPointOnRay(pathSegments[idx].ray, intersection.t);
                scatterRay(pathSegments[idx], intersectPt, intersection.surfaceNormal, material, rng, cam);
            }
            // If there was no intersection, color the ray black.
            // Lots of renderers use 4 channel color, RGBA, where A = alpha, often
            // used for opacity, in which case they can indicate "no opacity".
            // This can be useful for post-processing and image compositing.
        }
        else {
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = 0;
        }
    }
}
// Add the current iteration's output to the overall image
// Accumulates each completed path's color into its pixel of the film buffer.
// Safe to run over the whole (possibly reordered) path pool because the
// write target is addressed by the path's own pixelIndex.
__global__ void finalGather(int nPaths, glm::vec3* image, PathSegment* iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths) {
        return;
    }
    PathSegment const path = iterationPaths[tid];
    image[path.pixelIndex] += path.color;
}
// Stream-compaction predicate over the liveness stencil: selects entries
// whose path is still active (stencil value 1). Despite the name, it keeps
// the *non*-terminated paths in the partition's front half.
struct hasTerminated
{
    __host__ __device__
    bool operator()(const int& x)
    {
        return 1 == x;
    }
};
// Builds a 0/1 stencil over the path pool: 1 = path still bouncing,
// 0 = path finished. Consumed by the thrust::stable_partition compaction.
__global__ void CompactionStencil(int nPaths, PathSegment* iterationPaths, int* dev_Stencil)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < nPaths)
    {
        dev_Stencil[tid] = (iterationPaths[tid].remainingBounces == 0) ? 0 : 1;
    }
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
// Records whether the first-bounce cache (dev_cache_paths /
// dev_cache_intersections) currently holds valid data.
void SetCacheState(bool a_state)
{
cacheAvailable = a_state;
}
// Orders intersections by material id; used as the key comparator when
// sorting paths so threads shading the same material become contiguous.
// (The sort itself is currently commented out in pathtrace.)
struct ShadeableIntersectionComparator
{
__host__ __device__
inline bool operator() (const ShadeableIntersection& a, const ShadeableIntersection& b)
{
return a.materialId < b.materialId;
}
};
// Runs one full path-tracing iteration: generate (or restore cached) camera
// rays, then repeatedly intersect + shade + stream-compact until every path
// terminates, and finally accumulate the results into dev_image and the
// OpenGL PBO. `iter` indexes the accumulation iteration (RNG seeding and
// averaging); `frame` is currently unused in this function.
void pathtrace(uchar4* pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera& cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
// Pipeline per iteration:
//   1. Generate one PathSegment per pixel (throughput starts at white).
//   2. Loop: intersect all active paths, shade/scatter them, then partition
//      away terminated paths (remainingBounces == 0).
//   3. finalGather adds each path's color into the accumulation image.
int depth = 0;
// First-bounce cache: camera rays are deterministic (no jitter) when
// caching is on, so the first ray generation can be reused across iterations.
if (usingCache)
{
if (!cacheAvailable)
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths, usingCache, usingDOF);
checkCUDAError("generate camera ray");
}
else
{
hipMemcpy(dev_paths, dev_cache_paths, pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
hipMemcpy(dev_intersections, dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
// NOTE(review): the cached intersections copied above are cleared and
// recomputed at the top of the loop below — verify the cache actually
// short-circuits the first bounce as intended.
depth = 1;
}
}
else
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths, usingCache, usingDOF);
checkCUDAError("generate camera ray");
}
hipDeviceSynchronize();
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, useBVH);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
// Populate the first-bounce cache the first time through.
if (!cacheAvailable && usingCache)
{
hipMemcpy(dev_cache_paths, dev_paths, pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
hipMemcpy(dev_cache_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
SetCacheState(true);
hipDeviceSynchronize();
}
#pragma region SortingMaterial
// Optional material sort (disabled): would group paths by material id so
// warps shade homogeneous materials together.
//thrust::device_ptr<ShadeableIntersection> dev_thrust_intersections = thrust::device_ptr<ShadeableIntersection>(dev_intersections);
//thrust::device_ptr<PathSegment> dev_thrust_PathSegment = thrust::device_ptr<PathSegment>(dev_paths);
//thrust::sort_by_key(thrust::device, dev_thrust_intersections, dev_thrust_intersections + num_paths, dev_thrust_PathSegment, ShadeableIntersectionComparator());
#pragma endregion
// Shade every active path; lights and misses terminate paths here.
const Camera *camcpy = &cam;
shadeBSDFMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials, depth, *camcpy
);
// Stream compaction: build a 0/1 liveness stencil, then partition live
// paths to the front. Terminated paths remain in the array (finalGather
// still reads their colors by pixelIndex); only num_paths shrinks.
CompactionStencil << <numblocksPathSegmentTracing, blockSize1d >> > (num_paths,
dev_paths, dev_Stencil);
PathSegment* itr = thrust::stable_partition(thrust::device, dev_paths, dev_paths + num_paths, dev_Stencil, hasTerminated());
int n = itr - dev_paths;
num_paths = n;
if (num_paths == 0)
{
iterationComplete = true; // TODO: should be based off stream compaction results.
}
//iterationComplete = true; // TODO: should be based off stream compaction results.
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 8052de3108484d6d61b839759aa3860a9ca6e3e8.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
//#include "../stream_compaction/efficient.h"
#include <thrust/partition.h>
#include <thrust/execution_policy.h>
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Reports the most recent CUDA error, if any, then aborts the process.
// Synchronizes the device first so asynchronous kernel failures are
// attributed to the call site identified by `msg`/`file`/`line`.
// Compiled to a no-op when ERRORCHECK is 0.
void checkCUDAErrorFn(const char* msg, const char* file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
// Keep the console window open so the message can be read before exit.
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
// Builds a thrust RNG seeded from the (iteration, path index, bounce depth)
// triple so each thread/bounce draws an independent pseudo-random sequence.
// NOTE(review): (1 << 31) overflows signed int (implementation-defined);
// consider 1u << 31.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Host-side handle to the scene being rendered (owned by the caller).
static Scene* hst_scene = NULL;
// Accumulated radiance per pixel across iterations (device memory).
static glm::vec3* dev_image = NULL;
static Geom* dev_geoms = NULL;
static Material* dev_materials = NULL;
// Pool of per-pixel path segments for the current iteration (device memory).
static PathSegment* dev_paths = NULL;
static ShadeableIntersection* dev_intersections = NULL;
// 0/1 liveness stencil used for stream compaction of finished paths.
static int* dev_Stencil;
// First-bounce cache: camera rays and their intersections, reused across
// iterations when usingCache is enabled.
static PathSegment* dev_cache_paths = NULL;
static ShadeableIntersection* dev_cache_intersections = NULL;
static bool cacheAvailable = false;
int cacheNumPaths = 0; // NOTE(review): appears unused in this file — verify
// TODO: static variables for device memory, any extra info you need, etc
// ...
// Feature toggles: first-bounce caching, depth of field, BVH culling.
bool usingCache = true;
bool usingDOF = false;
bool useBVH = true;
// Allocates and initializes every device-side buffer needed to render
// `scene`: image accumulator, path pool, geometry, materials, intersection
// buffer, compaction stencil, and the first-bounce cache. Also uploads
// per-mesh triangle data and bounding volumes for OBJ geometry.
// Must be called before pathtrace(); paired with pathtraceFree().
void pathtraceInit(Scene* scene) {
hst_scene = scene;
const Camera& cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
// Upload triangle/normal data for each OBJ mesh. NOTE(review): the device
// pointers are stored in the host-side Geom structs rather than module
// statics — verify they are released on teardown.
for (int i = 0; i < scene->geoms.size(); i++)
{
if (scene->geoms[i].type == OBJ)
{
Geom& currGeom = scene->geoms[i];
cudaMalloc(&currGeom.Device_Triangle_points_normals, 6 * currGeom.triangleCount * sizeof(glm::vec4));
cudaMemcpy(currGeom.Device_Triangle_points_normals, currGeom.Host_Triangle_points_normals, 6 * currGeom.triangleCount * sizeof(glm::vec4), cudaMemcpyHostToDevice);
//Copy Bound Volume Data
// NOTE(review): debug dump of the 14 bounding-volume values; the inner
// loop variable shadows the outer `i`. Consider removing for release.
for (int i = 0; i < 14; i++)
{
std::cout << currGeom.Host_BVH[i] << "\n";
}
cudaMalloc(&currGeom.Device_BVH, 14 * sizeof(float));
cudaMemcpy(currGeom.Device_BVH, currGeom.Host_BVH, 14 * sizeof(float), cudaMemcpyHostToDevice);
}
}
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
cudaMalloc(&dev_Stencil, pixelcount * sizeof(int));
cudaMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_cache_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// Only handles one mesh structure right now; extend for multiple meshes.
checkCUDAError("pathtraceInit");
}
// Releases all device memory created in pathtraceInit, including the
// per-mesh triangle and bounding-volume buffers whose pointers live in the
// host-side Geom structs (these were previously leaked).
// Safe to call even if pathtraceInit never ran (cudaFree(NULL) is a no-op).
void pathtraceFree() {
    cudaFree(dev_image);  // no-op if dev_image is null
    cudaFree(dev_paths);
    cudaFree(dev_geoms);
    cudaFree(dev_materials);
    cudaFree(dev_intersections);
    cudaFree(dev_Stencil);
    cudaFree(dev_cache_paths);
    cudaFree(dev_cache_intersections);
    // Free the per-mesh device buffers allocated for OBJ geometry in
    // pathtraceInit; null the pointers so a re-init cannot double-free.
    if (hst_scene != NULL) {
        for (size_t i = 0; i < hst_scene->geoms.size(); i++) {
            Geom& g = hst_scene->geoms[i];
            if (g.type == OBJ) {
                cudaFree(g.Device_Triangle_points_normals);
                g.Device_Triangle_points_normals = NULL;
                cudaFree(g.Device_BVH);
                g.Device_BVH = NULL;
            }
        }
    }
    checkCUDAError("pathtraceFree");
}
// Returns a point sampled uniformly inside the unit disk (z = 0), used for
// thin-lens depth-of-field aperture sampling.
// The previous implementation normalized a random first-quadrant vector,
// which produced points *on* the circle, restricted to one quadrant —
// biasing the lens sample distribution.
__device__ glm::vec3 random_in_unit_disk(thrust::default_random_engine& rng) {
    thrust::uniform_real_distribution<float> u01(0, 1);
    // Uniform area sampling: radius is the sqrt of a uniform variate,
    // angle is uniform over the full circle [0, 2*pi).
    float r = sqrtf(u01(rng));
    float theta = 6.28318530718f * u01(rng);
    return glm::vec3(r * cosf(theta), r * sinf(theta), 0.0f);
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays. One thread per pixel (2D launch).
*
* When usingCache is false, the ray direction is jittered within the pixel
* for antialiasing; when caching first bounces, the direction must be
* deterministic, so no jitter is applied.
* When usingDOF is true, a thin-lens model perturbs the ray origin on the
* aperture disk and re-aims it at the focal point.
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, bool usingCache, bool usingDOF)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment& segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
if (!usingCache)
{
/// Antialiasing: jitter the ray direction by a sub-pixel offset.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
// Note: the stray `+ +u01(rng)` below is a unary plus — harmless.
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + u01(rng))
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + +u01(rng))
);
}
else
{
// Deterministic (un-jittered) ray so the first bounce can be cached.
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
}
//AA plus DOF
if (usingDOF)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
double lens_radius = cam.aperture / 2;
glm::vec3 rd = float(lens_radius) * random_in_unit_disk(rng);
glm::vec3 offset = cam.up * rd.y + cam.right * rd.x;
glm::vec3 rayOrigin = cam.position + offset; // NEw origin
// Thin lens: the focal point is computed from the un-offset pinhole
// ray, then the ray is re-aimed from the perturbed aperture origin.
glm::vec3 focalPoint = segment.ray.origin + (float)cam.focus_dist * segment.ray.direction;
segment.ray.origin = rayOrigin;
segment.ray.direction = glm::normalize(focalPoint - rayOrigin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
// Computes the nearest scene intersection for each path segment; generating
// bounce rays is the shader's job, not this kernel's. Writes t = -1 for
// misses. One thread per active path (1D launch).
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment* pathSegments
    , Geom* geoms
    , int geoms_size
    , ShadeableIntersection* intersections
    , bool useBVH)
{
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index >= num_paths) {
        return;
    }
    PathSegment pathSegment = pathSegments[path_index];
    glm::vec3 intersect_point;
    glm::vec3 normal;
    float t_min = FLT_MAX;
    int hit_geom_index = -1;
    bool outside = true;
    glm::vec3 tmp_intersect;
    glm::vec3 tmp_normal;
    // Naive linear scan through all geometry, keeping the closest hit.
    for (int i = 0; i < geoms_size; i++)
    {
        Geom& geom = geoms[i];
        // Reset per geometry: previously `t` was left uninitialized (and
        // kept a stale value when the BVH rejected the ray), which could
        // register phantom hits against the wrong geometry.
        float t = -1.0f;
        if (geom.type == CUBE)
        {
            t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
        }
        else if (geom.type == SPHERE)
        {
            t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
        }
        else if (geom.type == OBJ)
        {
            // Optional bounding-volume rejection before the expensive
            // per-triangle mesh test.
            if (!useBVH || intersect(pathSegment.ray, geom))
            {
                t = MeshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
        }
        // Keep the closest positive hit seen so far.
        if (t > 0.0f && t_min > t)
        {
            t_min = t;
            hit_geom_index = i;
            intersect_point = tmp_intersect;
            normal = tmp_normal;
        }
    }
    if (hit_geom_index == -1)
    {
        // No hit anywhere along the ray.
        intersections[path_index].t = -1.0f;
    }
    else
    {
        // The ray hits something: record distance, material, and normal.
        intersections[path_index].t = t_min;
        intersections[path_index].materialId = geoms[hit_geom_index].materialid;
        intersections[path_index].surfaceNormal = normal;
    }
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
// Demonstration shader (not a real BSDF evaluation): paths that hit a light
// accumulate its emitted radiance and terminate; misses go black and
// terminate; everything else simply tints the path throughput by the
// material color and decrements the bounce budget. One thread per path.
__global__ void shadeFakeMaterial(
    int iter
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials
)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_paths) {
        return;
    }
    ShadeableIntersection isect = shadeableIntersections[idx];
    if (isect.t <= 0.0f) {
        // No intersection: color the ray black and stop tracing it.
        pathSegments[idx].color = glm::vec3(0.0f);
        pathSegments[idx].remainingBounces = 0;
        return;
    }
    Material mat = materials[isect.materialId];
    if (mat.emittance > 0.0f) {
        // Light source: multiply in emitted radiance and terminate.
        pathSegments[idx].color *= (mat.color * mat.emittance);
        pathSegments[idx].remainingBounces = 0;
    }
    else {
        // Plain tint by the material's base color; no scattering here.
        pathSegments[idx].remainingBounces -= 1;
        pathSegments[idx].color *= mat.color;
    }
}
// Shades each active path with its material's BSDF and spawns the next bounce.
// One thread per path. Paths that hit a light accumulate its emitted radiance
// and terminate; paths that miss go black and terminate; everything else is
// scattered via scatterRay and keeps bouncing.
//
// `depth` is the current bounce index; it is folded into the RNG seed so that
// successive bounces of the same pixel draw decorrelated random numbers (the
// previous constant-0 seed reused the same sequence at every depth).
__global__ void shadeBSDFMaterial(
    int iter
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials, int depth, Camera cam
)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_paths) {
        return;
    }
    ShadeableIntersection intersection = shadeableIntersections[idx];
    if (intersection.t <= 0.0f) {
        // Ray escaped the scene: contributes nothing and stops bouncing.
        pathSegments[idx].color = glm::vec3(0.0f);
        pathSegments[idx].remainingBounces = 0;
        return;
    }
    // Seed with (iteration, path index, bounce depth); see note above.
    thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
    Material material = materials[intersection.materialId];
    if (material.emittance > 0.0f) {
        // Hit a light: multiply in emitted radiance and terminate the path.
        pathSegments[idx].color *= (material.color * material.emittance);
        pathSegments[idx].remainingBounces = 0;
    }
    else {
        // Scatter off the surface and continue the path.
        pathSegments[idx].remainingBounces -= 1;
        glm::vec3 intersectPt = getPointOnRay(pathSegments[idx].ray, intersection.t);
        scatterRay(pathSegments[idx], intersectPt, intersection.surfaceNormal, material, rng, cam);
    }
}
// Add the current iteration's output to the overall image
// Accumulates each completed path's color into its pixel of the film buffer.
// Safe to run over the whole (possibly reordered) path pool because the
// write target is addressed by the path's own pixelIndex.
__global__ void finalGather(int nPaths, glm::vec3* image, PathSegment* iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths) {
        return;
    }
    PathSegment const path = iterationPaths[tid];
    image[path.pixelIndex] += path.color;
}
// Stream-compaction predicate over the liveness stencil: selects entries
// whose path is still active (stencil value 1). Despite the name, it keeps
// the *non*-terminated paths in the partition's front half.
struct hasTerminated
{
    __host__ __device__
    bool operator()(const int& x)
    {
        return 1 == x;
    }
};
// Builds a 0/1 stencil over the path pool: 1 = path still bouncing,
// 0 = path finished. Consumed by the thrust::stable_partition compaction.
__global__ void CompactionStencil(int nPaths, PathSegment* iterationPaths, int* dev_Stencil)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < nPaths)
    {
        dev_Stencil[tid] = (iterationPaths[tid].remainingBounces == 0) ? 0 : 1;
    }
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
// Records whether the first-bounce cache (dev_cache_paths /
// dev_cache_intersections) currently holds valid data.
void SetCacheState(bool a_state)
{
cacheAvailable = a_state;
}
// Orders intersections by material id; used as the key comparator when
// sorting paths so threads shading the same material become contiguous.
// (The sort itself is currently commented out in pathtrace.)
struct ShadeableIntersectionComparator
{
__host__ __device__
inline bool operator() (const ShadeableIntersection& a, const ShadeableIntersection& b)
{
return a.materialId < b.materialId;
}
};
// Runs one full path-tracing iteration: generate (or restore cached) camera
// rays, then repeatedly intersect + shade + stream-compact until every path
// terminates, and finally accumulate the results into dev_image and the
// OpenGL PBO. `iter` indexes the accumulation iteration (RNG seeding and
// averaging); `frame` is currently unused in this function.
void pathtrace(uchar4* pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera& cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
// Pipeline per iteration:
//   1. Generate one PathSegment per pixel (throughput starts at white).
//   2. Loop: intersect all active paths, shade/scatter them, then partition
//      away terminated paths (remainingBounces == 0).
//   3. finalGather adds each path's color into the accumulation image.
int depth = 0;
// First-bounce cache: camera rays are deterministic (no jitter) when
// caching is on, so the first ray generation can be reused across iterations.
if (usingCache)
{
if (!cacheAvailable)
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths, usingCache, usingDOF);
checkCUDAError("generate camera ray");
}
else
{
cudaMemcpy(dev_paths, dev_cache_paths, pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_intersections, dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
// NOTE(review): the cached intersections copied above are cleared and
// recomputed at the top of the loop below — verify the cache actually
// short-circuits the first bounce as intended.
depth = 1;
}
}
else
{
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths, usingCache, usingDOF);
checkCUDAError("generate camera ray");
}
cudaDeviceSynchronize();
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, useBVH);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
// Populate the first-bounce cache the first time through.
if (!cacheAvailable && usingCache)
{
cudaMemcpy(dev_cache_paths, dev_paths, pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_cache_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
SetCacheState(true);
cudaDeviceSynchronize();
}
#pragma region SortingMaterial
// Optional material sort (disabled): would group paths by material id so
// warps shade homogeneous materials together.
//thrust::device_ptr<ShadeableIntersection> dev_thrust_intersections = thrust::device_ptr<ShadeableIntersection>(dev_intersections);
//thrust::device_ptr<PathSegment> dev_thrust_PathSegment = thrust::device_ptr<PathSegment>(dev_paths);
//thrust::sort_by_key(thrust::device, dev_thrust_intersections, dev_thrust_intersections + num_paths, dev_thrust_PathSegment, ShadeableIntersectionComparator());
#pragma endregion
// Shade every active path; lights and misses terminate paths here.
const Camera *camcpy = &cam;
shadeBSDFMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials, depth, *camcpy
);
// Stream compaction: build a 0/1 liveness stencil, then partition live
// paths to the front. Terminated paths remain in the array (finalGather
// still reads their colors by pixelIndex); only num_paths shrinks.
CompactionStencil << <numblocksPathSegmentTracing, blockSize1d >> > (num_paths,
dev_paths, dev_Stencil);
PathSegment* itr = thrust::stable_partition(thrust::device, dev_paths, dev_paths + num_paths, dev_Stencil, hasTerminated());
int n = itr - dev_paths;
num_paths = n;
if (num_paths == 0)
{
iterationComplete = true; // TODO: should be based off stream compaction results.
}
//iterationComplete = true; // TODO: should be based off stream compaction results.
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
d9cd80bf8005f10197daf2de9e30c05a4d596440.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <thrust/binary_search.h>
#include <thrust/for_each.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
// Benchmark data, shared at https://github.com/rapidsai/cudf/pull/4703, shows
// that the single kernel optimization generally performs better, but when the
// number of chars/col is beyond a certain threshold memcpy performs better.
// This heuristic estimates which strategy will give better performance by
// comparing the mean chars/col with values from the above table.
// Chooses between the fused single-kernel concatenation strategy and the
// memcpy-based strategy. Fusion wins while the mean bytes-per-column stays
// below an empirically measured crossover (benchmarks shared in
// rapidsai/cudf#4703); the crossover is higher when null masks must also
// be gathered.
constexpr bool use_fused_kernel_heuristic(bool const has_nulls,
                                          size_t const total_bytes,
                                          size_t const num_columns)
{
  // Thresholds are the midpoints of the benchmarked crossover ranges:
  // with nulls: (1048576 + 2097152) / 2; without: (262144 + 524288) / 2.
  return total_bytes < num_columns * (has_nulls ? size_t{1572864} : size_t{393216});
}
// Using a functor instead of a lambda as a workaround for:
// error: The enclosing parent function ("create_strings_device_views") for an
// extended __device__ lambda must not have deduced return type
// Functor computing the chars size (in bytes) of a strings column view from
// its offsets child, honoring any column offset so sliced views are handled
// correctly. Kept as a named functor rather than a lambda to work around
// nvcc's restriction on extended __device__ lambdas in functions with a
// deduced return type (see comment above).
struct chars_size_transform {
__device__ size_t operator()(column_device_view const& col) const
{
if (col.size() > 0) {
constexpr auto offsets_index = strings_column_view::offsets_column_index;
auto d_offsets = col.child(offsets_index).data<int32_t>();
// Bytes spanned by the view = last offset - first offset of the range.
return d_offsets[col.size() + col.offset()] - d_offsets[col.offset()];
} else {
return 0;
}
}
};
// Packs the input column views into device-accessible storage and computes
// the per-column partition offsets needed to concatenate strings columns.
// Returns a tuple of:
//   - owners of the column_device_views (must stay alive while d_views is
//     in use on the device),
//   - a device vector of the views themselves,
//   - a device vector of row-count prefix sums (input row offsets),
//   - a device vector of chars-size prefix sums (output byte offsets),
//   - the total output row count and total output chars size.
auto create_strings_device_views(std::vector<column_view> const& views, hipStream_t stream)
{
// Create device views for each input view
using CDViewPtr =
decltype(column_device_view::create(std::declval<column_view>(), std::declval<hipStream_t>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(
views.cbegin(), views.cend(), device_view_owners.begin(), [stream](auto const& col) {
return column_device_view::create(col, stream);
});
// Assemble contiguous array of device views
auto device_views = thrust::host_vector<column_device_view>();
device_views.reserve(views.size());
std::transform(device_view_owners.cbegin(),
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
auto d_views = rmm::device_vector<column_device_view>{device_views};
// Compute the partition offsets and size of offset column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
auto input_offsets = thrust::host_vector<size_t>(views.size() + 1);
thrust::transform_inclusive_scan(
thrust::host,
device_views.cbegin(),
device_views.cend(),
std::next(input_offsets.begin()),
[](auto const& col) { return static_cast<size_t>(col.size()); },
thrust::plus<size_t>{});
auto const d_input_offsets = rmm::device_vector<size_t>{input_offsets};
auto const output_size = input_offsets.back();
// Compute the partition offsets and size of chars column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
// Note: Using separate transform and inclusive_scan because
// transform_inclusive_scan fails to compile with:
// error: the default constructor of "cudf::column_device_view" cannot be
// referenced -- it is a deleted function
auto d_partition_offsets = rmm::device_vector<size_t>(views.size() + 1);
thrust::transform(rmm::exec_policy(stream)->on(stream),
d_views.cbegin(),
d_views.cend(),
std::next(d_partition_offsets.begin()),
chars_size_transform{});
thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
d_partition_offsets.cbegin(),
d_partition_offsets.cend(),
d_partition_offsets.begin());
auto const output_chars_size = d_partition_offsets.back();
return std::make_tuple(std::move(device_view_owners),
std::move(d_views),
std::move(d_input_offsets),
std::move(d_partition_offsets),
output_size,
output_chars_size);
}
template <size_type block_size, bool Nullable>
__global__ void fused_concatenate_string_offset_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
size_type* output_data,
bitmask_type* output_mask,
size_type* out_valid_count)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_data = input_view.child(offsets_child).data<int32_t>();
output_data[output_index] =
input_data[offset_index + input_view.offset()] // handle parent offset
- input_data[input_view.offset()] // subract first offset if non-zero
+ partition_offsets[partition_index]; // add offset of source column
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % cudf::detail::warp_size == 0) {
output_mask[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
// Fill final offsets index with total size of char data
if (output_index == output_size) {
output_data[output_size] = partition_offsets[num_input_views];
}
if (Nullable) {
using cudf::detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
__global__ void fused_concatenate_string_chars_kernel(column_device_view const* input_views,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
char* output_data)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, partition_offsets, partition_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - partition_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_offsets_data = input_view.child(offsets_child).data<int32_t>();
constexpr auto chars_child = strings_column_view::chars_column_index;
auto const* input_chars_data = input_view.child(chars_child).data<char>();
auto const first_char = input_offsets_data[input_view.offset()];
output_data[output_index] = input_chars_data[offset_index + first_char];
output_index += blockDim.x * gridDim.x;
}
}
std::unique_ptr<column> concatenate(std::vector<column_view> const& columns,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// Compute output sizes
auto const device_views = create_strings_device_views(columns, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_input_offsets = std::get<2>(device_views);
auto const& d_partition_offsets = std::get<3>(device_views);
auto const strings_count = std::get<4>(device_views);
auto const total_bytes = std::get<5>(device_views);
auto const offsets_count = strings_count + 1;
if (strings_count == 0) { return make_empty_strings_column(mr, stream); }
CUDF_EXPECTS(offsets_count <= std::numeric_limits<size_type>::max(),
"total number of strings is too large for cudf column");
CUDF_EXPECTS(total_bytes <= std::numeric_limits<size_type>::max(),
"total size of strings is too large for cudf column");
bool const has_nulls =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); });
// create chars column
auto chars_column =
make_numeric_column(data_type{INT8}, total_bytes, mask_state::UNALLOCATED, stream, mr);
auto d_new_chars = chars_column->mutable_view().data<char>();
chars_column->set_null_count(0);
// create offsets column
auto offsets_column =
make_numeric_column(data_type{INT32}, offsets_count, mask_state::UNALLOCATED, stream, mr);
auto d_new_offsets = offsets_column->mutable_view().data<int32_t>();
offsets_column->set_null_count(0);
rmm::device_buffer null_mask;
size_type null_count{};
if (has_nulls) {
null_mask = create_null_mask(strings_count, mask_state::UNINITIALIZED, stream, mr);
}
{ // Copy offsets columns with single kernel launch
rmm::device_scalar<size_type> d_valid_count(0);
constexpr size_type block_size{256};
cudf::detail::grid_1d config(offsets_count, block_size);
auto const kernel = has_nulls ? fused_concatenate_string_offset_kernel<block_size, true>
: fused_concatenate_string_offset_kernel<block_size, false>;
hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream,
d_views.data().get(),
d_input_offsets.data().get(),
d_partition_offsets.data().get(),
static_cast<size_type>(d_views.size()),
strings_count,
d_new_offsets,
reinterpret_cast<bitmask_type*>(null_mask.data()),
d_valid_count.data());
if (has_nulls) { null_count = strings_count - d_valid_count.value(stream); }
}
if (total_bytes > 0) {
// Use a heuristic to guess when the fused kernel will be faster than memcpy
if (use_fused_kernel_heuristic(has_nulls, total_bytes, columns.size())) {
// Use single kernel launch to copy chars columns
constexpr size_type block_size{256};
cudf::detail::grid_1d config(total_bytes, block_size);
auto const kernel = fused_concatenate_string_chars_kernel;
hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream,
d_views.data().get(),
d_partition_offsets.data().get(),
static_cast<size_type>(d_views.size()),
total_bytes,
d_new_chars);
} else {
// Memcpy each input chars column (more efficient for very large strings)
for (auto column = columns.begin(); column != columns.end(); ++column) {
size_type column_size = column->size();
if (column_size == 0) // nothing to do
continue; // empty column may not have children
size_type column_offset = column->offset();
column_view offsets_child = column->child(strings_column_view::offsets_column_index);
column_view chars_child = column->child(strings_column_view::chars_column_index);
auto d_offsets = offsets_child.data<int32_t>() + column_offset;
int32_t bytes_offset = thrust::device_pointer_cast(d_offsets)[0];
// copy the chars column data
auto d_chars = chars_child.data<char>() + bytes_offset;
size_type bytes = thrust::device_pointer_cast(d_offsets)[column_size] - bytes_offset;
CUDA_TRY(hipMemcpyAsync(d_new_chars, d_chars, bytes, hipMemcpyDeviceToDevice, stream));
// get ready for the next column
d_new_chars += bytes;
}
}
}
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
| d9cd80bf8005f10197daf2de9e30c05a4d596440.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <thrust/binary_search.h>
#include <thrust/for_each.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
// Benchmark data, shared at https://github.com/rapidsai/cudf/pull/4703, shows
// that the single kernel optimization generally performs better, but when the
// number of chars/col is beyond a certain threshold memcpy performs better.
// This heuristic estimates which strategy will give better performance by
// comparing the mean chars/col with values from the above table.
constexpr bool use_fused_kernel_heuristic(bool const has_nulls,
size_t const total_bytes,
size_t const num_columns)
{
return has_nulls ? total_bytes < num_columns * 1572864 // midpoint of 1048576 and 2097152
: total_bytes < num_columns * 393216; // midpoint of 262144 and 524288
}
// Using a functor instead of a lambda as a workaround for:
// error: The enclosing parent function ("create_strings_device_views") for an
// extended __device__ lambda must not have deduced return type
struct chars_size_transform {
__device__ size_t operator()(column_device_view const& col) const
{
if (col.size() > 0) {
constexpr auto offsets_index = strings_column_view::offsets_column_index;
auto d_offsets = col.child(offsets_index).data<int32_t>();
return d_offsets[col.size() + col.offset()] - d_offsets[col.offset()];
} else {
return 0;
}
}
};
auto create_strings_device_views(std::vector<column_view> const& views, cudaStream_t stream)
{
// Create device views for each input view
using CDViewPtr =
decltype(column_device_view::create(std::declval<column_view>(), std::declval<cudaStream_t>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(
views.cbegin(), views.cend(), device_view_owners.begin(), [stream](auto const& col) {
return column_device_view::create(col, stream);
});
// Assemble contiguous array of device views
auto device_views = thrust::host_vector<column_device_view>();
device_views.reserve(views.size());
std::transform(device_view_owners.cbegin(),
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
auto d_views = rmm::device_vector<column_device_view>{device_views};
// Compute the partition offsets and size of offset column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
auto input_offsets = thrust::host_vector<size_t>(views.size() + 1);
thrust::transform_inclusive_scan(
thrust::host,
device_views.cbegin(),
device_views.cend(),
std::next(input_offsets.begin()),
[](auto const& col) { return static_cast<size_t>(col.size()); },
thrust::plus<size_t>{});
auto const d_input_offsets = rmm::device_vector<size_t>{input_offsets};
auto const output_size = input_offsets.back();
// Compute the partition offsets and size of chars column
// Note: Using 64-bit size_t so we can detect overflow of 32-bit size_type
// Note: Using separate transform and inclusive_scan because
// transform_inclusive_scan fails to compile with:
// error: the default constructor of "cudf::column_device_view" cannot be
// referenced -- it is a deleted function
auto d_partition_offsets = rmm::device_vector<size_t>(views.size() + 1);
thrust::transform(rmm::exec_policy(stream)->on(stream),
d_views.cbegin(),
d_views.cend(),
std::next(d_partition_offsets.begin()),
chars_size_transform{});
thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
d_partition_offsets.cbegin(),
d_partition_offsets.cend(),
d_partition_offsets.begin());
auto const output_chars_size = d_partition_offsets.back();
return std::make_tuple(std::move(device_view_owners),
std::move(d_views),
std::move(d_input_offsets),
std::move(d_partition_offsets),
output_size,
output_chars_size);
}
template <size_type block_size, bool Nullable>
__global__ void fused_concatenate_string_offset_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
size_type* output_data,
bitmask_type* output_mask,
size_type* out_valid_count)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_data = input_view.child(offsets_child).data<int32_t>();
output_data[output_index] =
input_data[offset_index + input_view.offset()] // handle parent offset
- input_data[input_view.offset()] // subract first offset if non-zero
+ partition_offsets[partition_index]; // add offset of source column
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % cudf::detail::warp_size == 0) {
output_mask[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
// Fill final offsets index with total size of char data
if (output_index == output_size) {
output_data[output_size] = partition_offsets[num_input_views];
}
if (Nullable) {
using cudf::detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
__global__ void fused_concatenate_string_chars_kernel(column_device_view const* input_views,
size_t const* partition_offsets,
size_type const num_input_views,
size_type const output_size,
char* output_data)
{
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, partition_offsets, partition_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - partition_offsets;
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
constexpr auto offsets_child = strings_column_view::offsets_column_index;
auto const* input_offsets_data = input_view.child(offsets_child).data<int32_t>();
constexpr auto chars_child = strings_column_view::chars_column_index;
auto const* input_chars_data = input_view.child(chars_child).data<char>();
auto const first_char = input_offsets_data[input_view.offset()];
output_data[output_index] = input_chars_data[offset_index + first_char];
output_index += blockDim.x * gridDim.x;
}
}
std::unique_ptr<column> concatenate(std::vector<column_view> const& columns,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// Compute output sizes
auto const device_views = create_strings_device_views(columns, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_input_offsets = std::get<2>(device_views);
auto const& d_partition_offsets = std::get<3>(device_views);
auto const strings_count = std::get<4>(device_views);
auto const total_bytes = std::get<5>(device_views);
auto const offsets_count = strings_count + 1;
if (strings_count == 0) { return make_empty_strings_column(mr, stream); }
CUDF_EXPECTS(offsets_count <= std::numeric_limits<size_type>::max(),
"total number of strings is too large for cudf column");
CUDF_EXPECTS(total_bytes <= std::numeric_limits<size_type>::max(),
"total size of strings is too large for cudf column");
bool const has_nulls =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); });
// create chars column
auto chars_column =
make_numeric_column(data_type{INT8}, total_bytes, mask_state::UNALLOCATED, stream, mr);
auto d_new_chars = chars_column->mutable_view().data<char>();
chars_column->set_null_count(0);
// create offsets column
auto offsets_column =
make_numeric_column(data_type{INT32}, offsets_count, mask_state::UNALLOCATED, stream, mr);
auto d_new_offsets = offsets_column->mutable_view().data<int32_t>();
offsets_column->set_null_count(0);
rmm::device_buffer null_mask;
size_type null_count{};
if (has_nulls) {
null_mask = create_null_mask(strings_count, mask_state::UNINITIALIZED, stream, mr);
}
{ // Copy offsets columns with single kernel launch
rmm::device_scalar<size_type> d_valid_count(0);
constexpr size_type block_size{256};
cudf::detail::grid_1d config(offsets_count, block_size);
auto const kernel = has_nulls ? fused_concatenate_string_offset_kernel<block_size, true>
: fused_concatenate_string_offset_kernel<block_size, false>;
kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>(
d_views.data().get(),
d_input_offsets.data().get(),
d_partition_offsets.data().get(),
static_cast<size_type>(d_views.size()),
strings_count,
d_new_offsets,
reinterpret_cast<bitmask_type*>(null_mask.data()),
d_valid_count.data());
if (has_nulls) { null_count = strings_count - d_valid_count.value(stream); }
}
if (total_bytes > 0) {
// Use a heuristic to guess when the fused kernel will be faster than memcpy
if (use_fused_kernel_heuristic(has_nulls, total_bytes, columns.size())) {
// Use single kernel launch to copy chars columns
constexpr size_type block_size{256};
cudf::detail::grid_1d config(total_bytes, block_size);
auto const kernel = fused_concatenate_string_chars_kernel;
kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>(
d_views.data().get(),
d_partition_offsets.data().get(),
static_cast<size_type>(d_views.size()),
total_bytes,
d_new_chars);
} else {
// Memcpy each input chars column (more efficient for very large strings)
for (auto column = columns.begin(); column != columns.end(); ++column) {
size_type column_size = column->size();
if (column_size == 0) // nothing to do
continue; // empty column may not have children
size_type column_offset = column->offset();
column_view offsets_child = column->child(strings_column_view::offsets_column_index);
column_view chars_child = column->child(strings_column_view::chars_column_index);
auto d_offsets = offsets_child.data<int32_t>() + column_offset;
int32_t bytes_offset = thrust::device_pointer_cast(d_offsets)[0];
// copy the chars column data
auto d_chars = chars_child.data<char>() + bytes_offset;
size_type bytes = thrust::device_pointer_cast(d_offsets)[column_size] - bytes_offset;
CUDA_TRY(cudaMemcpyAsync(d_new_chars, d_chars, bytes, cudaMemcpyDeviceToDevice, stream));
// get ready for the next column
d_new_chars += bytes;
}
}
}
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
6530d019f44dc2ce0ceb4e8c921b5878794b92b0.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <math.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
__global__ void myKernel2(float *x, long n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (long i = tid; i < n; i += blockDim.x * gridDim.x) {
;//sqrt(pow(3.14159, 2));
x[i] = sqrt(pow(3.14159, (double)x[i]));
}
}
long NC1 = 131072;
int mainM2(int argc, char **argv)
{
/*
srand (time(NULL));
if (checkCmdLineFlag(argc, (const char **)argv, "N"))
{
getCmdLineArgumentValue<long>(argc, (const char **)argv, "N", &N);
}
float *h_myKernel1Data;
h_myKernel1Data = (float*)malloc(N * sizeof(float));
for (long i = 0;i < N;i++)
h_myKernel1Data[i] = rand() * 1000;
float *myKernel1Data;
hipMalloc(&myKernel1Data, N * sizeof(float));
hipError_t error;
error = hipMemcpy(myKernel1Data, h_myKernel1Data, N * sizeof(float), hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 my1Threads(256, 1);
if (N % my1Threads.x != 0)
{
printf("invalid N\n");
exit(111000);
}
dim3 my1Blocks(sqrt(N / my1Threads.x), sqrt(N / my1Threads.x));
printf("N: %ld, grid(%d,%d), block(%d,%d)\n", N, my1Blocks.x, my1Blocks.y, my1Threads.x, my1Threads.y);
myKernel1<<<my1Blocks, my1Threads>>>(myKernel1Data, N);
error = hipDeviceSynchronize();
hipDeviceReset();
*/
return 0;
} | 6530d019f44dc2ce0ceb4e8c921b5878794b92b0.cu |
// System includes
#include <stdio.h>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
__global__ void myKernel2(float *x, long n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (long i = tid; i < n; i += blockDim.x * gridDim.x) {
;//sqrt(pow(3.14159, 2));
x[i] = sqrt(pow(3.14159, (double)x[i]));
}
}
long NC1 = 131072;
int mainM2(int argc, char **argv)
{
/*
srand (time(NULL));
if (checkCmdLineFlag(argc, (const char **)argv, "N"))
{
getCmdLineArgumentValue<long>(argc, (const char **)argv, "N", &N);
}
float *h_myKernel1Data;
h_myKernel1Data = (float*)malloc(N * sizeof(float));
for (long i = 0;i < N;i++)
h_myKernel1Data[i] = rand() * 1000;
float *myKernel1Data;
cudaMalloc(&myKernel1Data, N * sizeof(float));
cudaError_t error;
error = cudaMemcpy(myKernel1Data, h_myKernel1Data, N * sizeof(float), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 my1Threads(256, 1);
if (N % my1Threads.x != 0)
{
printf("invalid N\n");
exit(111000);
}
dim3 my1Blocks(sqrt(N / my1Threads.x), sqrt(N / my1Threads.x));
printf("N: %ld, grid(%d,%d), block(%d,%d)\n", N, my1Blocks.x, my1Blocks.y, my1Threads.x, my1Threads.y);
myKernel1<<<my1Blocks, my1Threads>>>(myKernel1Data, N);
error = cudaDeviceSynchronize();
cudaDeviceReset();
*/
return 0;
} |
4d926a7558263ac33e93dbcca05601878809e9e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <assert.h>
#include "splitCounter_kernel.cu"
int main(int argc, char ** argv) {
// local variables
unsigned int * h_counters = NULL, * h_counters_temp = NULL;
unsigned int * h_outArr = NULL, * h_outArr_temp = NULL;
bool pageAlign = false;
const int numRuns = 1;
int numTBs = 0, tbSize = 0;
if (argc != 4) {
fprintf(stderr, "./splitCounter <numTBs> <tbSize> <pageAlign>\n");
fprintf(stderr, "where:\n");
fprintf(stderr, "\t<numTBs>: number of thread blocks to launch\n");
fprintf(stderr, "\t<tbSize>: number of threads in a thread block\n");
fprintf(stderr, "\t<pageAlign>: if 1 the arrays will be page aligned, else arrays will be unaligned.\n");
exit(-1);
}
// parse input args
numTBs = atoi(argv[1]);
tbSize = atoi(argv[2]);
assert(tbSize <= 256); // scratchpad size limited to 256 for 8 TBs to execute
pageAlign = (atoi(argv[3]) == 1);
unsigned int numThrs = (numTBs * tbSize);
/*
// get regions
// h_counters holds the counters, which are written with relaxed atomics
region_t countReg = RELAX_ATOM_REGION;
// h_outArr is written in the kernel with data accesses, use special region
region_t outReg = SPECIAL_REGION;
*/
fprintf(stdout, "Initializing data...\n");
fprintf(stdout, "...allocating CPU memory.\n");
// every other thread in each TB gets its own counter
h_counters_temp = (unsigned int *)malloc(((numThrs/2)*sizeof(unsigned int)) + 0x1000/*, countReg*/);
// each thread gets its own location in the output array too
h_outArr_temp = (unsigned int *)malloc((numThrs*sizeof(unsigned int)) + 0x1000/*, outReg*/);
if (pageAlign) {
h_counters = (unsigned int *)(((((unsigned long long)h_counters_temp) >> 12) << 12) + 0x1000);
h_outArr = (unsigned int *)(((((unsigned long long)h_outArr_temp) >> 12) << 12) + 0x1000);
} else {
h_counters = h_counters_temp;
h_outArr = h_outArr_temp;
}
// initialize arrays
fprintf(stdout, "...initializing CPU memory.\n");
for (int i = 0; i < (numThrs/2); ++i) {
h_counters[i] = 0;
}
for (int i = 0; i < numThrs; ++i) {
h_outArr[i] = 0;
}
/*
// wrote to both regions on CPU, so they need an epilogue
__denovo_epilogue(2, countReg, outReg);
*/
// now that the initialization stuff is done, reset the counters and start
// the simulation!
fprintf(stdout,
"Launching kernel - %d runs with %d TBs and %d threads/TB\n",
numRuns, numTBs, tbSize);
for (int iter = 0; iter < numRuns; ++iter) {
hipLaunchKernelGGL(( splitCounter_kernel), dim3(numTBs), dim3(tbSize), 0, 0, h_counters,
h_outArr/*,
countReg,
outReg*/);
/*
// kernel writes counter and output arrays, so need to do an epilogue on them
__denovo_epilogue(2, countReg, outReg);
*/
}
bool passFail = true;
// each repeat of the kernel adds INC_VAL to the counter
for (int i = 0; i < (numThrs/2); ++i) {
if (h_counters[i] != numRuns*INC_VAL) {
fprintf(stderr, "ERROR: h_counters[%] != %d, = %u\n",
i, numRuns, h_counters[i]);
passFail = false;
}
}
// for now the half-warps doing the reads always go first, so they return 0
// if there are multiple runs, then we should have some partial sum from
// the previous kernel (assuming these half-warps still execute first,
// (numRuns-1)*INC_VAL*numCounters where numCounters == numThrs/2)
int expectedVal = ((numRuns-1)*INC_VAL)*(numThrs/2);
for (int i = 0; i < numThrs; ++i) {
if (h_outArr[i] != expectedVal) {
fprintf(stderr, "\tThread %d: %u, != %d\n", i, h_outArr[i], expectedVal);
passFail = false;
}
}
if (passFail) { fprintf(stdout, "PASSED\n"); }
else { fprintf(stdout, "FAILED\n"); }
#ifdef DEBUG
// print the final values of the counters and the output array
fprintf(stdout, "Counter Values:\n");
for (int i = 0; i < (numThrs/2); ++i) {
fprintf(stdout, "\t[%d] = %u\n", i, h_counters[i]);
}
fprintf(stdout, "Per-Thread Output Values\n");
for (int i = 0; i < numThrs; ++i) {
fprintf(stdout, "\tThread %d: %u\n", i, h_outArr[i]);
}
#endif // #ifdef DEBUG
free(h_counters);
free(h_outArr);
return 0;
}
| 4d926a7558263ac33e93dbcca05601878809e9e3.cu | #include <cstdio>
#include <assert.h>
#include "splitCounter_kernel.cu"
int main(int argc, char ** argv) {
// local variables
unsigned int * h_counters = NULL, * h_counters_temp = NULL;
unsigned int * h_outArr = NULL, * h_outArr_temp = NULL;
bool pageAlign = false;
const int numRuns = 1;
int numTBs = 0, tbSize = 0;
if (argc != 4) {
fprintf(stderr, "./splitCounter <numTBs> <tbSize> <pageAlign>\n");
fprintf(stderr, "where:\n");
fprintf(stderr, "\t<numTBs>: number of thread blocks to launch\n");
fprintf(stderr, "\t<tbSize>: number of threads in a thread block\n");
fprintf(stderr, "\t<pageAlign>: if 1 the arrays will be page aligned, else arrays will be unaligned.\n");
exit(-1);
}
// parse input args
numTBs = atoi(argv[1]);
tbSize = atoi(argv[2]);
assert(tbSize <= 256); // scratchpad size limited to 256 for 8 TBs to execute
pageAlign = (atoi(argv[3]) == 1);
unsigned int numThrs = (numTBs * tbSize);
/*
// get regions
// h_counters holds the counters, which are written with relaxed atomics
region_t countReg = RELAX_ATOM_REGION;
// h_outArr is written in the kernel with data accesses, use special region
region_t outReg = SPECIAL_REGION;
*/
fprintf(stdout, "Initializing data...\n");
fprintf(stdout, "...allocating CPU memory.\n");
// every other thread in each TB gets its own counter
h_counters_temp = (unsigned int *)malloc(((numThrs/2)*sizeof(unsigned int)) + 0x1000/*, countReg*/);
// each thread gets its own location in the output array too
h_outArr_temp = (unsigned int *)malloc((numThrs*sizeof(unsigned int)) + 0x1000/*, outReg*/);
if (pageAlign) {
h_counters = (unsigned int *)(((((unsigned long long)h_counters_temp) >> 12) << 12) + 0x1000);
h_outArr = (unsigned int *)(((((unsigned long long)h_outArr_temp) >> 12) << 12) + 0x1000);
} else {
h_counters = h_counters_temp;
h_outArr = h_outArr_temp;
}
// initialize arrays
fprintf(stdout, "...initializing CPU memory.\n");
for (int i = 0; i < (numThrs/2); ++i) {
h_counters[i] = 0;
}
for (int i = 0; i < numThrs; ++i) {
h_outArr[i] = 0;
}
/*
// wrote to both regions on CPU, so they need an epilogue
__denovo_epilogue(2, countReg, outReg);
*/
// now that the initialization stuff is done, reset the counters and start
// the simulation!
fprintf(stdout,
"Launching kernel - %d runs with %d TBs and %d threads/TB\n",
numRuns, numTBs, tbSize);
for (int iter = 0; iter < numRuns; ++iter) {
splitCounter_kernel<<<numTBs, tbSize>>>(h_counters,
h_outArr/*,
countReg,
outReg*/);
/*
// kernel writes counter and output arrays, so need to do an epilogue on them
__denovo_epilogue(2, countReg, outReg);
*/
}
bool passFail = true;
// each repeat of the kernel adds INC_VAL to the counter
for (int i = 0; i < (numThrs/2); ++i) {
if (h_counters[i] != numRuns*INC_VAL) {
fprintf(stderr, "ERROR: h_counters[%] != %d, = %u\n",
i, numRuns, h_counters[i]);
passFail = false;
}
}
// for now the half-warps doing the reads always go first, so they return 0
// if there are multiple runs, then we should have some partial sum from
// the previous kernel (assuming these half-warps still execute first,
// (numRuns-1)*INC_VAL*numCounters where numCounters == numThrs/2)
int expectedVal = ((numRuns-1)*INC_VAL)*(numThrs/2);
for (int i = 0; i < numThrs; ++i) {
if (h_outArr[i] != expectedVal) {
fprintf(stderr, "\tThread %d: %u, != %d\n", i, h_outArr[i], expectedVal);
passFail = false;
}
}
if (passFail) { fprintf(stdout, "PASSED\n"); }
else { fprintf(stdout, "FAILED\n"); }
#ifdef DEBUG
// print the final values of the counters and the output array
fprintf(stdout, "Counter Values:\n");
for (int i = 0; i < (numThrs/2); ++i) {
fprintf(stdout, "\t[%d] = %u\n", i, h_counters[i]);
}
fprintf(stdout, "Per-Thread Output Values\n");
for (int i = 0; i < numThrs; ++i) {
fprintf(stdout, "\tThread %d: %u\n", i, h_outArr[i]);
}
#endif // #ifdef DEBUG
free(h_counters);
free(h_outArr);
return 0;
}
|
48e08a1697809a94c0c26b141a2ff052b4449eaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
bool is_power_of_two(unsigned int x)
{
// https://stackoverflow.com/questions/600293/how-to-check-if-a-number-is-a-power-of-2/600306#600306
return (x != 0) && ((x & (x - 1)) == 0);
}
// Block-wide sum reduction (Harris-style), templated on blockSize so the
// tree loop is resolved at compile time.
//   g_in  : n input floats
//   g_out : one partial sum per block, written to g_out[blockIdx.x]
//   n     : element count
// Launch with blockSize threads per block and blockSize * sizeof(double)
// bytes of dynamic shared memory.
template <unsigned int blockSize>
__global__ void reduce(float *g_in, double *g_out, unsigned int n)
{
    extern __shared__ double s_data[];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockSize * 2) + tid;
    unsigned int gridSize = blockSize * 2 * gridDim.x;

    // Grid-stride accumulation, two elements per thread per step.
    // The second load is guarded: the original read g_in[i + blockSize]
    // whenever i < n, which runs past the array for n not a multiple of
    // 2 * blockSize.
    double sum = 0;
    while (i < n) {
        sum += g_in[i];
        if (i + blockSize < n)
            sum += g_in[i + blockSize];
        i += gridSize;
    }
    s_data[tid] = sum;
    __syncthreads();

    // Tree reduction in shared memory. The barrier stays outside the
    // conditional so every thread of the block reaches it; the original
    // called __syncthreads() inside the divergent tid < 32 branch, which
    // is undefined behaviour.
    for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {
        if (tid < s)
            s_data[tid] += s_data[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        g_out[blockIdx.x] = s_data[0];
}
/*
 * Sums an array of N ones (N must be a power of two) with a GPU tree
 * reduction, then finishes the per-block partial sums on the host.
 */
int main(int argc, char const *argv[])
{
    if(argc <= 1) {
        printf("Please call the program with an integer that is a"
               " power of two.");
        return 1;
    }
    unsigned int N = atoi(argv[1]);
    if(!is_power_of_two(N)) {
        printf("Please call the program with an integer that is a"
               " power of two.");
        return 1;
    }

    float *in;
    double *out;
    hipMallocManaged(&in, sizeof *in * N);
    // initialize array on host with ones
    for(int i=0; i<N; i++) {
        in[i] = 1;
    }

    // Guard N == 1: log2(1) == 0 would divide by zero below.
    int threadCountGrid = (N > 1) ? (int)ceil((double)N / log2((double)N)) : 1;
    int dimBlock = 512;
    printf("Using %d threads in total\n", max(threadCountGrid, dimBlock));
    printf("Divided on blocks of size %d\n", dimBlock);
    // Clamp to at least one block: for small N the integer division yields
    // 0, which launched an empty grid and silently reported a sum of 0.
    int dimGrid = min(64, threadCountGrid / 512);
    if(dimGrid < 1)
        dimGrid = 1;
    printf("With %d blocks\n", dimGrid);

    size_t smemSize = sizeof *out * dimBlock;
    hipMallocManaged(&out, sizeof *out * dimGrid);

    // dimBlock is fixed at 512 today; the switch keeps the template
    // instantiation correct if the block size above is ever changed.
    switch(dimBlock) {
    case 512:
        hipLaunchKernelGGL(( reduce<512>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 256:
        hipLaunchKernelGGL(( reduce<256>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 128:
        hipLaunchKernelGGL(( reduce<128>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 64:
        hipLaunchKernelGGL(( reduce<64>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 32:
        hipLaunchKernelGGL(( reduce<32>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 16:
        hipLaunchKernelGGL(( reduce<16>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 8:
        hipLaunchKernelGGL(( reduce<8>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 4:
        hipLaunchKernelGGL(( reduce<4>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 2:
        hipLaunchKernelGGL(( reduce<2>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    case 1:
        hipLaunchKernelGGL(( reduce<1>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, in, out, N);
        break;
    }
    hipDeviceSynchronize();

    // Final host-side reduction over the per-block partial sums.
    double sum = 0;
    printf("Complete sum-reduction: ");
    for(int i=0; i<dimGrid; i++)
        sum += out[i];
    printf("%f\n", sum);

    hipFree(in);
    hipFree(out);
    return 0;
}
| 48e08a1697809a94c0c26b141a2ff052b4449eaf.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Returns true iff x is a positive power of two (exactly one bit set).
bool is_power_of_two(unsigned int x)
{
    if (x == 0u)
        return false;
    // A power of two has a single set bit; x & (x - 1) clears the lowest
    // set bit, so the result is zero exactly for powers of two.
    return (x & (x - 1u)) == 0u;
}
// Block-wide sum reduction (Harris-style), templated on blockSize so the
// tree loop is resolved at compile time.
//   g_in  : n input floats
//   g_out : one partial sum per block, written to g_out[blockIdx.x]
//   n     : element count
// Launch with blockSize threads per block and blockSize * sizeof(double)
// bytes of dynamic shared memory.
template <unsigned int blockSize>
__global__ void reduce(float *g_in, double *g_out, unsigned int n)
{
    extern __shared__ double s_data[];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockSize * 2) + tid;
    unsigned int gridSize = blockSize * 2 * gridDim.x;

    // Grid-stride accumulation, two elements per thread per step.
    // The second load is guarded: the original read g_in[i + blockSize]
    // whenever i < n, which runs past the array for n not a multiple of
    // 2 * blockSize.
    double sum = 0;
    while (i < n) {
        sum += g_in[i];
        if (i + blockSize < n)
            sum += g_in[i + blockSize];
        i += gridSize;
    }
    s_data[tid] = sum;
    __syncthreads();

    // Tree reduction in shared memory. The barrier stays outside the
    // conditional so every thread of the block reaches it; the original
    // called __syncthreads() inside the divergent tid < 32 branch, which
    // is undefined behaviour.
    for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {
        if (tid < s)
            s_data[tid] += s_data[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        g_out[blockIdx.x] = s_data[0];
}
/*
 * Sums an array of N ones (N must be a power of two) with a GPU tree
 * reduction, then finishes the per-block partial sums on the host.
 */
int main(int argc, char const *argv[])
{
    if(argc <= 1) {
        printf("Please call the program with an integer that is a"
               " power of two.");
        return 1;
    }
    unsigned int N = atoi(argv[1]);
    if(!is_power_of_two(N)) {
        printf("Please call the program with an integer that is a"
               " power of two.");
        return 1;
    }

    float *in;
    double *out;
    cudaMallocManaged(&in, sizeof *in * N);
    // initialize array on host with ones
    for(int i=0; i<N; i++) {
        in[i] = 1;
    }

    // Guard N == 1: log2(1) == 0 would divide by zero below.
    int threadCountGrid = (N > 1) ? (int)ceil((double)N / log2((double)N)) : 1;
    int dimBlock = 512;
    printf("Using %d threads in total\n", max(threadCountGrid, dimBlock));
    printf("Divided on blocks of size %d\n", dimBlock);
    // Clamp to at least one block: for small N the integer division yields
    // 0, which launched an empty grid and silently reported a sum of 0.
    int dimGrid = min(64, threadCountGrid / 512);
    if(dimGrid < 1)
        dimGrid = 1;
    printf("With %d blocks\n", dimGrid);

    size_t smemSize = sizeof *out * dimBlock;
    cudaMallocManaged(&out, sizeof *out * dimGrid);

    // dimBlock is fixed at 512 today; the switch keeps the template
    // instantiation correct if the block size above is ever changed.
    switch(dimBlock) {
    case 512:
        reduce<512><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 256:
        reduce<256><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 128:
        reduce<128><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 64:
        reduce<64><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 32:
        reduce<32><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 16:
        reduce<16><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 8:
        reduce<8><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 4:
        reduce<4><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 2:
        reduce<2><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    case 1:
        reduce<1><<<dimGrid, dimBlock, smemSize>>>(in, out, N);
        break;
    }
    cudaDeviceSynchronize();

    // Final host-side reduction over the per-block partial sums.
    double sum = 0;
    printf("Complete sum-reduction: ");
    for(int i=0; i<dimGrid; i++)
        sum += out[i];
    printf("%f\n", sum);

    cudaFree(in);
    cudaFree(out);
    return 0;
}
|
b3101886f53dd902f4ab5d709f312153196a93bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "BatchInput.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// Subtracts the mean from each of the two patches sharing the 256-float
// buffer sbatchInput (left patch at offset 0, right patch at offset 128).
// y is the calling thread's pixel index within the patch.
// NOTE(review): the fold below assumes PATCHWIDTH*PATCHWIDTH <= 128 so a
// patch fits the 128-slot reduction buffer -- confirm PATCHWIDTH.
__device__ void zeroMeanBatchs(float * sbatchInput, const unsigned int& y)
{
    __shared__ float sdata[128];
    volatile float* vsdata = sdata;  // volatile for the warp-synchronous loop below
    float mean;                      // unused -- leftover from an earlier revision
    __syncthreads();

    // Each thread loads its pixel from both patches, plus (where one
    // exists) the pixel 64 entries ahead, folding the patch into 128 lanes.
    float rdata = sbatchInput[y];
    float rdata_p = y<PATCHWIDTH*PATCHWIDTH-64?sbatchInput[y + 64]:0;
    float rdata64 = sbatchInput[y+128];
    float rdata_p64 = y<PATCHWIDTH*PATCHWIDTH-64?sbatchInput[y + 128 + 64]:0;

    // First fold: 128 -> 64 partial sums per patch.
    if(y<64)
    {
        sdata[y] = rdata = rdata + rdata_p;
        sdata[y+64] = rdata64 = rdata64 + rdata_p64;
    }
    __syncthreads();

    // Warp-level tree reduction of the 64 partials per patch.
    // NOTE(review): relies on implicit warp lockstep (volatile accesses,
    // no __syncwarp, and __syncthreads is not reached by threads with
    // y >= 32) -- unsafe under independent thread scheduling (Volta+);
    // verify the target architecture.
    if(y<32)
    {
        #pragma unroll
        for(int s = 32; s > 0; s >>= 1)
        {
            if(y<s)
            {
                vsdata[y] = rdata = rdata + vsdata[y+s];
                vsdata[y+64] = rdata64 = rdata64 + vsdata[y+64+s];
            }
        }
    }

    // Thread 0 turns the two patch sums into means.
    if(y==0)
    {
        sdata[y] = rdata/float(PATCHWIDTH*PATCHWIDTH);
        sdata[y+64] = rdata64/float(PATCHWIDTH*PATCHWIDTH);
    }
    __syncthreads();

    // Subtract each patch's mean in place.
    sbatchInput[y] = sbatchInput[y]-vsdata[0];
    sbatchInput[y+128] = sbatchInput[y+128]-vsdata[64];
}
// Divides each mean-subtracted patch in sbatchInput (left at offset 0,
// right at offset 128) by its L2 norm and writes the result into column x
// of the global batch matrix batchInput (BASISDIM rows per column).
// With COM1NORM set, both patches share one joint norm; otherwise each is
// normalised independently. Near-zero norms fall back to a constant patch.
// y is the thread's pixel index within the patch.
__device__ void deviceNormalizeBatchs(float* batchInput,float const* sbatchInput,const unsigned int& x, const unsigned int& y)
{
    __shared__ float sdata[128];
    volatile float* vsdata = sdata;  // volatile for the warp-synchronous loop below
    float sumsq = 0;                 // unused -- leftover from an earlier revision
    __syncthreads();

    // Squared pixel values, folded the same way as in zeroMeanBatchs
    // (assumes PATCHWIDTH*PATCHWIDTH <= 128 -- TODO confirm).
    // NOTE(review): pow(v, 2) is the double-precision overload; v*v would
    // avoid the double round-trip.
    float rdata = pow(sbatchInput[y],2);
    float rdata_p = y<PATCHWIDTH*PATCHWIDTH-64?pow(sbatchInput[y + 64],2):0;
    float rdata64 = pow(sbatchInput[y+128],2);
    float rdata_p64 = y<PATCHWIDTH*PATCHWIDTH-64?pow(sbatchInput[y + 128 + 64],2):0;

    // First fold: 128 -> 64 partial sums of squares per patch.
    if(y<64)
    {
        sdata[y] = rdata = rdata + rdata_p;
        sdata[y+64] = rdata64 = rdata64 + rdata_p64;
    }
    __syncthreads();

    // Warp-level tree reduction.
    // NOTE(review): unlike zeroMeanBatchs there is no "if(y<s)" guard, so
    // this is the classic pre-Volta warp-synchronous idiom that depends on
    // strict lockstep execution; unsafe under independent thread
    // scheduling -- verify on the target architecture.
    if(y<32)
    {
        #pragma unroll
        for(int s = 32; s > 0; s >>= 1)
        {
            vsdata[y] = rdata = rdata + vsdata[y+s];
            vsdata[y+64] = rdata64 = rdata64 + vsdata[y+64+s];
        }
    }
#if COM1NORM
    // Joint normalisation: one L2 norm over both patches (EPS avoids /0).
    if(y==0)
    {
        sdata[y] = pow(rdata+rdata64+float(EPS),float(0.5));
        // sdata[y+64] = pow(rdata64+float(EPS),float(0.5));
    }
    __syncthreads();
    const unsigned int ind = y + x*BASISDIM;
    batchInput[ind] = sbatchInput[y]/vsdata[0];
    batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sbatchInput[y+128]/vsdata[0];
    // Degenerate (near-zero-norm) pair: substitute a flat patch.
    if(vsdata[0]<1e-3)
    {
        // printf("\nsbsb\n");
        batchInput[ind] = batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sqrt(0.005);
    }
    // batchInput[ind] = sbatchInput[y];
    // batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sbatchInput[y+128];
#else
    // Independent normalisation: one L2 norm per patch.
    if(y==0)
    {
        sdata[y] = pow(rdata+float(EPS),float(0.5));
        sdata[y+64] = pow(rdata64+float(EPS),float(0.5));
    }
    __syncthreads();
    const unsigned int ind = y + x*BASISDIM;
    batchInput[ind] = sbatchInput[y]/vsdata[0];
    batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sbatchInput[y+128]/vsdata[64];
    if(vsdata[0]<1e-3)
        batchInput[ind] = float(1.0)/float(PATCHWIDTH);
    if(vsdata[64]<1e-3)
        batchInput[ind+PATCHWIDTH*PATCHWIDTH] = float(1.0)/float(PATCHWIDTH);
#endif
}
// Cuts the two whitened fovea images (lF / rF, row pitch dSfS) into
// patches spaced ps pixels apart, zero-means each stereo patch pair in
// shared memory and writes the normalised pair into the batch matrix bI.
// One block per patch; one thread per patch pixel.
__global__ void globalCuts(float * lF,float* rF, float* bI, int dSfS, int ps)
{
    const unsigned int imgCol   = blockIdx.y * ps + threadIdx.y;
    const unsigned int imgRow   = blockIdx.x * ps + threadIdx.x;
    const unsigned int patchIdx = blockIdx.x + blockIdx.y * ROWPATCHNUM;
    const unsigned int inPatch  = threadIdx.x + threadIdx.y * PATCHWIDTH;

    // Left-eye patch in the first half of the buffer, right-eye patch in
    // the second half (offset 128).
    __shared__ float patchPair[256];
    patchPair[inPatch]       = lF[imgRow * dSfS + imgCol];
    patchPair[inPatch + 128] = rF[imgRow * dSfS + imgCol];

    zeroMeanBatchs(patchPair, inPatch);
    deviceNormalizeBatchs(bI, patchPair, patchIdx, inPatch);
}
// Launches the patch-cutting kernel (one block per patch, one thread per
// patch pixel) and blocks until it has finished.
void BatchInput::GpuCut()
{
    const dim3 grid(ROWPATCHNUM, ROWPATCHNUM);
    const dim3 block(PATCHWIDTH, PATCHWIDTH);
    hipLaunchKernelGGL(globalCuts, grid, block, 0, 0,
                       leftFoveaWh, rightFoveaWh, batchInput,
                       downSampled_foveaSize, patchShift);
    hipDeviceSynchronize();
}
// Copies an N x N real array into the real part of a complex array and
// zeroes the imaginary part. Threads outside the array do nothing.
__global__ void real2complex (float *a, hipfftComplex *c, int N)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < N && row < N)
    {
        const int idx = row * N + col;
        c[idx].x = a[idx];
        c[idx].y = 0.f;
    }
    // Barrier kept for parity with the sibling kernels; no shared memory
    // is exchanged here.
    __syncthreads();
}
/*compute idx and idy, the location of the element in the original NxN array*/
// Copies the centre N x N region of an M x M complex array into a real
// N x N array, scaling each value (used to normalise an inverse FFT).
__global__ void complex2real_scaled (hipfftComplex *c, float *a, int M, int N, float scale)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int border = (M - N) / 2;
    if (col >= border && col < M - border && row >= border && row < M - border)
    {
        const int src = row * M + col;
        const int dst = (col - border) + (row - border) * N;
        a[dst] = scale * c[src].x;
    }
    // Barrier kept for parity with the sibling kernels; no shared memory
    // is exchanged here.
    __syncthreads();
}
// In-place scaled complex multiply: a = c * (a * b).
inline __device__ void mulAndScale(hipComplex& a, const hipComplex& b, const float& c){
    const float re = c * (a.x * b.x - a.y * b.y);
    const float im = c * (a.y * b.x + a.x * b.y);
    a.x = re;
    a.y = im;
}
// Point-wise spectral multiply: d_Dst[i] = c * (d_Src[i] * d_Dst[i]) over
// an N x N complex array.
__global__ void modulateAndNormalize_kernel(hipComplex *d_Dst, hipComplex *d_Src, int N, float c )
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < N && row < N)
    {
        const int idx = row * N + col;
        hipComplex prod = d_Src[idx];
        mulAndScale(prod, d_Dst[idx], c);
        d_Dst[idx] = prod;
    }
    // Barrier kept for parity with the sibling kernels; no shared memory
    // is exchanged here.
    __syncthreads();
}
// Whitens both fovea images in the frequency domain: FFT each image,
// multiply its spectrum point-wise by the precomputed filter d_filter
// (set up in initFilt), inverse-FFT, then crop/scale the centre region
// into leftFoveaWh / rightFoveaWh.
void BatchInput::imgWhitening()
{
    // Stage the real images as complex arrays for the C2C transforms.
    hipLaunchKernelGGL(( real2complex), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, leftFovea, d_dataL, FILT_WIDTH);
    hipLaunchKernelGGL(( real2complex), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, rightFovea, d_dataR, FILT_WIDTH);
    hipDeviceSynchronize();
    // Forward transforms.
    hipfftExecC2C(fftPlan, (hipfftComplex *)d_dataL, (hipfftComplex *)d_DataSpectrumL,HIPFFT_FORWARD );
    hipfftExecC2C(fftPlan, (hipfftComplex *)d_dataR, (hipfftComplex *)d_DataSpectrumR,HIPFFT_FORWARD );
    // Point-wise multiply each spectrum by the whitening filter (scale 1).
    hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, d_DataSpectrumL, d_filter, FILT_WIDTH , 1);
    hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, d_DataSpectrumR, d_filter, FILT_WIDTH , 1);
    hipDeviceSynchronize();
    // Inverse transforms (unnormalised; the 1/FILT_WIDTH^2 scale is
    // applied in the crop kernel below).
    hipfftExecC2C(fftPlan, (hipfftComplex *)d_DataSpectrumL, (hipfftComplex *)d_resultL,HIPFFT_BACKWARD );
    hipfftExecC2C(fftPlan, (hipfftComplex *)d_DataSpectrumR, (hipfftComplex *)d_resultR,HIPFFT_BACKWARD );
    // Crop the centre downSampled_foveaSize region with the FFT scale.
    hipLaunchKernelGGL(( complex2real_scaled), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, d_resultL, leftFoveaWh, FILT_WIDTH, downSampled_foveaSize, float(1)/(FILT_WIDTH*FILT_WIDTH) );
    hipLaunchKernelGGL(( complex2real_scaled), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, d_resultR, rightFoveaWh, FILT_WIDTH, downSampled_foveaSize, float(1)/(FILT_WIDTH*FILT_WIDTH));
}
// Uploads the host-side whitening filter and expands it into the complex
// array d_filter used by imgWhitening().
void BatchInput::initFilt(float* f)
{
    filtdata=f;
    // Copy the FILT_WIDTH x FILT_WIDTH real filter to the device ...
    hipMemcpy(d_filterReal,filtdata,sizeof(float)*FILT_WIDTH*FILT_WIDTH,hipMemcpyHostToDevice);
    // ... and convert it to complex form (imaginary parts zeroed).
    hipLaunchKernelGGL(( real2complex), dim3(dim3(16,16)), dim3(dim3(16,16)), 0, 0, d_filterReal, d_filter, FILT_WIDTH);
} | b3101886f53dd902f4ab5d709f312153196a93bb.cu | #include "BatchInput.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Subtracts the mean from each of the two patches sharing the 256-float
// buffer sbatchInput (left patch at offset 0, right patch at offset 128).
// y is the calling thread's pixel index within the patch.
// NOTE(review): the fold below assumes PATCHWIDTH*PATCHWIDTH <= 128 so a
// patch fits the 128-slot reduction buffer -- confirm PATCHWIDTH.
__device__ void zeroMeanBatchs(float * sbatchInput, const unsigned int& y)
{
    __shared__ float sdata[128];
    volatile float* vsdata = sdata;  // volatile for the warp-synchronous loop below
    float mean;                      // unused -- leftover from an earlier revision
    __syncthreads();

    // Each thread loads its pixel from both patches, plus (where one
    // exists) the pixel 64 entries ahead, folding the patch into 128 lanes.
    float rdata = sbatchInput[y];
    float rdata_p = y<PATCHWIDTH*PATCHWIDTH-64?sbatchInput[y + 64]:0;
    float rdata64 = sbatchInput[y+128];
    float rdata_p64 = y<PATCHWIDTH*PATCHWIDTH-64?sbatchInput[y + 128 + 64]:0;

    // First fold: 128 -> 64 partial sums per patch.
    if(y<64)
    {
        sdata[y] = rdata = rdata + rdata_p;
        sdata[y+64] = rdata64 = rdata64 + rdata_p64;
    }
    __syncthreads();

    // Warp-level tree reduction of the 64 partials per patch.
    // NOTE(review): relies on implicit warp lockstep (volatile accesses,
    // no __syncwarp, and __syncthreads is not reached by threads with
    // y >= 32) -- unsafe under independent thread scheduling (Volta+);
    // verify the target architecture.
    if(y<32)
    {
        #pragma unroll
        for(int s = 32; s > 0; s >>= 1)
        {
            if(y<s)
            {
                vsdata[y] = rdata = rdata + vsdata[y+s];
                vsdata[y+64] = rdata64 = rdata64 + vsdata[y+64+s];
            }
        }
    }

    // Thread 0 turns the two patch sums into means.
    if(y==0)
    {
        sdata[y] = rdata/float(PATCHWIDTH*PATCHWIDTH);
        sdata[y+64] = rdata64/float(PATCHWIDTH*PATCHWIDTH);
    }
    __syncthreads();

    // Subtract each patch's mean in place.
    sbatchInput[y] = sbatchInput[y]-vsdata[0];
    sbatchInput[y+128] = sbatchInput[y+128]-vsdata[64];
}
// Divides each mean-subtracted patch in sbatchInput (left at offset 0,
// right at offset 128) by its L2 norm and writes the result into column x
// of the global batch matrix batchInput (BASISDIM rows per column).
// With COM1NORM set, both patches share one joint norm; otherwise each is
// normalised independently. Near-zero norms fall back to a constant patch.
// y is the thread's pixel index within the patch.
__device__ void deviceNormalizeBatchs(float* batchInput,float const* sbatchInput,const unsigned int& x, const unsigned int& y)
{
    __shared__ float sdata[128];
    volatile float* vsdata = sdata;  // volatile for the warp-synchronous loop below
    float sumsq = 0;                 // unused -- leftover from an earlier revision
    __syncthreads();

    // Squared pixel values, folded the same way as in zeroMeanBatchs
    // (assumes PATCHWIDTH*PATCHWIDTH <= 128 -- TODO confirm).
    // NOTE(review): pow(v, 2) is the double-precision overload; v*v would
    // avoid the double round-trip.
    float rdata = pow(sbatchInput[y],2);
    float rdata_p = y<PATCHWIDTH*PATCHWIDTH-64?pow(sbatchInput[y + 64],2):0;
    float rdata64 = pow(sbatchInput[y+128],2);
    float rdata_p64 = y<PATCHWIDTH*PATCHWIDTH-64?pow(sbatchInput[y + 128 + 64],2):0;

    // First fold: 128 -> 64 partial sums of squares per patch.
    if(y<64)
    {
        sdata[y] = rdata = rdata + rdata_p;
        sdata[y+64] = rdata64 = rdata64 + rdata_p64;
    }
    __syncthreads();

    // Warp-level tree reduction.
    // NOTE(review): unlike zeroMeanBatchs there is no "if(y<s)" guard, so
    // this is the classic pre-Volta warp-synchronous idiom that depends on
    // strict lockstep execution; unsafe under independent thread
    // scheduling -- verify on the target architecture.
    if(y<32)
    {
        #pragma unroll
        for(int s = 32; s > 0; s >>= 1)
        {
            vsdata[y] = rdata = rdata + vsdata[y+s];
            vsdata[y+64] = rdata64 = rdata64 + vsdata[y+64+s];
        }
    }
#if COM1NORM
    // Joint normalisation: one L2 norm over both patches (EPS avoids /0).
    if(y==0)
    {
        sdata[y] = pow(rdata+rdata64+float(EPS),float(0.5));
        // sdata[y+64] = pow(rdata64+float(EPS),float(0.5));
    }
    __syncthreads();
    const unsigned int ind = y + x*BASISDIM;
    batchInput[ind] = sbatchInput[y]/vsdata[0];
    batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sbatchInput[y+128]/vsdata[0];
    // Degenerate (near-zero-norm) pair: substitute a flat patch.
    if(vsdata[0]<1e-3)
    {
        // printf("\nsbsb\n");
        batchInput[ind] = batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sqrt(0.005);
    }
    // batchInput[ind] = sbatchInput[y];
    // batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sbatchInput[y+128];
#else
    // Independent normalisation: one L2 norm per patch.
    if(y==0)
    {
        sdata[y] = pow(rdata+float(EPS),float(0.5));
        sdata[y+64] = pow(rdata64+float(EPS),float(0.5));
    }
    __syncthreads();
    const unsigned int ind = y + x*BASISDIM;
    batchInput[ind] = sbatchInput[y]/vsdata[0];
    batchInput[ind+PATCHWIDTH*PATCHWIDTH] = sbatchInput[y+128]/vsdata[64];
    if(vsdata[0]<1e-3)
        batchInput[ind] = float(1.0)/float(PATCHWIDTH);
    if(vsdata[64]<1e-3)
        batchInput[ind+PATCHWIDTH*PATCHWIDTH] = float(1.0)/float(PATCHWIDTH);
#endif
}
// Cuts the two whitened fovea images (lF / rF, row pitch dSfS) into
// patches spaced ps pixels apart, zero-means each stereo patch pair in
// shared memory and writes the normalised pair into the batch matrix bI.
// One block per patch; one thread per patch pixel.
__global__ void globalCuts(float * lF,float* rF, float* bI, int dSfS, int ps)
{
    const unsigned int imgCol   = blockIdx.y * ps + threadIdx.y;
    const unsigned int imgRow   = blockIdx.x * ps + threadIdx.x;
    const unsigned int patchIdx = blockIdx.x + blockIdx.y * ROWPATCHNUM;
    const unsigned int inPatch  = threadIdx.x + threadIdx.y * PATCHWIDTH;

    // Left-eye patch in the first half of the buffer, right-eye patch in
    // the second half (offset 128).
    __shared__ float patchPair[256];
    patchPair[inPatch]       = lF[imgRow * dSfS + imgCol];
    patchPair[inPatch + 128] = rF[imgRow * dSfS + imgCol];

    zeroMeanBatchs(patchPair, inPatch);
    deviceNormalizeBatchs(bI, patchPair, patchIdx, inPatch);
}
// Launches the patch-cutting kernel (one block per patch, one thread per
// patch pixel) and blocks until it has finished.
void BatchInput::GpuCut()
{
    globalCuts<<<dim3(ROWPATCHNUM,ROWPATCHNUM),dim3(PATCHWIDTH,PATCHWIDTH)>>>(leftFoveaWh,rightFoveaWh,batchInput,downSampled_foveaSize,patchShift);
    // cudaThreadSynchronize() is deprecated (removed in recent CUDA);
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
}
// Copies an N x N real array into the real part of a complex array and
// zeroes the imaginary part. Threads outside the array do nothing.
__global__ void real2complex (float *a, cufftComplex *c, int N)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < N && row < N)
    {
        const int idx = row * N + col;
        c[idx].x = a[idx];
        c[idx].y = 0.f;
    }
    // Barrier kept for parity with the sibling kernels; no shared memory
    // is exchanged here.
    __syncthreads();
}
/*compute idx and idy, the location of the element in the original NxN array*/
// Copies the centre N x N region of an M x M complex array into a real
// N x N array, scaling each value (used to normalise an inverse FFT).
__global__ void complex2real_scaled (cufftComplex *c, float *a, int M, int N, float scale)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int border = (M - N) / 2;
    if (col >= border && col < M - border && row >= border && row < M - border)
    {
        const int src = row * M + col;
        const int dst = (col - border) + (row - border) * N;
        a[dst] = scale * c[src].x;
    }
    // Barrier kept for parity with the sibling kernels; no shared memory
    // is exchanged here.
    __syncthreads();
}
// In-place scaled complex multiply: a = c * (a * b).
inline __device__ void mulAndScale(cuComplex& a, const cuComplex& b, const float& c){
    const float re = c * (a.x * b.x - a.y * b.y);
    const float im = c * (a.y * b.x + a.x * b.y);
    a.x = re;
    a.y = im;
}
// Point-wise spectral multiply: d_Dst[i] = c * (d_Src[i] * d_Dst[i]) over
// an N x N complex array.
__global__ void modulateAndNormalize_kernel(cuComplex *d_Dst, cuComplex *d_Src, int N, float c )
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < N && row < N)
    {
        const int idx = row * N + col;
        cuComplex prod = d_Src[idx];
        mulAndScale(prod, d_Dst[idx], c);
        d_Dst[idx] = prod;
    }
    // Barrier kept for parity with the sibling kernels; no shared memory
    // is exchanged here.
    __syncthreads();
}
// Whitens both fovea images in the frequency domain: FFT each image,
// multiply its spectrum point-wise by the precomputed filter d_filter
// (set up in initFilt), inverse-FFT, then crop/scale the centre region
// into leftFoveaWh / rightFoveaWh.
void BatchInput::imgWhitening()
{
    // Stage the real images as complex arrays for the C2C transforms.
    real2complex<<< dim3(16,16), dim3(16,16)>>>(leftFovea, d_dataL, FILT_WIDTH);
    real2complex<<< dim3(16,16), dim3(16,16)>>>(rightFovea, d_dataR, FILT_WIDTH);
    // cudaThreadSynchronize() is deprecated (removed in recent CUDA);
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    // Forward transforms.
    cufftExecC2C(fftPlan, (cufftComplex *)d_dataL, (cufftComplex *)d_DataSpectrumL,CUFFT_FORWARD );
    cufftExecC2C(fftPlan, (cufftComplex *)d_dataR, (cufftComplex *)d_DataSpectrumR,CUFFT_FORWARD );
    // Point-wise multiply each spectrum by the whitening filter (scale 1).
    modulateAndNormalize_kernel<<<dim3(16,16), dim3(16,16)>>> (d_DataSpectrumL, d_filter, FILT_WIDTH , 1);
    modulateAndNormalize_kernel<<<dim3(16,16), dim3(16,16)>>> (d_DataSpectrumR, d_filter, FILT_WIDTH , 1);
    cudaDeviceSynchronize();
    // Inverse transforms (unnormalised; the 1/FILT_WIDTH^2 scale is
    // applied in the crop kernel below).
    cufftExecC2C(fftPlan, (cufftComplex *)d_DataSpectrumL, (cufftComplex *)d_resultL,CUFFT_INVERSE );
    cufftExecC2C(fftPlan, (cufftComplex *)d_DataSpectrumR, (cufftComplex *)d_resultR,CUFFT_INVERSE );
    // Crop the centre downSampled_foveaSize region with the FFT scale.
    complex2real_scaled<<< dim3(16,16), dim3(16,16)>>>(d_resultL, leftFoveaWh, FILT_WIDTH, downSampled_foveaSize, float(1)/(FILT_WIDTH*FILT_WIDTH) );
    complex2real_scaled<<< dim3(16,16), dim3(16,16)>>>(d_resultR, rightFoveaWh, FILT_WIDTH, downSampled_foveaSize, float(1)/(FILT_WIDTH*FILT_WIDTH));
}
// Uploads the host-side whitening filter and expands it into the complex
// array d_filter used by imgWhitening().
void BatchInput::initFilt(float* f)
{
    filtdata = f;
    const size_t filtBytes = sizeof(float) * FILT_WIDTH * FILT_WIDTH;
    cudaMemcpy(d_filterReal, filtdata, filtBytes, cudaMemcpyHostToDevice);
    // Convert the staged real filter to complex form (imaginary zeroed).
    real2complex<<< dim3(16,16), dim3(16,16)>>>(d_filterReal, d_filter, FILT_WIDTH);
}
79a9bf5fc98e2cefdfb1ee5e09831ea810fbcbbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
//#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 32
#define C 256
#define H 15
#define W 15
#define R 3
#define S 3
#define M 384
#define E 13
#define F 13
#define U 1
// Sums the per-channel partial outputs in d_o over the channel axis and
// writes one value per (image, filter, pixel) position into d_r.
// Grid: (num_img, num_wt); block: one thread per output pixel.
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
    const int tileSize  = blockDim.x * blockDim.y;
    const int pixel     = threadIdx.y * blockDim.x + threadIdx.x;
    const int planeBase = blockIdx.x * num_wt * tileSize
                        + blockIdx.y * tileSize + pixel;
    const int chStride  = num_wt * num_img * tileSize;

    float acc = 0;
    for (int ch = 0; ch < num_ch; ++ch)
        acc += d_o[ch * chStride + planeBase];

    d_r[planeBase] = acc;
}
// Per-channel 2D convolution with ReLU.
// Grid: (num_img, num_wt, num_ch); block: (width, height) = one output
// tile. Each block convolves one (image, filter, channel) plane into d_o;
// the channel partials are summed afterwards by red_ch.
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//printf("gpu started\n");
    __shared__ float s_w[R*S];   // one R x S filter plane
    __shared__ float s_i[H*W];   // one H x W input plane
    int row = threadIdx.y; int col = threadIdx.x;
    // First R*S threads stage this (filter, channel) plane of the weights.
    if(row*width+col<R*S)
    {
        s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
    }
    {
        // Stage the input plane: each thread loads its own element plus,
        // where in range, the element one block-tile (169 threads) ahead.
        int s_i_idx = row*blockDim.x+col;
        s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx];
        //s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169];
        //s_i[s_i_idx+338] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+338];
        //s_i[s_i_idx+507] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+507];
        if(s_i_idx+169 < H*W)
            s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169];
    }
    __syncthreads();
    float prod = 0;
    if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
    {
        // NOTE(review): the float3 casts read 3 consecutive floats, so the
        // loop is only correct for wt_width == 3, and the __syncthreads()
        // calls inside this guarded branch (here and below) are safe only
        // when every thread of the block passes the guard -- true for the
        // 13x13 launch in main, fragile otherwise.
        for (int i=0; i<wt_width; i++){
            float3 ip = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col));
            float3 wt = *((float3*)(s_w+i*wt_width));
            prod += ip.x*wt.x+ip.y*wt.y+ip.z*wt.z;
            __syncthreads();
        }
        // ReLU: only non-negative sums are stored.
        // NOTE(review): d_o is never zero-initialised, so positions with
        // prod < 0 keep whatever the allocation contained; this only works
        // because inputs and weights are non-negative here -- verify.
        if(prod>=0)
            d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod;
        // Clear the staged weights for reuse.
        if(row*width+col<R*S){
            s_w[(row*width+col)] = 0;
            __syncthreads();
        }
    }
}
// CPU reference for the convolution: output[n][m][x][y] accumulates
// ip*wt over all channels k and the R x S window, skipping negative
// products.
// NOTE(review): the GPU path applies its ReLU to each per-channel *sum*
// before the channel reduction, while this loop drops individual negative
// products; the two only agree when every product is non-negative (true
// here since inputs and weights are drawn from [0,1)).
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
    int x,y,i,j,m,n,k;
    for(n=0; n<batch_size; n++){                 // image in batch
        for (m=0 ; m<M; m++){                    // output filter
            for (x=0; x<F; x++){                 // output row
                for(y=0; y<E; y++){              // output column
                    // OP[x][y] = 0; // adding bias to output
                    for (i=0; i<R; i++){         // filter row
                        for (j=0; j<S; j++){     // filter column
                            for(k=0; k<C; k++){  // input channel
                                float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
                                float wt = weight[m*C*R*S+k*R*S+i*S+j];
                                float prod = ip*wt;
                                if(prod>=0)
                                    output[n*E*F*M+m*E*F+x*E+y] += prod;
                                //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
                            }}
                    }
                }
            }
        }
    }
}
/*
 * Driver: builds random weights and a zero-padded random input batch,
 * runs the per-channel GPU convolution (ew_gpu_mmul) followed by the
 * channel reduction (red_ch), and reports the largest deviation from a
 * reference dump in the file "layer_3_<batch_size>".
 */
int main(int argc, char* argv[])
{
    // Guard the argv[1] dereference (was missing).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <batch_size>\n", argv[0]);
        return 1;
    }
    int batch_size = atoi(argv[1]);

    /*************INITALIZING MATRICES*********************************/
    float *IP  = (float*) malloc(batch_size*C*H*W*sizeof(float));
    float *OP  = (float*) malloc(batch_size*M*F*E*sizeof(float));
    float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
    float *WT  = (float*) malloc(M*C*R*S*sizeof(float));
    float* d_o;
    float* d_i;
    float* d_w;
    float* d_r;
    int c,d,m,n,k;

    /* Weights: uniform random in [0,1). */
    for (m=0; m<M; m++){
        for(k=0;k<C;k++){
            for (c=0; c<R; c++){
                for(d=0; d<S; d++){
                    WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
                }
            }
        }
    }
    /* Host output zeroed (the CPU reference accumulates into it). */
    for (n=0; n<batch_size;n++){
        for (m=0; m<M; m++){
            for (c=0; c<F; c++){
                for(d=0; d<E; d++){
                    OP[n*M*F*E+m*F*E+c*E+d] = 0;
                }
            }
        }
    }
    /* Input: random interior with a one-pixel zero border (padding). */
    for (n=0; n<batch_size; n++){
        for(k=0;k<C;k++){
            for (c=0; c<H; c++){
                for(d=0; d<W; d++){
                    if ((c==0) || (d==0) || (c==14) || (d==14))
                        IP[n*C*H*W+k*H*W+c*W+d] = 0;
                    else
                        IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
                }
            }
        }
    }

    if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
    {
        printf("error in d_i malloc\n");
    }
    hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
    if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
    {
        printf("error in d_w malloc\n");
    }
    hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
    /* (long int) cast: C*batch*M*E*F can overflow 32-bit int. */
    if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
    {
        printf("error in d_o malloc\n");
    }
    if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
    {
        printf("error in d_r malloc\n");
    }

    //element_wise_mmul(OP, IP, WT, batch_size);   // CPU reference (slow)
    printf("cpu done\n");

    dim3 dimGrid(batch_size,384,256);
    dim3 dimBlock(13,13,1);
    dim3 dimGridRed(batch_size,384,1);
    dim3 dimBlockRed(13,13,1);

    /* BUGFIX: hipify had appended "hipLaunchKernelGGL((" to the end of a
     * "//" comment line, commenting out the macro prefix and leaving the
     * continuation line dangling; the launches are restored here. */
    hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,13,13,1,15,3,384,batch_size,256);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,256,batch_size,384);

    hipMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);

    /* Compare against the reference dump, tracking the worst deviation. */
    int g,h,s,u;
    float max_error = 0;
    string filename = "layer_3_"+to_string(batch_size);
    ifstream fin(filename.c_str());
    string line ;
    for (u=0;u<batch_size;u++){
        for (s=0;s<M;s++){
            for (g=0; g<F; g++){
                for(h=0; h<E; h++){
                    getline(fin,line);
                    float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
                    if(error > max_error)
                        max_error = error;
                }
            }
        }
    }
    fin.close();
    printf("max error is %f\n", max_error);

    hipFree(d_o);
    hipFree(d_i);
    hipFree(d_w);
    hipFree(d_r);
    free(OPG);
    free(IP);
    free(WT);
    free(OP);
    return 0;
}
| 79a9bf5fc98e2cefdfb1ee5e09831ea810fbcbbe.cu | #include <stdio.h>
#include <iostream>
//#include <cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 32
#define C 256
#define H 15
#define W 15
#define R 3
#define S 3
#define M 384
#define E 13
#define F 13
#define U 1
// Sums the per-channel partial outputs in d_o over the channel axis and
// writes one value per (image, filter, pixel) position into d_r.
// Grid: (num_img, num_wt); block: one thread per output pixel.
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
    const int tileSize  = blockDim.x * blockDim.y;
    const int pixel     = threadIdx.y * blockDim.x + threadIdx.x;
    const int planeBase = blockIdx.x * num_wt * tileSize
                        + blockIdx.y * tileSize + pixel;
    const int chStride  = num_wt * num_img * tileSize;

    float acc = 0;
    for (int ch = 0; ch < num_ch; ++ch)
        acc += d_o[ch * chStride + planeBase];

    d_r[planeBase] = acc;
}
// Per-channel 2D convolution with ReLU.
// Grid: (num_img, num_wt, num_ch); block: (width, height) = one output
// tile. Each block convolves one (image, filter, channel) plane into d_o;
// the channel partials are summed afterwards by red_ch.
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//printf("gpu started\n");
    __shared__ float s_w[R*S];   // one R x S filter plane
    __shared__ float s_i[H*W];   // one H x W input plane
    int row = threadIdx.y; int col = threadIdx.x;
    // First R*S threads stage this (filter, channel) plane of the weights.
    if(row*width+col<R*S)
    {
        s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
    }
    {
        // Stage the input plane: each thread loads its own element plus,
        // where in range, the element one block-tile (169 threads) ahead.
        int s_i_idx = row*blockDim.x+col;
        s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx];
        //s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169];
        //s_i[s_i_idx+338] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+338];
        //s_i[s_i_idx+507] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+507];
        if(s_i_idx+169 < H*W)
            s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169];
    }
    __syncthreads();
    float prod = 0;
    if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
    {
        // NOTE(review): the float3 casts read 3 consecutive floats, so the
        // loop is only correct for wt_width == 3, and the __syncthreads()
        // calls inside this guarded branch (here and below) are safe only
        // when every thread of the block passes the guard -- true for the
        // 13x13 launch in main, fragile otherwise.
        for (int i=0; i<wt_width; i++){
            float3 ip = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col));
            float3 wt = *((float3*)(s_w+i*wt_width));
            prod += ip.x*wt.x+ip.y*wt.y+ip.z*wt.z;
            __syncthreads();
        }
        // ReLU: only non-negative sums are stored.
        // NOTE(review): d_o is never zero-initialised, so positions with
        // prod < 0 keep whatever the allocation contained; this only works
        // because inputs and weights are non-negative here -- verify.
        if(prod>=0)
            d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod;
        // Clear the staged weights for reuse.
        if(row*width+col<R*S){
            s_w[(row*width+col)] = 0;
            __syncthreads();
        }
    }
}
// CPU reference for the convolution: output[n][m][x][y] accumulates
// ip*wt over all channels k and the R x S window, skipping negative
// products.
// NOTE(review): the GPU path applies its ReLU to each per-channel *sum*
// before the channel reduction, while this loop drops individual negative
// products; the two only agree when every product is non-negative (true
// here since inputs and weights are drawn from [0,1)).
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
    int x,y,i,j,m,n,k;
    for(n=0; n<batch_size; n++){                 // image in batch
        for (m=0 ; m<M; m++){                    // output filter
            for (x=0; x<F; x++){                 // output row
                for(y=0; y<E; y++){              // output column
                    // OP[x][y] = 0; // adding bias to output
                    for (i=0; i<R; i++){         // filter row
                        for (j=0; j<S; j++){     // filter column
                            for(k=0; k<C; k++){  // input channel
                                float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
                                float wt = weight[m*C*R*S+k*R*S+i*S+j];
                                float prod = ip*wt;
                                if(prod>=0)
                                    output[n*E*F*M+m*E*F+x*E+y] += prod;
                                //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
                            }}
                    }
                }
            }
        }
    }
}
/*
 * Driver: builds random weights and a zero-padded random input batch,
 * runs the per-channel GPU convolution (ew_gpu_mmul) followed by the
 * channel reduction (red_ch), and reports the largest deviation from a
 * reference dump in the file "layer_3_<batch_size>".
 * NOTE(review): argv[1] is dereferenced without an argc check, and d_o is
 * never zero-initialised (the kernel only writes non-negative sums) --
 * both rely on well-behaved usage; confirm before reuse.
 */
int main(int argc, char* argv[])
{
    int batch_size = atoi(argv[1]);
    /*************INITALIZING MATRICES*********************************/
    float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
    //float IP[H][W];
    float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
    //float OP[F][E];
    float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
    float *WT = (float*) malloc(M*C*R*S*sizeof(float));
    //float WT[R][S];
    float* d_o;
    float* d_i;
    float* d_w;
    float* d_r;
    //clock_t cpu_start, gpu_start, cpu_end, gpu_end;
    //int a,b,c,d;
    int c,d,m,n,k;
    /*INITIALIZING WEIGHT MATRIX*/
    // Weights: uniform random in [0,1).
    for (m=0; m<M; m++){
        for(k=0;k<C;k++){
            for (c=0; c<R; c++){
                for(d=0; d<S; d++){
                    //WT[c][d] = 2.0;
                    //WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
                    WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
                }
            }
        }
    }
    /*INITIALIZING OUTPUT MATRIX*/
    // Host output zeroed (the CPU reference accumulates into it).
    for (n=0; n<batch_size;n++){
        for (m=0; m<M; m++){
            for (c=0; c<F; c++){
                for(d=0; d<E; d++){
                    //OP[c][d] = 0;
                    OP[n*M*F*E+m*F*E+c*E+d] = 0;
                }
            }
        }
    }
    /*INITIALIZING INPUT MATRIX*/
    // Input: random interior with a one-pixel zero border (padding).
    for (n=0; n<batch_size; n++){
        for(k=0;k<C;k++){
            for (c=0; c<H; c++){
                for(d=0; d<W; d++){
                    // IP[c][d] = (a+b+c+d);
                    if ((c==0) || (d==0) || (c==14) || (d==14))
                        IP[n*C*H*W+k*H*W+c*W+d] = 0;
                    else
                        IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
                }
            }
        }
    }
    if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
    {
        printf("error in d_i malloc\n");
    }
    cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
    if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
    {
        printf("error in d_w malloc\n");
    }
    cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
    // (long int) cast: C*batch*M*E*F can overflow 32-bit int.
    if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
    {
        printf("error in d_o malloc\n");
    }
    if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
    {
        printf("error in d_r malloc\n");
    }
    //cpu_start = clock();
    //element_wise_mmul(OP, IP, WT, batch_size);
    printf("cpu done\n");
    //cpu_end = clock();
    dim3 dimGrid(batch_size,384,256);
    dim3 dimBlock(13,13,1);
    dim3 dimGridRed(batch_size,384,1);
    dim3 dimBlockRed(13,13,1);
    //int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
    //gpu_start = clock();
    // Per-channel convolution, then reduce the channel partials.
    ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,13,13,1,15,3,384,batch_size,256);
    cudaDeviceSynchronize();
    red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,256,batch_size,384);
    //gpu_end = clock();
    //void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
    //cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
    //cudaDeviceSynchronize();
    cudaMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
    /**print outputs**/
    //int e,f,g,h;
    int g,h,s,u;
    float max_error = 0;
    // Compare against the reference dump, tracking the worst deviation.
    string filename = "layer_3_"+to_string(batch_size);
    ifstream fin(filename.c_str());
    string line ;
    //for (t=0;t<C;t++){
    for (u=0;u<batch_size;u++){
        for (s=0;s<M;s++){
            for (g=0; g<F; g++){
                for(h=0; h<E; h++){
                    getline(fin,line);
                    float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
                    // float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
                    if(error > max_error)
                        max_error = error;
                    // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
                    // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
                    // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
                }
            }
        }
    }
    fin.close();
    printf("max error is %f\n", max_error);
    //}
    //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
    //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
    cudaFree(d_o);
    cudaFree(d_i);
    cudaFree(d_w);
    cudaFree(d_r);
    free(OPG);
    free(IP);
    free(WT);
    free(OP);
    return 0;
}
|
e63ff16369284af76491e1e6c0e34e847a85bcf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1_15.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
// Kernel: one thread per volume.  Writes the NEQ-entry initial state vector
// of each cell into column `threadID` of the pitched matrix `sv`.
// mapping[threadID] selects the cell type: 0 = myocardium, else epicardium.
// NOTE(review): `mapping` is dereferenced unconditionally -- a NULL mapping
// (host wrapper called without extra_data) faults here; confirm callers.
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;

    if (threadID < num_volumes)
    {
        // Initial conditions for TenTusscher 2004 myocardium
        if (mapping[threadID] == 0)
        {
            // Default initial conditions
            /*
            *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V;   // V; millivolt
            *((real * )((char *) sv + pitch * 1) + threadID) = 0.f;         //M
            *((real * )((char *) sv + pitch * 2) + threadID) = 0.75;        //H
            *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f;       //J
            *((real * )((char *) sv + pitch * 4) + threadID) = 0.f;         //Xr1
            *((real * )((char *) sv + pitch * 5) + threadID) = 1.f;         //Xr2
            *((real * )((char *) sv + pitch * 6) + threadID) = 0.f;         //Xs
            *((real * )((char *) sv + pitch * 7) + threadID) = 1.f;         //S
            *((real * )((char *) sv + pitch * 8) + threadID) = 0.f;         //R
            *((real * )((char *) sv + pitch * 9) + threadID) = 0.f;         //D
            *((real * )((char *) sv + pitch * 10) + threadID) = 1.f;        //F
            *((real * )((char *) sv + pitch * 11) + threadID) = 1.f;        //FCa
            *((real * )((char *) sv + pitch * 12) + threadID) = 1.f;        //G
            *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002;     //Cai
            *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f;       //CaSR
            *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f;      //Nai
            *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f;     //Ki
            */
            // Elnaz's steady-state initial conditions (precomputed resting state)
            real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
            for (uint32_t i = 0; i < NEQ; i++)
                *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
        }
        // Initial conditions for TenTusscher 2004 epicardium
        else
        {
            // Default initial conditions
            /*
            *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V;   // V; millivolt
            *((real * )((char *) sv + pitch * 1) + threadID) = 0.f;         //M
            *((real * )((char *) sv + pitch * 2) + threadID) = 0.75;        //H
            *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f;       //J
            *((real * )((char *) sv + pitch * 4) + threadID) = 0.f;         //Xr1
            *((real * )((char *) sv + pitch * 5) + threadID) = 1.f;         //Xr2
            *((real * )((char *) sv + pitch * 6) + threadID) = 0.f;         //Xs
            *((real * )((char *) sv + pitch * 7) + threadID) = 1.f;         //S
            *((real * )((char *) sv + pitch * 8) + threadID) = 0.f;         //R
            *((real * )((char *) sv + pitch * 9) + threadID) = 0.f;         //D
            *((real * )((char *) sv + pitch * 10) + threadID) = 1.f;        //F
            *((real * )((char *) sv + pitch * 11) + threadID) = 1.f;        //FCa
            *((real * )((char *) sv + pitch * 12) + threadID) = 1.f;        //G
            *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002;     //Cai
            *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f;       //CaSR
            *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f;      //Nai
            *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f;     //Ki
            */
            // Elnaz's steady-state initial conditions (precomputed resting state)
            real sv_sst[]={-86.5947213128563,0.00128280499974890,0.780358835699496,0.780221511728293,0.000174134035873362,0.485365577210685,0.00293476383879643,0.999998356711186,1.92498350501348e-08,1.88440562227888e-05,0.999771869331078,1.00650785537824,0.999977589106241,5.87636528842966e-05,0.588270003699565,9.12980854435061,140.402413298159};
            for (uint32_t i = 0; i < NEQ; i++)
                *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
        }
    }
}
// Solving the model for each cell in the tissue matrix ni x nj
// One thread per cell; advances its state `num_steps` steps of size dt.
// cells_to_solve (optional) maps threadID -> column of sv (adaptive meshes);
// mapping selects the RHS per cell (0 = myocardium, else epicardium).
// NOTE(review): stim_currents is indexed by threadID while mapping is indexed
// by sv_id -- verify both arrays are laid out accordingly by the caller.
// NOTE(review): RHS_gpu_* appears to store *next-state* values (Rush-Larsen /
// Euler already applied) into rDY, yet the loops below apply a further
// sv = dt*rDY + sv update on top -- confirm this double update against the
// upstream model before touching the numerics.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
                          int num_steps)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    int sv_id;

    // Each thread solves one cell model
    if(threadID < num_cells_to_solve)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[threadID];
        else
            sv_id = threadID;

        real rDY[NEQ];

        for (int n = 0; n < num_steps; ++n)
        {
            if (mapping[sv_id] == 0)
            {
                // Myocardium cell
                RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);

                for(int i = 0; i < NEQ; i++)
                {
                    *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
                }
            }
            else
            {
                // Epicardium cell
                RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);

                for (int i = 0; i < NEQ; i++)
                {
                    *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
                }
            }
        }
    }
}
// Right-hand side of the ten Tusscher 2004 MYOCARDIUM cell model for the cell
// stored in column `threadID_` of the pitched state matrix `sv_`.
// Writes results into rDY_[0..16].
// NOTE(review): all 17 outputs hold *updated state values* (voltage via a
// forward-Euler step, gates via Rush-Larsen, concentrations integrated in
// place) rather than time derivatives, even though the caller applies a
// further dt*rDY step -- confirm against the upstream model.
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
    // State variables (one row per equation in the pitched layout)
    real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
    real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
    real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
    real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
    real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
    real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
    real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
    real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
    real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
    real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
    real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
    real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
    real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
    real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
    real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
    real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
    real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Ionic currents (computed below)
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa;
    real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK;
    // SR calcium fluxes and per-step concentration increments
    real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    // Reversal potentials
    real Ek; real Ena; real Eks; real Eca;
    // Buffering / rectification helpers
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    // Gate rate constants, steady states and time constants
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF;
    real TAU_F; real F_INF; real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst potentials and rectification factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations (analytic buffering via quadratic roots)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate: two regimes split at -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate: same -40 mV regime split
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential update toward steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g only move when it would not increase them above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
// Right-hand side of the ten Tusscher 2004 EPICARDIUM cell model for the cell
// stored in column `threadID_` of the pitched state matrix `sv_`.
// Same structure as RHS_gpu_myo, but several conductances and the SR release
// parameters are overridden from a fitted `parameters` table (S1_15 variant).
// Writes results into rDY_[0..16]; see the NOTE on RHS_gpu_myo about the
// next-state (not derivative) semantics of the outputs.
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
    // State variables (one row per equation in the pitched layout)
    real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
    real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
    real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
    real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
    real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
    real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
    real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
    real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
    real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
    real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
    real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
    real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
    real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
    real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
    real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
    real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
    real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set (S1_15): overrides the defaults above.
    real parameters []={13.5931143418548,0.000234305879325739,0.000153733035636004,0.000521570747646260,0.278749841477854,0.152665673487231,0.188787297178472,3.91040967347224,0.0200695067032522,2.72218041991663,1099.67975070177,0.000551753358592525,0.133204533378619,0.0132111010630101,0.00494272025642177,6.68557857945522e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];      // SR release amplitude scale
    real crel=parameters[14];      // SR release baseline
    real Vleak=parameters[15];     // SR leak rate

    // Ionic currents (computed below)
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa;
    real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK;
    // SR calcium fluxes and per-step concentration increments
    real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    // Reversal potentials
    real Ek; real Ena; real Eks; real Eca;
    // Buffering / rectification helpers
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    // Gate rate constants, steady states and time constants
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF;
    real TAU_F; real F_INF; real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst potentials and rectification factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations (analytic buffering via quadratic roots)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate: two regimes split at -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate: same -40 mV regime split
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential update toward steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g only move when it would not increase them above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
| e63ff16369284af76491e1e6c0e34e847a85bcf5.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1_15.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
// Kernel: one thread per volume.  Writes the NEQ-entry initial state vector
// of each cell into column `threadID` of the pitched matrix `sv`.
// mapping[threadID] selects the cell type: 0 = myocardium, else epicardium.
// NOTE(review): `mapping` is dereferenced unconditionally -- a NULL mapping
// (host wrapper called without extra_data) faults here; confirm callers.
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;

    if (threadID < num_volumes)
    {
        // Initial conditions for TenTusscher 2004 myocardium
        if (mapping[threadID] == 0)
        {
            // Default initial conditions
            /*
            *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V;   // V; millivolt
            *((real * )((char *) sv + pitch * 1) + threadID) = 0.f;         //M
            *((real * )((char *) sv + pitch * 2) + threadID) = 0.75;        //H
            *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f;       //J
            *((real * )((char *) sv + pitch * 4) + threadID) = 0.f;         //Xr1
            *((real * )((char *) sv + pitch * 5) + threadID) = 1.f;         //Xr2
            *((real * )((char *) sv + pitch * 6) + threadID) = 0.f;         //Xs
            *((real * )((char *) sv + pitch * 7) + threadID) = 1.f;         //S
            *((real * )((char *) sv + pitch * 8) + threadID) = 0.f;         //R
            *((real * )((char *) sv + pitch * 9) + threadID) = 0.f;         //D
            *((real * )((char *) sv + pitch * 10) + threadID) = 1.f;        //F
            *((real * )((char *) sv + pitch * 11) + threadID) = 1.f;        //FCa
            *((real * )((char *) sv + pitch * 12) + threadID) = 1.f;        //G
            *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002;     //Cai
            *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f;       //CaSR
            *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f;      //Nai
            *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f;     //Ki
            */
            // Elnaz's steady-state initial conditions (precomputed resting state)
            real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
            for (uint32_t i = 0; i < NEQ; i++)
                *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
        }
        // Initial conditions for TenTusscher 2004 epicardium
        else
        {
            // Default initial conditions
            /*
            *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V;   // V; millivolt
            *((real * )((char *) sv + pitch * 1) + threadID) = 0.f;         //M
            *((real * )((char *) sv + pitch * 2) + threadID) = 0.75;        //H
            *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f;       //J
            *((real * )((char *) sv + pitch * 4) + threadID) = 0.f;         //Xr1
            *((real * )((char *) sv + pitch * 5) + threadID) = 1.f;         //Xr2
            *((real * )((char *) sv + pitch * 6) + threadID) = 0.f;         //Xs
            *((real * )((char *) sv + pitch * 7) + threadID) = 1.f;         //S
            *((real * )((char *) sv + pitch * 8) + threadID) = 0.f;         //R
            *((real * )((char *) sv + pitch * 9) + threadID) = 0.f;         //D
            *((real * )((char *) sv + pitch * 10) + threadID) = 1.f;        //F
            *((real * )((char *) sv + pitch * 11) + threadID) = 1.f;        //FCa
            *((real * )((char *) sv + pitch * 12) + threadID) = 1.f;        //G
            *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002;     //Cai
            *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f;       //CaSR
            *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f;      //Nai
            *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f;     //Ki
            */
            // Elnaz's steady-state initial conditions (precomputed resting state)
            real sv_sst[]={-86.5947213128563,0.00128280499974890,0.780358835699496,0.780221511728293,0.000174134035873362,0.485365577210685,0.00293476383879643,0.999998356711186,1.92498350501348e-08,1.88440562227888e-05,0.999771869331078,1.00650785537824,0.999977589106241,5.87636528842966e-05,0.588270003699565,9.12980854435061,140.402413298159};
            for (uint32_t i = 0; i < NEQ; i++)
                *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
        }
    }
}
// Solving the model for each cell in the tissue matrix ni x nj
// Kernel: advance every selected cell one or more explicit time steps.
// One thread integrates one cell. `cells_to_solve` (optional) maps thread ->
// state-vector column; when NULL the thread index is used directly.
// `mapping` selects the regional model per cell: 0 = myocardium RHS,
// anything else = epicardium RHS. State lives in a pitched matrix `sv`
// (one row per equation, one column per cell).
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
                          int num_steps)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_cells_to_solve)
        return;

    // Resolve which state-vector column this thread owns.
    const int cell = cells_to_solve ? cells_to_solve[tid] : tid;

    real rDY[NEQ];
    for (int step = 0; step < num_steps; ++step)
    {
        // Evaluate the regional right-hand side for this cell.
        if (mapping[cell] == 0)
            RHS_gpu_myo(sv, rDY, stim_currents[tid], cell, dt);
        else
            RHS_gpu_epi(sv, rDY, stim_currents[tid], cell, dt);

        // Forward-Euler accumulation of every state entry (pitched layout).
        for (int i = 0; i < NEQ; i++)
        {
            real *state = (real *) ((char *) sv + pitch * i) + cell;
            *state = *state + dt * rDY[i];
        }
    }
}
// Right-hand side / single explicit step of the TenTusscher 2004 human
// ventricular cell model, myocardium variant (Gks=0.062, Gto=0.294 below;
// the "TenTusscher 2004" attribution comes from the initial-condition
// comments earlier in this file).
// sv_ is a pitched matrix with one state row per equation and one column per
// cell; threadID_ selects the column read. Results are written to rDY_:
// - rDY_[1..10] hold gate values advanced with the exponential
//   (Rush-Larsen style) update INF-(INF-x)*exp(-dt/TAU);
// - rDY_[0] holds svolt + dt*(-sItot), i.e. an already-integrated voltage;
// - rDY_[11..16] hold the new fCa, g, Cai, CaSR, Nai, Ki values.
// NOTE(review): the caller (solve_gpu) afterwards applies sv += dt*rDY to
// every entry, which re-scales these already-updated values by dt — confirm
// this matches the intended integration scheme.
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// (reversal potentials via Nernst, plus IK1/IpK/INaK voltage-dependence factors)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
// CaSR update: positive root of the SR buffering quadratic (analytic step).
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
// Cai update: positive root of the cytosolic buffering quadratic.
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Exponential (Rush-Larsen style) integration toward the steady state.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g are rectified: they may not increase while depolarized (svolt > -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Right-hand side / single explicit step of the TenTusscher 2004 human
// ventricular cell model, epicardium variant (base Gks=0.245 below). Most
// maximal conductances plus the SR release/leak constants (arel, crel,
// Vleak) are then overwritten from the hard-coded parameters[] array — a
// tuned parameter set whose origin is not shown in this file.
// sv_ is a pitched matrix with one state row per equation and one column per
// cell; threadID_ selects the column read. Results are written to rDY_:
// - rDY_[1..10] hold gate values advanced with the exponential
//   (Rush-Larsen style) update INF-(INF-x)*exp(-dt/TAU);
// - rDY_[0] holds svolt + dt*(-sItot), i.e. an already-integrated voltage;
// - rDY_[11..16] hold the new fCa, g, Cai, CaSR, Nai, Ki values.
// NOTE(review): the caller (solve_gpu) afterwards applies sv += dt*rDY to
// every entry, which re-scales these already-updated values by dt — confirm
// this matches the intended integration scheme.
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Overwrite baseline conductances and SR constants with a tuned parameter
// set (hard-coded; origin not shown in this file).
real parameters []={13.5931143418548,0.000234305879325739,0.000153733035636004,0.000521570747646260,0.278749841477854,0.152665673487231,0.188787297178472,3.91040967347224,0.0200695067032522,2.72218041991663,1099.67975070177,0.000551753358592525,0.133204533378619,0.0132111010630101,0.00494272025642177,6.68557857945522e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// (reversal potentials via Nernst, plus IK1/IpK/INaK voltage-dependence factors)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
// CaSR update: positive root of the SR buffering quadratic (analytic step).
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
// Cai update: positive root of the cytosolic buffering quadratic.
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Exponential (Rush-Larsen style) integration toward the steady state.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g are rectified: they may not increase while depolarized (svolt > -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
527b27b83024e07f108c874e6e853b12c74027f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstddef>
#include <stdio.h>
namespace vecops {
// Element-wise vector addition: result[i] = ar1[i] + ar2[i] for i in [0, ar_size).
// Fixed to use a grid-stride loop: the original had every thread serially walk
// the entire array, so all launched threads did identical redundant work.
// With the grid-stride form each element is written exactly once per launch,
// the kernel is correct for any launch configuration (including <<<1,1>>>),
// and the output is unchanged.
template<class T>
__global__
void adder(T* ar1, T* ar2, T* result, const std::size_t ar_size) {
    const std::size_t stride = static_cast<std::size_t>(gridDim.x) * blockDim.x;
    for (std::size_t index = static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
         index < ar_size; index += stride) {
        result[index] = ar1[index] + ar2[index];
    }
}
} | 527b27b83024e07f108c874e6e853b12c74027f4.cu | #include <cstddef>
#include <stdio.h>
namespace vecops {
// Element-wise vector addition: result[i] = ar1[i] + ar2[i] for i in [0, ar_size).
// Fixed to use a grid-stride loop: the original had every thread serially walk
// the entire array, so all launched threads did identical redundant work.
// With the grid-stride form each element is written exactly once per launch,
// the kernel is correct for any launch configuration (including <<<1,1>>>),
// and the output is unchanged.
template<class T>
__global__
void adder(T* ar1, T* ar2, T* result, const std::size_t ar_size) {
    const std::size_t stride = static_cast<std::size_t>(gridDim.x) * blockDim.x;
    for (std::size_t index = static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
         index < ar_size; index += stride) {
        result[index] = ar1[index] + ar2[index];
    }
}
} |
ee1b353e1867b4dfc0f6e3883895c9f87b48d7f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "getLowerAAt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// 20 candidate launch shapes: four square blocks (8x8..32x32), then 1-D
// blocks of 64..1024 threads in steps of 64.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Square matrix extents to benchmark (argv[1] selects how many are used).
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver (HIP): sweeps the getLowerAAt kernel over the first
// argv[1] matrix sizes x 20 thread-block shapes, timing 1000 launches per
// configuration after one warm-up launch plus 10 priming launches. Grid
// dimensions are the matrix extent rounded up to a multiple of the block
// extent. Only launch (enqueue) time is measured — there is no synchronize
// inside or after the timed loop.
// NOTE(review): hipMalloc sizes are XSIZE*YSIZE BYTES, but A and S are double
// matrices — likely missing a *sizeof(double); confirm against getLowerAAt.
// NOTE(review): A and S are never freed (leak grows per iteration) and no
// HIP API/launch errors are checked.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
double *S = NULL;
hipMalloc(&S, XSIZE*YSIZE);
std::size_t imageNum = 1;
std::size_t pixelNum = 1;
// Round each extent up to the next multiple of the block dimension.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) is a no-op used to force context creation; then one warm-up
// launch followed by a synchronize so priming/timing start from idle.
hipFree(0);hipLaunchKernelGGL((
getLowerAAt), dim3(gridBlock),dim3(threadBlock), 0, 0, A,S,imageNum,pixelNum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
getLowerAAt), dim3(gridBlock),dim3(threadBlock), 0, 0, A,S,imageNum,pixelNum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
getLowerAAt), dim3(gridBlock),dim3(threadBlock), 0, 0, A,S,imageNum,pixelNum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
// Report: [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ee1b353e1867b4dfc0f6e3883895c9f87b48d7f1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "getLowerAAt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// 20 candidate launch shapes: four square blocks (8x8..32x32), then 1-D
// blocks of 64..1024 threads in steps of 64.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Square matrix extents to benchmark (argv[1] selects how many are used).
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each of the first argv[1] matrix sizes, sweep the 20
// thread-block shapes in blocks_, and for each configuration time 1000
// launches of getLowerAAt (after one warm-up launch plus 10 priming
// launches). Only launch/enqueue time is measured, matching the original.
// Fixes over the generated code:
//  - device buffers are sized in doubles (the original allocated
//    XSIZE*YSIZE BYTES, under-allocating by sizeof(double));
//  - buffers are freed each iteration instead of leaking;
//  - grid rounding uses ceil-division instead of increment loops.
// Output format per configuration: [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // XSIZE*YSIZE doubles per matrix.
            double *A = NULL;
            cudaMalloc(&A, (size_t)XSIZE * YSIZE * sizeof(double));
            double *S = NULL;
            cudaMalloc(&S, (size_t)XSIZE * YSIZE * sizeof(double));
            std::size_t imageNum = 1;
            std::size_t pixelNum = 1;
            // Round each extent up to the next multiple of the block dimension.
            int iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX;
            int iYSIZE = ((YSIZE + BLOCKY - 1) / BLOCKY) * BLOCKY;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op: forces lazy context creation before timing
            // Warm-up launch, then synchronize so priming/timing start from idle.
            getLowerAAt<<<gridBlock, threadBlock>>>(A, S, imageNum, pixelNum);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                getLowerAAt<<<gridBlock, threadBlock>>>(A, S, imageNum, pixelNum);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                getLowerAAt<<<gridBlock, threadBlock>>>(A, S, imageNum, pixelNum);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release device memory (the original leaked A and S every iteration).
            cudaFree(A);
            cudaFree(S);
        }
    }
}
bb02a3e22d58a59b3e684a6b0138db5e416380dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file imagenet_vgg.cu
* @brief A VGG-16 network for ImageNet.
* @author Ang Li (PNNL)
*
*/
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <iostream>
#include <string>
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "utility.h"
#include "sbnn32_param.h"
#include "sbnn64_param.h"
#include "sbnn32.cuh"
#include "sbnn64.cuh"
#include "data.h"
using namespace cooperative_groups;
using namespace std;
int main32();
int main64();
// Entry point: runs only the 64-bit-word implementation; the 32-bit path is
// kept available but disabled.
int main()
{
    //main32();
    main64();
    return 0;
}
// Fused single-kernel inference pass of the binary VGG-16 network, 32-bit
// word variant: 13 binary conv layers (pooling fused into layers 2, 4, 7,
// 10 and 13 per their ConvPool32Layer calls), two binary FC layers, and the
// output layer. Each layer reads the previous layer's output buffer, so the
// kernel relies on grid-wide barriers between layers; it must therefore be
// launched with hipLaunchCooperativeKernel (as done in main32), otherwise
// grid.sync() is invalid.
__global__ void vggnet32(
In32Conv32LayerParam* bconv1,
Conv32LayerParam* bconv2,
Conv32LayerParam* bconv3,
Conv32LayerParam* bconv4,
Conv32LayerParam* bconv5,
Conv32LayerParam* bconv6,
Conv32LayerParam* bconv7,
Conv32LayerParam* bconv8,
Conv32LayerParam* bconv9,
Conv32LayerParam* bconv10,
Conv32LayerParam* bconv11,
Conv32LayerParam* bconv12,
Conv32LayerParam* bconv13,
Fc32LayerParam* bfc1,
Fc32LayerParam* bfc2,
Out32LayerParam* bout)
{
grid_group grid = this_grid();
//========= Conv1 ============
In32Conv32Layer(bconv1);
grid.sync();
//========= Conv2 ============
ConvPool32Layer(bconv2);
grid.sync();
//========= Conv3 ============
Conv32Layer(bconv3);
grid.sync();
//========= Conv4 ============
ConvPool32Layer(bconv4);
grid.sync();
//========= Conv5 ============
Conv32Layer(bconv5);
grid.sync();
//========= Conv6 ============
Conv32Layer(bconv6);
grid.sync();
//========= Conv7 ============
ConvPool32Layer(bconv7);
grid.sync();
//========= Conv8 ============
Conv32Layer(bconv8);
grid.sync();
//========= Conv9 ============
Conv32Layer(bconv9);
grid.sync();
//========= Conv10 ============
ConvPool32Layer(bconv10);
grid.sync();
//========= Conv11 ============
Conv32Layer(bconv11);
grid.sync();
//========= Conv12 ============
Conv32Layer(bconv12);
grid.sync();
//========= Conv13 ============
ConvPool32Layer(bconv13);
grid.sync();
//========= Fc1 ============
Fc32Layer(bfc1);
//Fc32LayerBatched(bfc1);
grid.sync();
//========= Fc2 ============
Fc32Layer(bfc2);
//Fc32LayerBatched(bfc2);
grid.sync();
////========== Output ===========
Out32Layer(bout);
//Out32LayerBatched(bout);
}
// Fused single-kernel inference pass of the binary VGG-16 network, 64-bit
// word variant: 13 binary conv layers (pooling fused into layers 2, 4, 7,
// 10 and 13), two binary FC layers (batched variants), and the batched
// output layer. grid.sync() is the inter-layer barrier, so this kernel must
// be launched cooperatively.
// SET_KERNEL_TIMER / TICK_KERNEL_TIMER are presumably per-layer timing
// macros defined in the included headers — definitions not shown here.
__global__ void vggnet64(
In32Conv64LayerParam* bconv1,
Conv64LayerParam* bconv2,
Conv64LayerParam* bconv3,
Conv64LayerParam* bconv4,
Conv64LayerParam* bconv5,
Conv64LayerParam* bconv6,
Conv64LayerParam* bconv7,
Conv64LayerParam* bconv8,
Conv64LayerParam* bconv9,
Conv64LayerParam* bconv10,
Conv64LayerParam* bconv11,
Conv64LayerParam* bconv12,
Conv64LayerParam* bconv13,
Fc64LayerParam* bfc1,
Fc64LayerParam* bfc2,
Out64LayerParam* bout)
{
grid_group grid = this_grid();
SET_KERNEL_TIMER;
//========= Conv1 ============
In32Conv64Layer(bconv1);
grid.sync();
TICK_KERNEL_TIMER(bconv1);
//========= Conv2 ============
ConvPool64Layer(bconv2);
grid.sync();
TICK_KERNEL_TIMER(bconv2);
//========= Conv3 ============
Conv64Layer(bconv3);
grid.sync();
TICK_KERNEL_TIMER(bconv3);
//========= Conv4 ============
ConvPool64Layer(bconv4);
grid.sync();
TICK_KERNEL_TIMER(bconv4);
//========= Conv5 ============
Conv64Layer(bconv5);
grid.sync();
TICK_KERNEL_TIMER(bconv5);
//========= Conv6 ============
Conv64Layer(bconv6);
grid.sync();
TICK_KERNEL_TIMER(bconv6);
//========= Conv7 ============
ConvPool64Layer(bconv7);
grid.sync();
TICK_KERNEL_TIMER(bconv7);
//========= Conv8 ============
Conv64Layer(bconv8);
grid.sync();
TICK_KERNEL_TIMER(bconv8);
//========= Conv9 ============
Conv64Layer(bconv9);
grid.sync();
TICK_KERNEL_TIMER(bconv9);
//========= Conv10 ============
ConvPool64Layer(bconv10);
grid.sync();
TICK_KERNEL_TIMER(bconv10);
//========= Conv11 ============
Conv64Layer(bconv11);
grid.sync();
TICK_KERNEL_TIMER(bconv11);
//========= Conv12 ============
Conv64Layer(bconv12);
grid.sync();
TICK_KERNEL_TIMER(bconv12);
//========= Conv13 ============
ConvPool64Layer(bconv13);
grid.sync();
TICK_KERNEL_TIMER(bconv13);
//========= Fc1 ============
//Fc64Layer(bfc1);
Fc64LayerBatched(bfc1);
grid.sync();
TICK_KERNEL_TIMER(bfc1);
//========= Fc2 ============
//Fc64Layer(bfc2);
Fc64LayerBatched(bfc2);
grid.sync();
TICK_KERNEL_TIMER(bfc2);
////========== Output ===========
//Out64Layer(bout);
Out64LayerBatched(bout);
TICK_KERNEL_TIMER(bout);
}
int main32()
{
int dev = 5;
hipSetDevice(dev);
const unsigned batch = 32;
const unsigned output_size = 1000;
const unsigned image_height = 224;
const unsigned image_width = 224;
const unsigned image_channel = 3;
const unsigned filter_height = 3;
const unsigned filter_width = 3;
const unsigned n_hidden = 4096;
//=============== Get Input and Label =================
float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float));
unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned));
read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch);
//================ Get Weight =================
FILE* config_file = fopen("../pytorch/BinaryNet/vgg_imagenet.csv","r");
//================ Set Network =================
//Bconv1 Layer
In32Conv32LayerParam* bconv1 = new In32Conv32LayerParam("Conv1", image_height, image_width,
filter_height, filter_width, 3, 64, batch);
In32Conv32LayerParam* bconv1_gpu = bconv1->initialize(images, config_file);
//Bconv2 Layer
Conv32LayerParam* bconv2 = new Conv32LayerParam("Conv2", bconv1->output_height,
bconv1->output_width, filter_height, filter_width, 64, 64, batch, 1, 1,
true, 2, 2, false);
Conv32LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu());
//Bconv3 Layer
Conv32LayerParam* bconv3 = new Conv32LayerParam("Conv3", bconv2->output_height,
bconv2->output_width, filter_height, filter_width, 64, 128, batch);
Conv32LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu());
//Bconv4 Layer
Conv32LayerParam* bconv4 = new Conv32LayerParam("Conv4", bconv3->output_height,
bconv3->output_width, filter_height, filter_width, 128, 128, batch, 1, 1,
true, 2, 2, false);
Conv32LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu());
//Bconv5 Layer
Conv32LayerParam* bconv5 = new Conv32LayerParam("Conv5", bconv4->output_height,
bconv4->output_width, filter_height, filter_width, 128, 256, batch);
Conv32LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu());
//Bconv6 Layer
Conv32LayerParam* bconv6 = new Conv32LayerParam("Conv6", bconv5->output_height,
bconv5->output_width, filter_height, filter_width, 256, 256, batch);
Conv32LayerParam* bconv6_gpu = bconv6->initialize(config_file, bconv5->get_output_gpu());
//Bconv7 Layer
Conv32LayerParam* bconv7 = new Conv32LayerParam("Conv7", bconv6->output_height,
bconv6->output_width, filter_height, filter_width, 256, 256, batch, 1, 1,
true, 2, 2, false);
Conv32LayerParam* bconv7_gpu = bconv7->initialize(config_file, bconv6->get_output_gpu());
//Bconv8 Layer
Conv32LayerParam* bconv8 = new Conv32LayerParam("Conv8", bconv7->output_height,
bconv7->output_width, filter_height, filter_width, 256, 512, batch);
Conv32LayerParam* bconv8_gpu = bconv8->initialize(config_file, bconv7->get_output_gpu());
//Bconv9 Layer
Conv32LayerParam* bconv9 = new Conv32LayerParam("Conv9", bconv8->output_height,
bconv8->output_width, filter_height, filter_width, 512, 512, batch);
Conv32LayerParam* bconv9_gpu = bconv9->initialize(config_file, bconv8->get_output_gpu());
//Bconv10 Layer
Conv32LayerParam* bconv10 = new Conv32LayerParam("Conv10", bconv9->output_height,
bconv9->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
true, 2, 2, false);
Conv32LayerParam* bconv10_gpu = bconv10->initialize(config_file, bconv9->get_output_gpu());
//Bconv11 Layer
Conv32LayerParam* bconv11 = new Conv32LayerParam("Conv11", bconv10->output_height,
bconv10->output_width, filter_height, filter_width, 512, 512, batch);
Conv32LayerParam* bconv11_gpu = bconv11->initialize(config_file, bconv10->get_output_gpu());
//Bconv12 Layer
Conv32LayerParam* bconv12 = new Conv32LayerParam("Conv12", bconv11->output_height,
bconv11->output_width, filter_height, filter_width, 512, 512, batch);
Conv32LayerParam* bconv12_gpu = bconv12->initialize(config_file, bconv11->get_output_gpu());
//Bconv13 Layer
Conv32LayerParam* bconv13 = new Conv32LayerParam("Conv13", bconv12->output_height,
bconv12->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
true, 2, 2, true);
Conv32LayerParam* bconv13_gpu = bconv13->initialize(config_file, bconv12->get_output_gpu());
//Fc1 Layer
Fc32LayerParam* bfc1 = new Fc32LayerParam("Fc1", batch, (bconv13->output_height)
*(bconv13->output_width)*512, n_hidden);
Fc32LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv13->get_output_gpu());
//Fc2 Layer
Fc32LayerParam* bfc2 = new Fc32LayerParam("Fc2", batch, n_hidden, n_hidden);
Fc32LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
//Out Layer
Out32LayerParam* bout = new Out32LayerParam("Fout", batch, n_hidden, output_size, true);
Out32LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu());
//================ Setup Kernel =================
int numThreads = 1024;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int numBlocksPerSm;
int shared_memory = 512*sizeof(int)*32;
hipFuncSetAttribute(vggnet32, hipFuncAttributeMaxDynamicSharedMemorySize,98304);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, vggnet32, numThreads, shared_memory);
//hipFuncSetAttribute(alexnet32, hipFuncAttributePreferredSharedMemoryCarveout,0);
void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bconv6_gpu,
&bconv7_gpu, &bconv8_gpu, &bconv9_gpu, &bconv10_gpu, &bconv11_gpu, &bconv12_gpu, &bconv13_gpu,
&bfc1_gpu, &bfc2_gpu, &bout_gpu};
START_TIMER;
hipLaunchCooperativeKernel((void*)vggnet32, numBlocksPerSm*deviceProp.multiProcessorCount,
numThreads, args, shared_memory);
STOP_TIMER;
//float* ss = bfc1->download_full_output();
//int a = 20980;
//int b = 21080;
//int max_width = 4;
//for (int i=a; i<b; i++)
//{
//printf("%*.0f ",max_width, ss[i]);
//if ( (i-a+1)%18 == 0)
//printf("\n");
//}
//printf("\n");
//================ Output =================
float* output = bout->download_output();
validate_prediction(output, image_labels, output_size, batch);
delete bconv1;
delete bconv2;
delete bconv3;
delete bconv4;
delete bconv5;
delete bconv6;
delete bconv7;
delete bconv8;
delete bconv9;
delete bconv10;
delete bconv11;
delete bconv12;
delete bconv13;
delete bfc1;
delete bfc2;
delete bout;
return 0;
}
// Host driver for the 64-bit-word binarized VGG-16 (see vggnet64), HIP build.
// Loads a batch of ImageNet images and the pre-trained binary weights, builds
// the 16-layer network on the GPU, launches the whole network as a single
// cooperative kernel, and validates the predicted labels.
// Returns 0 on success, -1 if host resources could not be acquired.
int main64()
{
    int dev = 4;
    hipSetDevice(dev);

    // VGG-16 hyper-parameters for 224x224 RGB ImageNet input.
    const unsigned batch = 32;
    const unsigned output_size = 1000;
    const unsigned image_height = 224;
    const unsigned image_width = 224;
    const unsigned image_channel = 3;
    const unsigned filter_height = 3;
    const unsigned filter_width = 3;
    const unsigned n_hidden = 4096;

    //=============== Get Input and Label =================
    float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float));
    unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned));
    if (images == NULL || image_labels == NULL)
    {
        fprintf(stderr, "main64: host allocation of image/label buffers failed.\n");
        free(images);
        free(image_labels);
        return -1;
    }
    read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch);

    //================ Get Weight =================
    FILE* config_file = fopen("../pytorch/BinaryNet/vgg_imagenet.csv","r");
    if (config_file == NULL)
    {
        // Every initialize() below reads weights sequentially from this file.
        fprintf(stderr, "main64: cannot open weight file ../pytorch/BinaryNet/vgg_imagenet.csv\n");
        free(images);
        free(image_labels);
        return -1;
    }

    //================ Set Network =================
    // Each layer is wired to the GPU output buffer of its predecessor and
    // consumes its weights from config_file in construction order.
    //Bconv1 Layer
    In32Conv64LayerParam* bconv1 = new In32Conv64LayerParam("Conv1", image_height, image_width,
            filter_height, filter_width, 3, 64, batch);
    In32Conv64LayerParam* bconv1_gpu = bconv1->initialize(images, config_file);
    //Bconv2 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv2 = new Conv64LayerParam("Conv2", bconv1->output_height,
            bconv1->output_width, filter_height, filter_width, 64, 64, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu());
    //Bconv3 Layer
    Conv64LayerParam* bconv3 = new Conv64LayerParam("Conv3", bconv2->output_height,
            bconv2->output_width, filter_height, filter_width, 64, 128, batch);
    Conv64LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu());
    //Bconv4 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv4 = new Conv64LayerParam("Conv4", bconv3->output_height,
            bconv3->output_width, filter_height, filter_width, 128, 128, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu());
    //Bconv5 Layer
    Conv64LayerParam* bconv5 = new Conv64LayerParam("Conv5", bconv4->output_height,
            bconv4->output_width, filter_height, filter_width, 128, 256, batch);
    Conv64LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu());
    //Bconv6 Layer
    Conv64LayerParam* bconv6 = new Conv64LayerParam("Conv6", bconv5->output_height,
            bconv5->output_width, filter_height, filter_width, 256, 256, batch);
    Conv64LayerParam* bconv6_gpu = bconv6->initialize(config_file, bconv5->get_output_gpu());
    //Bconv7 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv7 = new Conv64LayerParam("Conv7", bconv6->output_height,
            bconv6->output_width, filter_height, filter_width, 256, 256, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv7_gpu = bconv7->initialize(config_file, bconv6->get_output_gpu());
    //Bconv8 Layer
    Conv64LayerParam* bconv8 = new Conv64LayerParam("Conv8", bconv7->output_height,
            bconv7->output_width, filter_height, filter_width, 256, 512, batch);
    Conv64LayerParam* bconv8_gpu = bconv8->initialize(config_file, bconv7->get_output_gpu());
    //Bconv9 Layer
    Conv64LayerParam* bconv9 = new Conv64LayerParam("Conv9", bconv8->output_height,
            bconv8->output_width, filter_height, filter_width, 512, 512, batch);
    Conv64LayerParam* bconv9_gpu = bconv9->initialize(config_file, bconv8->get_output_gpu());
    //Bconv10 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv10 = new Conv64LayerParam("Conv10", bconv9->output_height,
            bconv9->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv10_gpu = bconv10->initialize(config_file, bconv9->get_output_gpu());
    //Bconv11 Layer
    Conv64LayerParam* bconv11 = new Conv64LayerParam("Conv11", bconv10->output_height,
            bconv10->output_width, filter_height, filter_width, 512, 512, batch);
    Conv64LayerParam* bconv11_gpu = bconv11->initialize(config_file, bconv10->get_output_gpu());
    //Bconv12 Layer
    Conv64LayerParam* bconv12 = new Conv64LayerParam("Conv12", bconv11->output_height,
            bconv11->output_width, filter_height, filter_width, 512, 512, batch);
    Conv64LayerParam* bconv12_gpu = bconv12->initialize(config_file, bconv11->get_output_gpu());
    //Bconv13 Layer (fused 2x2 pooling, last conv output layout flag set)
    Conv64LayerParam* bconv13 = new Conv64LayerParam("Conv13", bconv12->output_height,
            bconv12->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
            true, 2, 2, true);
    Conv64LayerParam* bconv13_gpu = bconv13->initialize(config_file, bconv12->get_output_gpu());
    //Fc1 Layer
    Fc64LayerParam* bfc1 = new Fc64LayerParam("Fc1", batch, (bconv13->output_height)
            *(bconv13->output_width)*512, n_hidden);
    Fc64LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv13->get_output_gpu());
    //Fc2 Layer
    Fc64LayerParam* bfc2 = new Fc64LayerParam("Fc2", batch, n_hidden, n_hidden);
    Fc64LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
    //Out Layer
    Out64LayerParam* bout = new Out64LayerParam("Fout", batch, n_hidden, output_size, true);
    Out64LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu());

    //================ Setup Kernel =================
    int numThreads = 1024;
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    int numBlocksPerSm;
    int shared_memory = 512*sizeof(int)*32;
    // Opt in to 96KB of dynamic shared memory per block.
    hipFuncSetAttribute(vggnet64, hipFuncAttributeMaxDynamicSharedMemorySize,98304);
    hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, vggnet64, numThreads, shared_memory);
    void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bconv6_gpu,
        &bconv7_gpu, &bconv8_gpu, &bconv9_gpu, &bconv10_gpu, &bconv11_gpu, &bconv12_gpu, &bconv13_gpu,
        &bfc1_gpu, &bfc2_gpu, &bout_gpu};

    START_TIMER;
    // Cooperative launch is mandatory: vggnet64 uses grid-wide sync.
    hipError_t launch_status = hipLaunchCooperativeKernel((void*)vggnet64,
            numBlocksPerSm*deviceProp.multiProcessorCount,
            numThreads, args, shared_memory);
    STOP_TIMER;
    if (launch_status != hipSuccess)
        fprintf(stderr, "main64: cooperative kernel launch failed: %s\n",
                hipGetErrorString(launch_status));

    //================ Output =================
    float* output = bout->download_output();
    validate_prediction(output, image_labels, output_size, batch);

    // Release host-side resources (previously leaked).
    fclose(config_file);
    free(images);
    free(image_labels);

    delete bconv1;
    delete bconv2;
    delete bconv3;
    delete bconv4;
    delete bconv5;
    delete bconv6;
    delete bconv7;
    delete bconv8;
    delete bconv9;
    delete bconv10;
    delete bconv11;
    delete bconv12;
    delete bconv13;
    delete bfc1;
    delete bfc2;
    delete bout;
    return 0;
}
| bb02a3e22d58a59b3e684a6b0138db5e416380dc.cu | /** @file imagenet_vgg.cu
* @brief A VGG-16 network for ImageNet.
* @author Ang Li (PNNL)
*
*/
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <iostream>
#include <string>
#include <cooperative_groups.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "utility.h"
#include "sbnn32_param.h"
#include "sbnn64_param.h"
#include "sbnn32.cuh"
#include "sbnn64.cuh"
#include "data.h"
using namespace cooperative_groups;
using namespace std;
int main32();
int main64();
// Entry point: run the 64-bit-word pipeline. The 32-bit variant (main32)
// is available but currently disabled.
int main()
{
    return main64();
}
// Whole-network forward pass for binarized VGG-16 (32-bit-word bit packing).
// All 13 conv layers, 2 hidden FC layers and the output layer run inside one
// kernel; grid.sync() is the grid-wide barrier separating consecutive layers,
// so this kernel MUST be launched via cudaLaunchCooperativeKernel.
// ConvPool32Layer stages additionally apply the pooling configured on their
// layer parameters (Conv2/4/7/10/13 in this topology).
__global__ void vggnet32(
    In32Conv32LayerParam* bconv1,
    Conv32LayerParam* bconv2,
    Conv32LayerParam* bconv3,
    Conv32LayerParam* bconv4,
    Conv32LayerParam* bconv5,
    Conv32LayerParam* bconv6,
    Conv32LayerParam* bconv7,
    Conv32LayerParam* bconv8,
    Conv32LayerParam* bconv9,
    Conv32LayerParam* bconv10,
    Conv32LayerParam* bconv11,
    Conv32LayerParam* bconv12,
    Conv32LayerParam* bconv13,
    Fc32LayerParam* bfc1,
    Fc32LayerParam* bfc2,
    Out32LayerParam* bout)
{
    // Grid-wide group: grid.sync() below barriers across ALL blocks.
    grid_group grid = this_grid();
    //========= Conv1 ============
    // First layer consumes full-precision (fp32) input images.
    In32Conv32Layer(bconv1);
    grid.sync();
    //========= Conv2 ============
    ConvPool32Layer(bconv2);
    grid.sync();
    //========= Conv3 ============
    Conv32Layer(bconv3);
    grid.sync();
    //========= Conv4 ============
    ConvPool32Layer(bconv4);
    grid.sync();
    //========= Conv5 ============
    Conv32Layer(bconv5);
    grid.sync();
    //========= Conv6 ============
    Conv32Layer(bconv6);
    grid.sync();
    //========= Conv7 ============
    ConvPool32Layer(bconv7);
    grid.sync();
    //========= Conv8 ============
    Conv32Layer(bconv8);
    grid.sync();
    //========= Conv9 ============
    Conv32Layer(bconv9);
    grid.sync();
    //========= Conv10 ============
    ConvPool32Layer(bconv10);
    grid.sync();
    //========= Conv11 ============
    Conv32Layer(bconv11);
    grid.sync();
    //========= Conv12 ============
    Conv32Layer(bconv12);
    grid.sync();
    //========= Conv13 ============
    ConvPool32Layer(bconv13);
    grid.sync();
    //========= Fc1 ============
    Fc32Layer(bfc1);
    //Fc32LayerBatched(bfc1);
    grid.sync();
    //========= Fc2 ============
    Fc32Layer(bfc2);
    //Fc32LayerBatched(bfc2);
    grid.sync();
    ////========== Output ===========
    // No trailing barrier: kernel exit synchronizes the final layer.
    Out32Layer(bout);
    //Out32LayerBatched(bout);
}
// Whole-network forward pass for binarized VGG-16 (64-bit-word bit packing).
// Same layer sequence as vggnet32, with per-layer timing via the
// SET_KERNEL_TIMER / TICK_KERNEL_TIMER macros and the batched variants of the
// FC/output layers. grid.sync() barriers separate consecutive layers, so this
// kernel MUST be launched via cudaLaunchCooperativeKernel.
__global__ void vggnet64(
    In32Conv64LayerParam* bconv1,
    Conv64LayerParam* bconv2,
    Conv64LayerParam* bconv3,
    Conv64LayerParam* bconv4,
    Conv64LayerParam* bconv5,
    Conv64LayerParam* bconv6,
    Conv64LayerParam* bconv7,
    Conv64LayerParam* bconv8,
    Conv64LayerParam* bconv9,
    Conv64LayerParam* bconv10,
    Conv64LayerParam* bconv11,
    Conv64LayerParam* bconv12,
    Conv64LayerParam* bconv13,
    Fc64LayerParam* bfc1,
    Fc64LayerParam* bfc2,
    Out64LayerParam* bout)
{
    // Grid-wide group: grid.sync() below barriers across ALL blocks.
    grid_group grid = this_grid();
    SET_KERNEL_TIMER;
    //========= Conv1 ============
    // First layer consumes full-precision (fp32) input images.
    In32Conv64Layer(bconv1);
    grid.sync();
    TICK_KERNEL_TIMER(bconv1);
    //========= Conv2 ============
    ConvPool64Layer(bconv2);
    grid.sync();
    TICK_KERNEL_TIMER(bconv2);
    //========= Conv3 ============
    Conv64Layer(bconv3);
    grid.sync();
    TICK_KERNEL_TIMER(bconv3);
    //========= Conv4 ============
    ConvPool64Layer(bconv4);
    grid.sync();
    TICK_KERNEL_TIMER(bconv4);
    //========= Conv5 ============
    Conv64Layer(bconv5);
    grid.sync();
    TICK_KERNEL_TIMER(bconv5);
    //========= Conv6 ============
    Conv64Layer(bconv6);
    grid.sync();
    TICK_KERNEL_TIMER(bconv6);
    //========= Conv7 ============
    ConvPool64Layer(bconv7);
    grid.sync();
    TICK_KERNEL_TIMER(bconv7);
    //========= Conv8 ============
    Conv64Layer(bconv8);
    grid.sync();
    TICK_KERNEL_TIMER(bconv8);
    //========= Conv9 ============
    Conv64Layer(bconv9);
    grid.sync();
    TICK_KERNEL_TIMER(bconv9);
    //========= Conv10 ============
    ConvPool64Layer(bconv10);
    grid.sync();
    TICK_KERNEL_TIMER(bconv10);
    //========= Conv11 ============
    Conv64Layer(bconv11);
    grid.sync();
    TICK_KERNEL_TIMER(bconv11);
    //========= Conv12 ============
    Conv64Layer(bconv12);
    grid.sync();
    TICK_KERNEL_TIMER(bconv12);
    //========= Conv13 ============
    ConvPool64Layer(bconv13);
    grid.sync();
    TICK_KERNEL_TIMER(bconv13);
    //========= Fc1 ============
    //Fc64Layer(bfc1);
    Fc64LayerBatched(bfc1);
    grid.sync();
    TICK_KERNEL_TIMER(bfc1);
    //========= Fc2 ============
    //Fc64Layer(bfc2);
    Fc64LayerBatched(bfc2);
    grid.sync();
    TICK_KERNEL_TIMER(bfc2);
    ////========== Output ===========
    //Out64Layer(bout);
    // No trailing barrier: kernel exit synchronizes the final layer.
    Out64LayerBatched(bout);
    TICK_KERNEL_TIMER(bout);
}
// Host driver for the 32-bit-word binarized VGG-16 (see vggnet32).
// Loads a batch of ImageNet images and the pre-trained binary weights, builds
// the 16-layer network on the GPU, launches the whole network as a single
// cooperative kernel, and validates the predicted labels.
// Returns 0 on success, -1 if host resources could not be acquired.
int main32()
{
    int dev = 5;
    cudaSetDevice(dev);

    // VGG-16 hyper-parameters for 224x224 RGB ImageNet input.
    const unsigned batch = 32;
    const unsigned output_size = 1000;
    const unsigned image_height = 224;
    const unsigned image_width = 224;
    const unsigned image_channel = 3;
    const unsigned filter_height = 3;
    const unsigned filter_width = 3;
    const unsigned n_hidden = 4096;

    //=============== Get Input and Label =================
    float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float));
    unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned));
    if (images == NULL || image_labels == NULL)
    {
        fprintf(stderr, "main32: host allocation of image/label buffers failed.\n");
        free(images);
        free(image_labels);
        return -1;
    }
    read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch);

    //================ Get Weight =================
    FILE* config_file = fopen("../pytorch/BinaryNet/vgg_imagenet.csv","r");
    if (config_file == NULL)
    {
        // Every initialize() below reads weights sequentially from this file.
        fprintf(stderr, "main32: cannot open weight file ../pytorch/BinaryNet/vgg_imagenet.csv\n");
        free(images);
        free(image_labels);
        return -1;
    }

    //================ Set Network =================
    // Each layer is wired to the GPU output buffer of its predecessor and
    // consumes its weights from config_file in construction order.
    //Bconv1 Layer
    In32Conv32LayerParam* bconv1 = new In32Conv32LayerParam("Conv1", image_height, image_width,
            filter_height, filter_width, 3, 64, batch);
    In32Conv32LayerParam* bconv1_gpu = bconv1->initialize(images, config_file);
    //Bconv2 Layer (fused 2x2 pooling)
    Conv32LayerParam* bconv2 = new Conv32LayerParam("Conv2", bconv1->output_height,
            bconv1->output_width, filter_height, filter_width, 64, 64, batch, 1, 1,
            true, 2, 2, false);
    Conv32LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu());
    //Bconv3 Layer
    Conv32LayerParam* bconv3 = new Conv32LayerParam("Conv3", bconv2->output_height,
            bconv2->output_width, filter_height, filter_width, 64, 128, batch);
    Conv32LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu());
    //Bconv4 Layer (fused 2x2 pooling)
    Conv32LayerParam* bconv4 = new Conv32LayerParam("Conv4", bconv3->output_height,
            bconv3->output_width, filter_height, filter_width, 128, 128, batch, 1, 1,
            true, 2, 2, false);
    Conv32LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu());
    //Bconv5 Layer
    Conv32LayerParam* bconv5 = new Conv32LayerParam("Conv5", bconv4->output_height,
            bconv4->output_width, filter_height, filter_width, 128, 256, batch);
    Conv32LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu());
    //Bconv6 Layer
    Conv32LayerParam* bconv6 = new Conv32LayerParam("Conv6", bconv5->output_height,
            bconv5->output_width, filter_height, filter_width, 256, 256, batch);
    Conv32LayerParam* bconv6_gpu = bconv6->initialize(config_file, bconv5->get_output_gpu());
    //Bconv7 Layer (fused 2x2 pooling)
    Conv32LayerParam* bconv7 = new Conv32LayerParam("Conv7", bconv6->output_height,
            bconv6->output_width, filter_height, filter_width, 256, 256, batch, 1, 1,
            true, 2, 2, false);
    Conv32LayerParam* bconv7_gpu = bconv7->initialize(config_file, bconv6->get_output_gpu());
    //Bconv8 Layer
    Conv32LayerParam* bconv8 = new Conv32LayerParam("Conv8", bconv7->output_height,
            bconv7->output_width, filter_height, filter_width, 256, 512, batch);
    Conv32LayerParam* bconv8_gpu = bconv8->initialize(config_file, bconv7->get_output_gpu());
    //Bconv9 Layer
    Conv32LayerParam* bconv9 = new Conv32LayerParam("Conv9", bconv8->output_height,
            bconv8->output_width, filter_height, filter_width, 512, 512, batch);
    Conv32LayerParam* bconv9_gpu = bconv9->initialize(config_file, bconv8->get_output_gpu());
    //Bconv10 Layer (fused 2x2 pooling)
    Conv32LayerParam* bconv10 = new Conv32LayerParam("Conv10", bconv9->output_height,
            bconv9->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
            true, 2, 2, false);
    Conv32LayerParam* bconv10_gpu = bconv10->initialize(config_file, bconv9->get_output_gpu());
    //Bconv11 Layer
    Conv32LayerParam* bconv11 = new Conv32LayerParam("Conv11", bconv10->output_height,
            bconv10->output_width, filter_height, filter_width, 512, 512, batch);
    Conv32LayerParam* bconv11_gpu = bconv11->initialize(config_file, bconv10->get_output_gpu());
    //Bconv12 Layer
    Conv32LayerParam* bconv12 = new Conv32LayerParam("Conv12", bconv11->output_height,
            bconv11->output_width, filter_height, filter_width, 512, 512, batch);
    Conv32LayerParam* bconv12_gpu = bconv12->initialize(config_file, bconv11->get_output_gpu());
    //Bconv13 Layer (fused 2x2 pooling, last conv output layout flag set)
    Conv32LayerParam* bconv13 = new Conv32LayerParam("Conv13", bconv12->output_height,
            bconv12->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
            true, 2, 2, true);
    Conv32LayerParam* bconv13_gpu = bconv13->initialize(config_file, bconv12->get_output_gpu());
    //Fc1 Layer
    Fc32LayerParam* bfc1 = new Fc32LayerParam("Fc1", batch, (bconv13->output_height)
            *(bconv13->output_width)*512, n_hidden);
    Fc32LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv13->get_output_gpu());
    //Fc2 Layer
    Fc32LayerParam* bfc2 = new Fc32LayerParam("Fc2", batch, n_hidden, n_hidden);
    Fc32LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
    //Out Layer
    Out32LayerParam* bout = new Out32LayerParam("Fout", batch, n_hidden, output_size, true);
    Out32LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu());

    //================ Setup Kernel =================
    int numThreads = 1024;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    int numBlocksPerSm;
    int shared_memory = 512*sizeof(int)*32;
    // Opt in to 96KB of dynamic shared memory per block.
    cudaFuncSetAttribute(vggnet32, cudaFuncAttributeMaxDynamicSharedMemorySize,98304);
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, vggnet32, numThreads, shared_memory);
    void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bconv6_gpu,
        &bconv7_gpu, &bconv8_gpu, &bconv9_gpu, &bconv10_gpu, &bconv11_gpu, &bconv12_gpu, &bconv13_gpu,
        &bfc1_gpu, &bfc2_gpu, &bout_gpu};

    START_TIMER;
    // Cooperative launch is mandatory: vggnet32 uses grid-wide sync.
    cudaError_t launch_status = cudaLaunchCooperativeKernel((void*)vggnet32,
            numBlocksPerSm*deviceProp.multiProcessorCount, numThreads, args, shared_memory);
    STOP_TIMER;
    if (launch_status != cudaSuccess)
        fprintf(stderr, "main32: cooperative kernel launch failed: %s\n",
                cudaGetErrorString(launch_status));

    //================ Output =================
    float* output = bout->download_output();
    validate_prediction(output, image_labels, output_size, batch);

    // Release host-side resources (previously leaked).
    fclose(config_file);
    free(images);
    free(image_labels);

    delete bconv1;
    delete bconv2;
    delete bconv3;
    delete bconv4;
    delete bconv5;
    delete bconv6;
    delete bconv7;
    delete bconv8;
    delete bconv9;
    delete bconv10;
    delete bconv11;
    delete bconv12;
    delete bconv13;
    delete bfc1;
    delete bfc2;
    delete bout;
    return 0;
}
// Host driver for the 64-bit-word binarized VGG-16 (see vggnet64).
// Loads a batch of ImageNet images and the pre-trained binary weights, builds
// the 16-layer network on the GPU, launches the whole network as a single
// cooperative kernel, and validates the predicted labels.
// Returns 0 on success, -1 if host resources could not be acquired.
int main64()
{
    int dev = 4;
    cudaSetDevice(dev);

    // VGG-16 hyper-parameters for 224x224 RGB ImageNet input.
    const unsigned batch = 32;
    const unsigned output_size = 1000;
    const unsigned image_height = 224;
    const unsigned image_width = 224;
    const unsigned image_channel = 3;
    const unsigned filter_height = 3;
    const unsigned filter_width = 3;
    const unsigned n_hidden = 4096;

    //=============== Get Input and Label =================
    float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float));
    unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned));
    if (images == NULL || image_labels == NULL)
    {
        fprintf(stderr, "main64: host allocation of image/label buffers failed.\n");
        free(images);
        free(image_labels);
        return -1;
    }
    read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch);

    //================ Get Weight =================
    FILE* config_file = fopen("../pytorch/BinaryNet/vgg_imagenet.csv","r");
    if (config_file == NULL)
    {
        // Every initialize() below reads weights sequentially from this file.
        fprintf(stderr, "main64: cannot open weight file ../pytorch/BinaryNet/vgg_imagenet.csv\n");
        free(images);
        free(image_labels);
        return -1;
    }

    //================ Set Network =================
    // Each layer is wired to the GPU output buffer of its predecessor and
    // consumes its weights from config_file in construction order.
    //Bconv1 Layer
    In32Conv64LayerParam* bconv1 = new In32Conv64LayerParam("Conv1", image_height, image_width,
            filter_height, filter_width, 3, 64, batch);
    In32Conv64LayerParam* bconv1_gpu = bconv1->initialize(images, config_file);
    //Bconv2 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv2 = new Conv64LayerParam("Conv2", bconv1->output_height,
            bconv1->output_width, filter_height, filter_width, 64, 64, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu());
    //Bconv3 Layer
    Conv64LayerParam* bconv3 = new Conv64LayerParam("Conv3", bconv2->output_height,
            bconv2->output_width, filter_height, filter_width, 64, 128, batch);
    Conv64LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu());
    //Bconv4 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv4 = new Conv64LayerParam("Conv4", bconv3->output_height,
            bconv3->output_width, filter_height, filter_width, 128, 128, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu());
    //Bconv5 Layer
    Conv64LayerParam* bconv5 = new Conv64LayerParam("Conv5", bconv4->output_height,
            bconv4->output_width, filter_height, filter_width, 128, 256, batch);
    Conv64LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu());
    //Bconv6 Layer
    Conv64LayerParam* bconv6 = new Conv64LayerParam("Conv6", bconv5->output_height,
            bconv5->output_width, filter_height, filter_width, 256, 256, batch);
    Conv64LayerParam* bconv6_gpu = bconv6->initialize(config_file, bconv5->get_output_gpu());
    //Bconv7 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv7 = new Conv64LayerParam("Conv7", bconv6->output_height,
            bconv6->output_width, filter_height, filter_width, 256, 256, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv7_gpu = bconv7->initialize(config_file, bconv6->get_output_gpu());
    //Bconv8 Layer
    Conv64LayerParam* bconv8 = new Conv64LayerParam("Conv8", bconv7->output_height,
            bconv7->output_width, filter_height, filter_width, 256, 512, batch);
    Conv64LayerParam* bconv8_gpu = bconv8->initialize(config_file, bconv7->get_output_gpu());
    //Bconv9 Layer
    Conv64LayerParam* bconv9 = new Conv64LayerParam("Conv9", bconv8->output_height,
            bconv8->output_width, filter_height, filter_width, 512, 512, batch);
    Conv64LayerParam* bconv9_gpu = bconv9->initialize(config_file, bconv8->get_output_gpu());
    //Bconv10 Layer (fused 2x2 pooling)
    Conv64LayerParam* bconv10 = new Conv64LayerParam("Conv10", bconv9->output_height,
            bconv9->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
            true, 2, 2, false);
    Conv64LayerParam* bconv10_gpu = bconv10->initialize(config_file, bconv9->get_output_gpu());
    //Bconv11 Layer
    Conv64LayerParam* bconv11 = new Conv64LayerParam("Conv11", bconv10->output_height,
            bconv10->output_width, filter_height, filter_width, 512, 512, batch);
    Conv64LayerParam* bconv11_gpu = bconv11->initialize(config_file, bconv10->get_output_gpu());
    //Bconv12 Layer
    Conv64LayerParam* bconv12 = new Conv64LayerParam("Conv12", bconv11->output_height,
            bconv11->output_width, filter_height, filter_width, 512, 512, batch);
    Conv64LayerParam* bconv12_gpu = bconv12->initialize(config_file, bconv11->get_output_gpu());
    //Bconv13 Layer (fused 2x2 pooling, last conv output layout flag set)
    Conv64LayerParam* bconv13 = new Conv64LayerParam("Conv13", bconv12->output_height,
            bconv12->output_width, filter_height, filter_width, 512, 512, batch, 1, 1,
            true, 2, 2, true);
    Conv64LayerParam* bconv13_gpu = bconv13->initialize(config_file, bconv12->get_output_gpu());
    //Fc1 Layer
    Fc64LayerParam* bfc1 = new Fc64LayerParam("Fc1", batch, (bconv13->output_height)
            *(bconv13->output_width)*512, n_hidden);
    Fc64LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv13->get_output_gpu());
    //Fc2 Layer
    Fc64LayerParam* bfc2 = new Fc64LayerParam("Fc2", batch, n_hidden, n_hidden);
    Fc64LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu());
    //Out Layer
    Out64LayerParam* bout = new Out64LayerParam("Fout", batch, n_hidden, output_size, true);
    Out64LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu());

    //================ Setup Kernel =================
    int numThreads = 1024;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    int numBlocksPerSm;
    int shared_memory = 512*sizeof(int)*32;
    // Opt in to 96KB of dynamic shared memory per block.
    cudaFuncSetAttribute(vggnet64, cudaFuncAttributeMaxDynamicSharedMemorySize,98304);
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, vggnet64, numThreads, shared_memory);
    void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bconv6_gpu,
        &bconv7_gpu, &bconv8_gpu, &bconv9_gpu, &bconv10_gpu, &bconv11_gpu, &bconv12_gpu, &bconv13_gpu,
        &bfc1_gpu, &bfc2_gpu, &bout_gpu};

    START_TIMER;
    // Cooperative launch is mandatory: vggnet64 uses grid-wide sync.
    cudaError_t launch_status = cudaLaunchCooperativeKernel((void*)vggnet64,
            numBlocksPerSm*deviceProp.multiProcessorCount,
            numThreads, args, shared_memory);
    STOP_TIMER;
    if (launch_status != cudaSuccess)
        fprintf(stderr, "main64: cooperative kernel launch failed: %s\n",
                cudaGetErrorString(launch_status));

    //================ Output =================
    float* output = bout->download_output();
    validate_prediction(output, image_labels, output_size, batch);

    // Release host-side resources (previously leaked).
    fclose(config_file);
    free(images);
    free(image_labels);

    delete bconv1;
    delete bconv2;
    delete bconv3;
    delete bconv4;
    delete bconv5;
    delete bconv6;
    delete bconv7;
    delete bconv8;
    delete bconv9;
    delete bconv10;
    delete bconv11;
    delete bconv12;
    delete bconv13;
    delete bfc1;
    delete bfc2;
    delete bout;
    return 0;
}
|
7afd31ac71a798cef65be1d05effbed42eb5c6ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <voxelpipe/compact_ranges.h>
#include "thrust_arch.h"
#include <thrust/scan.h>
#define CTA_SIZE 512
#define CTA_H (512/32)
namespace voxelpipe {
namespace compact {
// Warp-level predicate scan.
// Writes to 'popc' the number of lanes in the warp whose predicate p is true,
// and returns the number of EARLIER lanes (lane id < tidx) with p true, i.e.
// this lane's exclusive prefix popcount.
// Uses the legacy mask-less __ballot intrinsic and assumes 32-wide warps
// executing in lockstep (pre-Volta warp-synchronous idiom).
// NOTE(review): for tidx == 0 the shift amount is 32, which is formally out
// of range for a 32-bit shift in C++; NVIDIA hardware clamps the shift so the
// result is 0 — confirm this also holds for the HIP/AMD target, where
// wavefronts may be 64 lanes wide and __ballot returns a 64-bit mask.
// 'red' is unused here; it is kept only to mirror the caller's shared-memory
// plumbing for the scratch segment.
__forceinline__ __device__ int scan_popc(bool p, int& popc, const int tidx, volatile int *red)
{
    const uint32 mask = __ballot( p );
    popc = __popc( mask );
    return __popc( mask << (32 - tidx) );
}
// Intra-warp inclusive Hillis-Steele scan of red[0..31], performed in place.
// The 32 input values are moved to red[32..63] and red[0..31] is zeroed, so
// the unguarded red[tidx - offset] reads below always land on the zero pad
// instead of reading out of bounds; 'red' must therefore point to a 64-int
// shared-memory segment.
// Correctness relies on implicit warp-synchronous execution over 'volatile'
// shared memory (legacy pre-Volta idiom — no __syncwarp between steps).
// NOTE(review): the 'limit' parameter is unused.
__forceinline__ __device__ void scan_warp(int tidx, unsigned int limit, volatile int *red)
{
    const uint32 val = red[tidx];

    // pad initial segment with zeros
    red[tidx] = 0;
    red += 32;

    // Hillis-Steele inclusive scan: log2(32) = 5 doubling steps
    red[tidx] = val;
    red[tidx] += red[tidx-1];
    red[tidx] += red[tidx-2];
    red[tidx] += red[tidx-4];
    red[tidx] += red[tidx-8];
    red[tidx] += red[tidx-16];

    // propagate results back to the original (unshifted) segment
    red[tidx-32] = red[tidx];
}
// CTA-wide popcount scan helper.
// Returns this thread's exclusive rank among the 'valid' threads of its OWN
// warp. As a side effect, sm_warp_popc[0..CTA_H-1] is overwritten with the
// INCLUSIVE scan of per-warp popcounts, so after the call
// sm_warp_popc[CTA_H-1] holds the CTA-wide total of valid threads and
// sm_warp_popc[widx-1] the count of valid threads in all preceding warps.
// Must be called by all threads of the CTA (contains __syncthreads()).
__forceinline__ __device__ int scan_popc(bool valid, volatile int* sm_warp_popc)
{
    int idx  = threadIdx.x;         // linear thread id within the CTA
    int tidx = threadIdx.x & 31;    // lane id within the warp
    int widx = threadIdx.x / 32;    // warp id within the CTA

    // 64 ints per warp: 32-entry zero pad + 32-entry scan segment (see scan_warp)
    __shared__ volatile int sm_red[CTA_SIZE*2];
    volatile int *sm_warp_red = sm_red + widx*64;

    int popc;
    int eidx = scan_popc(valid,popc,tidx,sm_warp_red);
    if (tidx == 0)
        sm_warp_popc[widx] = popc;  // population count of this warp

    __syncthreads(); // wait until all warps have written wpopc to shared mem

    const unsigned int warpcount = CTA_H;

    // use 1 warp to scan the per-warp popcounts
    if (widx == 0)
        scan_warp( idx, warpcount, sm_warp_popc );

    __syncthreads();
    return eidx;
}
// Pass 1 of the two-pass range compaction: counting.
// Each CTA walks its slice [block_id * n_elements_per_block, +n_elements_per_block)
// in CTA_SIZE-wide groups, counts how many input ranges are non-empty
// (src_begin[i] < src_end[i]) and writes the per-CTA total to
// offsets[block_id]. A host-side exclusive scan of 'offsets' then yields each
// CTA's output base for compact_ranges_write.
// NOTE(review): the 'n_blocks' parameter is unused in this kernel.
__global__ void compact_ranges_count(
    uint32*        offsets,
    const int32*   src_begin,
    const int32*   src_end,
    const uint32   n_elements,
    const uint32   n_blocks,
    const uint32   n_elements_per_block)
{
    //----------------------------------------------
    // Init
    //----------------------------------------------

    // useful variables (assumes 1D indexing)
    __shared__ volatile int sm_warp_popc[64];

    const uint32 block_id    = blockIdx.x;
    const uint32 group_size  = CTA_SIZE;
    const uint32 block_begin = block_id * n_elements_per_block;   // constant across CTA
    const uint32 block_end   = block_begin + n_elements_per_block; // constant across CTA

    // running count of non-empty ranges seen by this CTA
    uint32 offset = 0;

    for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size)
    {
        const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA
        // uniform exit: every thread takes this break together, so the
        // __syncthreads() calls below are never divergent
        if (group_end <= group_begin)
            break;

        __syncthreads();

        //----------------------------------------------
        // Compaction condition
        //----------------------------------------------

        const uint32 local_id  = threadIdx.x;
        const uint32 global_id = group_begin + local_id;

        // check if input should go to output
        bool valid = false;
        if (global_id < n_elements)
        {
            if (src_begin[ global_id ] < src_end[ global_id ])
                valid = true;
        }

        //---------------------------------------------------
        // Do an intra-cta reduction on the number of outputs
        //---------------------------------------------------
        scan_popc( valid, sm_warp_popc );

        // ----------------------------------------------
        // Increment global offset
        // ----------------------------------------------
        // sm_warp_popc[CTA_H-1] now holds the CTA-wide count for this group
        const unsigned int warpcount = CTA_H;
        offset += sm_warp_popc[warpcount-1]; // constant across CTA
        __syncthreads();
    }
    if (threadIdx.x == 0)
        offsets[ block_id ] = offset;
}
// Pass 2 of the two-pass range compaction: writing.
// Re-evaluates the same non-empty predicate as compact_ranges_count over the
// same per-CTA slices, and — using 'offsets' (the scanned per-CTA bases) —
// writes every non-empty range [src_begin[i], src_end[i]) contiguously to
// dest_begin/dest_end, recording the original index i in dest_id.
// Output order is stable within and across CTAs.
// NOTE(review): the 'n_blocks' parameter is unused in this kernel.
__global__ void compact_ranges_write(
    int32*         dest_begin,
    int32*         dest_end,
    int32*         dest_id,
    const uint32*  offsets,
    const int32*   src_begin,
    const int32*   src_end,
    const uint32   n_elements,
    const uint32   n_blocks,
    const uint32   n_elements_per_block)
{
    //----------------------------------------------
    // Init
    //----------------------------------------------

    // useful variables (assumes 1D indexing)
    const int widx = threadIdx.x / 32;   // warp id within the CTA
    __shared__ volatile int sm_warp_popc[64];

    const uint32 block_id    = blockIdx.x;
    const uint32 group_size  = CTA_SIZE;
    const uint32 block_begin = block_id * n_elements_per_block;   // constant across CTA
    const uint32 block_end   = block_begin + n_elements_per_block; // constant across CTA

    // running output cursor for this CTA, seeded with its scanned base
    uint32 offset = offsets[ block_id ]; // constant across CTA

    for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size)
    {
        const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA
        // uniform exit: every thread breaks together (see compact_ranges_count)
        if (group_end <= group_begin)
            break;

        __syncthreads();

        //----------------------------------------------
        // Compaction condition
        //----------------------------------------------

        const uint32 local_id  = threadIdx.x;
        const uint32 global_id = group_begin + local_id;

        // check if input should go to output
        bool valid = false;
        int32 in_begin;
        int32 in_end;
        if (global_id < n_elements)
        {
            in_begin = src_begin[ global_id ];
            in_end   = src_end[ global_id ];
            if (in_begin < in_end)
                valid = true;
        }

        //---------------------------------------------------
        // Do an intra-cta reduction on the number of outputs
        //---------------------------------------------------
        const int eidx = scan_popc( valid, sm_warp_popc );

        //----------------------------------------------
        // Write to compact output buffer
        //----------------------------------------------
        if (valid)
        {
            // rank within the CTA = preceding warps' counts + intra-warp rank
            //const uint32 tpopc = (widx ? sm_warp_popc[widx-1] : 0u) + eidx;
            uint32 tpopc = eidx;
            if (widx)
                tpopc += sm_warp_popc[widx-1];

            const uint32 destIdx = offset + tpopc;
            dest_begin[destIdx] = in_begin;
            dest_end[destIdx]   = in_end;
            dest_id[destIdx]    = global_id;
        }

        // ----------------------------------------------
        // Increment global offset
        // ----------------------------------------------
        __syncthreads();
        const unsigned int warpcount = CTA_H;
        offset += sm_warp_popc[warpcount-1]; // constant across CTA
    }
}
} // namespace compact
Compact_ranges::Compact_ranges(const uint32 n)
{
const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0);
m_counters.resize( max_blocks );
m_offsets.resize( max_blocks );
}
// given two arrays {b[0], b[1], ..., b[n-1]} and
// {e[0], e[1], ..., e[n-1]} specifying a set of n
// possibly empty ranges { [b(i),e(i)) : i = 0,...,n-1 },
// return a copy of the two arrays with all the empty
// ranges removed, and an array specifying their position
// in the original list.
//
// \param dest_begin output range start indices
// \param dest_end output range end indices
// \param dest_id output range index in the original list
// \param src_begin input range start indices
// \param src_end input range end indices
// \param n number of input elements
// \result number of output elements
//
uint32 Compact_ranges::run(int32* dest_begin, int32* dest_end, int32* dest_id, const int32* src_begin, const int32* src_end, const uint32 n_elements)
{
const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0);
const uint32 group_size = CTA_SIZE;
const uint32 n_groups = (n_elements + group_size-1) / group_size;
const size_t n_blocks = ::min( (int)max_blocks, (int)n_groups );
const uint32 n_groups_per_block = (n_groups + n_blocks-1) / n_blocks; // constant across CTA
const uint32 n_elements_per_block = n_groups_per_block * group_size; // constant across CTA
uint32* counters_ptr = thrust::raw_pointer_cast( &*(m_counters.begin()) );
uint32* offsets_ptr = thrust::raw_pointer_cast( &*(m_offsets.begin()) );
// count the number of outputs per block
hipLaunchKernelGGL(( compact::compact_ranges_count), dim3(n_blocks),dim3(CTA_SIZE), 0, 0, counters_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block );
hipDeviceSynchronize();
// read the last block counter before it's overwritten
const uint32 last_block = m_counters[n_blocks-1];
// do an exclusive scan on the block counters to get proper offsets
thrust::exclusive_scan(
m_counters.begin(),
m_counters.begin() + n_blocks,
m_offsets.begin(),
uint32(0) );
hipDeviceSynchronize();
// perform the actual writing
hipLaunchKernelGGL(( compact::compact_ranges_write), dim3(n_blocks),dim3(CTA_SIZE), 0, 0, dest_begin, dest_end, dest_id, offsets_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block );
hipDeviceSynchronize();
// return number of output elements
return m_offsets[n_blocks-1] + last_block;
}
#undef CTA_SIZE
#undef CTA_H
} // namespace voxelpipe
| 7afd31ac71a798cef65be1d05effbed42eb5c6ee.cu | /*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <voxelpipe/compact_ranges.h>
#include "thrust_arch.h"
#include <thrust/scan.h>
#define CTA_SIZE 512
#define CTA_H (512/32)
namespace voxelpipe {
namespace compact {
__forceinline__ __device__ int scan_popc(bool p, int& popc, const int tidx, volatile int *red)
{
const uint32 mask = __ballot( p );
popc = __popc( mask );
return __popc( mask << (32 - tidx) );
}
// intra-warp inclusive scan
__forceinline__ __device__ void scan_warp(int tidx, unsigned int limit, volatile int *red)
{
const uint32 val = red[tidx];
// pad initial segment with zeros
red[tidx] = 0;
red += 32;
// Hillis-Steele scan
red[tidx] = val;
red[tidx] += red[tidx-1];
red[tidx] += red[tidx-2];
red[tidx] += red[tidx-4];
red[tidx] += red[tidx-8];
red[tidx] += red[tidx-16];
// propagate resullpv back
red[tidx-32] = red[tidx];
}
__forceinline__ __device__ int scan_popc(bool valid, volatile int* sm_warp_popc)
{
int idx = threadIdx.x;
int tidx = threadIdx.x & 31;
int widx = threadIdx.x / 32;
__shared__ volatile int sm_red[CTA_SIZE*2];
volatile int *sm_warp_red = sm_red + widx*64;
int popc;
int eidx = scan_popc(valid,popc,tidx,sm_warp_red);
if (tidx == 0)
sm_warp_popc[widx] = popc; // population count of this warp
__syncthreads(); // wait until all warps have written wpopc to shared mem
const unsigned int warpcount = CTA_H;
// - use 1 warp to sum over wpopc
if (widx == 0)
scan_warp( idx, warpcount, sm_warp_popc );
__syncthreads();
return eidx;
}
// count the amount of output non-empty ranges in the source list
__global__ void compact_ranges_count(
uint32* offsets,
const int32* src_begin,
const int32* src_end,
const uint32 n_elements,
const uint32 n_blocks,
const uint32 n_elements_per_block)
{
//----------------------------------------------
// Init
//----------------------------------------------
// useful variables (assumes 1D indexing)
__shared__ volatile int sm_warp_popc[64];
const uint32 block_id = blockIdx.x;
const uint32 group_size = CTA_SIZE;
const uint32 block_begin = block_id * n_elements_per_block; // constant across CTA
const uint32 block_end = block_begin + n_elements_per_block; // constant across CTA
uint32 offset = 0;
for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size)
{
const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA
if (group_end <= group_begin)
break;
__syncthreads();
//----------------------------------------------
// Compaction condition
//----------------------------------------------
const uint32 local_id = threadIdx.x;
const uint32 global_id = group_begin + local_id;
// check if input should go to output
bool valid = false;
if (global_id < n_elements)
{
if (src_begin[ global_id ] < src_end[ global_id ])
valid = true;
}
//---------------------------------------------------
// Do an intra-cta reduction on the number of outputs
//---------------------------------------------------
scan_popc( valid, sm_warp_popc );
// ----------------------------------------------
// Increment global offset
// ----------------------------------------------
const unsigned int warpcount = CTA_H;
offset += sm_warp_popc[warpcount-1]; // constant across CTA
__syncthreads();
}
if (threadIdx.x == 0)
offsets[ block_id ] = offset;
}
// emit the compacted list of non-empty ranges
__global__ void compact_ranges_write(
int32* dest_begin,
int32* dest_end,
int32* dest_id,
const uint32* offsets,
const int32* src_begin,
const int32* src_end,
const uint32 n_elements,
const uint32 n_blocks,
const uint32 n_elements_per_block)
{
//----------------------------------------------
// Init
//----------------------------------------------
// useful variables (assumes 1D indexing)
const int widx = threadIdx.x / 32;
__shared__ volatile int sm_warp_popc[64];
const uint32 block_id = blockIdx.x;
const uint32 group_size = CTA_SIZE;
const uint32 block_begin = block_id * n_elements_per_block; // constant across CTA
const uint32 block_end = block_begin + n_elements_per_block; // constant across CTA
uint32 offset = offsets[ block_id ]; // constant across CTA
for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size)
{
const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA
if (group_end <= group_begin)
break;
__syncthreads();
//----------------------------------------------
// Compaction condition
//----------------------------------------------
const uint32 local_id = threadIdx.x;
const uint32 global_id = group_begin + local_id;
// check if input should go to output
bool valid = false;
int32 in_begin;
int32 in_end;
if (global_id < n_elements)
{
in_begin = src_begin[ global_id ];
in_end = src_end[ global_id ];
if (in_begin < in_end)
valid = true;
}
//---------------------------------------------------
// Do an intra-cta reduction on the number of outputs
//---------------------------------------------------
const int eidx = scan_popc( valid, sm_warp_popc );
//----------------------------------------------
// Write to compact output buffer
//----------------------------------------------
if (valid)
{
//const uint32 tpopc = (widx ? sm_warp_popc[widx-1] : 0u) + eidx;
uint32 tpopc = eidx;
if (widx)
tpopc += sm_warp_popc[widx-1];
const uint32 destIdx = offset + tpopc;
dest_begin[destIdx] = in_begin;
dest_end[destIdx] = in_end;
dest_id[destIdx] = global_id;
}
// ----------------------------------------------
// Increment global offset
// ----------------------------------------------
__syncthreads();
const unsigned int warpcount = CTA_H;
offset += sm_warp_popc[warpcount-1]; // constant across CTA
}
}
} // namespace compact
Compact_ranges::Compact_ranges(const uint32 n)
{
const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0);
m_counters.resize( max_blocks );
m_offsets.resize( max_blocks );
}
// given two arrays {b[0], b[1], ..., b[n-1]} and
// {e[0], e[1], ..., e[n-1]} specifying a set of n
// possibly empty ranges { [b(i),e(i)) : i = 0,...,n-1 },
// return a copy of the two arrays with all the empty
// ranges removed, and an array specifying their position
// in the original list.
//
// \param dest_begin output range start indices
// \param dest_end output range end indices
// \param dest_id output range index in the original list
// \param src_begin input range start indices
// \param src_end input range end indices
// \param n number of input elements
// \result number of output elements
//
uint32 Compact_ranges::run(int32* dest_begin, int32* dest_end, int32* dest_id, const int32* src_begin, const int32* src_end, const uint32 n_elements)
{
const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0);
const uint32 group_size = CTA_SIZE;
const uint32 n_groups = (n_elements + group_size-1) / group_size;
const size_t n_blocks = std::min( (int)max_blocks, (int)n_groups );
const uint32 n_groups_per_block = (n_groups + n_blocks-1) / n_blocks; // constant across CTA
const uint32 n_elements_per_block = n_groups_per_block * group_size; // constant across CTA
uint32* counters_ptr = thrust::raw_pointer_cast( &*(m_counters.begin()) );
uint32* offsets_ptr = thrust::raw_pointer_cast( &*(m_offsets.begin()) );
// count the number of outputs per block
compact::compact_ranges_count<<<n_blocks,CTA_SIZE>>>( counters_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block );
cudaThreadSynchronize();
// read the last block counter before it's overwritten
const uint32 last_block = m_counters[n_blocks-1];
// do an exclusive scan on the block counters to get proper offsets
thrust::exclusive_scan(
m_counters.begin(),
m_counters.begin() + n_blocks,
m_offsets.begin(),
uint32(0) );
cudaThreadSynchronize();
// perform the actual writing
compact::compact_ranges_write<<<n_blocks,CTA_SIZE>>>( dest_begin, dest_end, dest_id, offsets_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block );
cudaThreadSynchronize();
// return number of output elements
return m_offsets[n_blocks-1] + last_block;
}
#undef CTA_SIZE
#undef CTA_H
} // namespace voxelpipe
|
33689efd956dcda6dd3a83a5b89c719616be6fec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::cuda_stream(),
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
| 33689efd956dcda6dd3a83a5b89c719616be6fec.cu | #include <vector>
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS, 0, Caffe::cuda_stream()>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
19f393a9cc4ee9ff736fd0e986c0f76a4a3ffe2b.hip | // !!! This is a file automatically generated by hipify!!!
// Include C++ header files.
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "./include/config.cuh"
#include "./include/util.cuh"
#include "./include/gpuMatrixCublas.cuh"
#include "./include/matrixNaive.cuh"
#include "./include/matrixTile.cuh"
#include "./include/matrixCoalescing.cuh"
#include "./include/matrixBankConflict.cuh"
#include "./include/matrixTileWPT.cuh"
#include "./include/matrixTranspose.cuh"
#include "./include/matrixComOpt.cuh"
#include "./include/cpuMatrixStrassen.cuh"
#include "./include/matrixPrefetch.cuh"
//#include "./include/gpuMatrixStrassen.cuh"
using namespace std;
// Include local CUDA header files.
int main(int argc, char ** argv){
// set up device
int dev = 0;
initDevice(dev);
// input m, n, k
int m = 32, n = 32, k = 32;
if(argc > 1) m = atoi(argv[1]);
if(m % 32) {
cout << "The input must be a multiple number of 32!\n";
return 0;
}
n = k = m;
// Allocate memory space on the host
int *h_A = (int*)malloc(sizeof(int) * (m * n));
int *h_B = (int*)malloc(sizeof(int) * (n * k));
int *h_BT =(int*)malloc(sizeof(int) * (n * k));
int *h_C = (int*)malloc(sizeof(int) * (m * k));
int *h_odata = (int*)malloc(sizeof(int) * (m * k));
// Initialize
initialDataInt(h_A, m * n);
initialDataInt(h_B, n * k);
matrixTranspose(h_B, h_BT, n, k);
// Allocate memory space on the device
int *d_A, *d_B, *d_BT, *d_C;
hipMalloc((void**)&d_A, sizeof(int) * (m * n));
hipMalloc((void**)&d_B, sizeof(int) * (n * k));
hipMalloc((void**)&d_BT,sizeof(int) * (n * k));
hipMalloc((void**)&d_C, sizeof(int) * (m * k));
// Copy matrix A and B from host to device memory
hipMemcpy(d_A, h_A, sizeof(int) * (m * n), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeof(int) * (n * k), hipMemcpyHostToDevice);
hipMemcpy(d_BT,h_BT,sizeof(int) * (n * k), hipMemcpyHostToDevice);
// CPU Matrix multiplication
double iStart = cpuSecond();
cpuMatrixMul(h_A, h_B, h_C, m, n, k);
double iElaps = cpuSecond() - iStart;
printf("cpu Matrix multiplication\t\telapsed %f sec.\n", iElaps);
// CPU Matrix multiplication by Strassen
Matrix a(h_A, n), b(h_B, n);
iStart = cpuSecond();
Matrix c = strassen(a, b);
iElaps = cpuSecond() - iStart;
printf("cpu Matrix multiplication by Strassen\telapsed %f sec.\n", iElaps);
c.checkResult(h_C);
// GPU Matrix Benchmark
float alpha = 1.0f, beta = 0.0f;
gpuMatrixCublas(h_A, h_B, h_C, m, n, k, m, n, k, alpha, beta);
// GPU Matrix multiplication
unsigned int gridRows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int gridCols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(gridRows, gridCols);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
iStart = cpuSecond();
gpuMatrixMul<< <grid, block >> > (d_A, d_B, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) * (m * k), hipMemcpyDeviceToHost));
printf("gpu Matrix multiplication\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
// Check result
checkResult(h_C, h_odata, m * k);
//cublas(d_A, d_B, d_C, m, n, k);
// GPU Matrix multiplication by tile
block.x = TILE_SIZE, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
if(grid.x == 0 || grid.y == 0){
unsigned int gridRows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int gridCols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(gridRows, gridCols);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
iStart = cpuSecond();
gpuMatrixMul<< <grid, block >> > (d_A, d_B, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
}
else{
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixMulTile), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) *(m * k), hipMemcpyDeviceToHost));
}
printf("gpu Matrix multiplication2\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by Coalescing
block.x = TILE_SIZE, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixMulCoalescing), dim3(grid), dim3(block), 0, 0, d_A, d_BT, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) *(m * k), hipMemcpyDeviceToHost));
printf("gpu Matrix multiplication3\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by avoiding share memory bank conflict
block.x = TILE_SIZE, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixMulBankConflict), dim3(grid), dim3(block), 0, 0, d_A, d_BT, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) *(m * k), hipMemcpyDeviceToHost));
printf("gpu Matrix multiplication4\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by tile, optimized by WPT = 4
block.x = TILE_SIZE / 4, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixMulTileWPTop4), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) *(m * k), hipMemcpyDeviceToHost));
printf("gpu Matrix multiplication5(WPT = 4)\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by tile, optimized by Computational optimization4
if(m > 32){
block.x = TILE_SIZE, block.y = VEC_SIZE;
grid.x = k / (TILE_SIZE * VEC_SIZE), grid.y = m / TILE_SIZE;
//grid.x = (k + TILE_SIZE - 1) / TILE_SIZE, grid.y = (m + TILE_SIZE * VEC_SIZE - 1) / (TILE_SIZE * VEC_SIZE);
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixComOpt), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) *(m * k), hipMemcpyDeviceToHost));
printf("gpu Matrix multiplication6\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
}
// GPU Matrix multiplication by prefetching
//block.x = TILE_SIZE, block.y = 8;
//grid.x = k / (TILE_SIZE * 8), grid.y = m / TILE_SIZE;
if(m > 32){
block.x = TILE_SIZE, block.y = VEC_SIZE;
grid.x = k / (TILE_SIZE * VEC_SIZE), grid.y = m / TILE_SIZE;
iStart = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixMulPrefetch), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, m, n, k);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(hipMemcpy(h_odata, d_C, sizeof(int) *(m * k), hipMemcpyDeviceToHost));
printf("gpu Matrix multiplication7\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
}
free(h_A);
free(h_B);
free(h_BT);
free(h_C);
free(h_odata);
hipFree(d_A);
hipFree(d_B);
hipFree(d_BT);
hipFree(d_C);
return 0;
}
| 19f393a9cc4ee9ff736fd0e986c0f76a4a3ffe2b.cu | // Include C++ header files.
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include "./include/config.cuh"
#include "./include/util.cuh"
#include "./include/gpuMatrixCublas.cuh"
#include "./include/matrixNaive.cuh"
#include "./include/matrixTile.cuh"
#include "./include/matrixCoalescing.cuh"
#include "./include/matrixBankConflict.cuh"
#include "./include/matrixTileWPT.cuh"
#include "./include/matrixTranspose.cuh"
#include "./include/matrixComOpt.cuh"
#include "./include/cpuMatrixStrassen.cuh"
#include "./include/matrixPrefetch.cuh"
//#include "./include/gpuMatrixStrassen.cuh"
using namespace std;
// Include local CUDA header files.
int main(int argc, char ** argv){
// set up device
int dev = 0;
initDevice(dev);
// input m, n, k
int m = 32, n = 32, k = 32;
if(argc > 1) m = atoi(argv[1]);
if(m % 32) {
cout << "The input must be a multiple number of 32!\n";
return 0;
}
n = k = m;
// Allocate memory space on the host
int *h_A = (int*)malloc(sizeof(int) * (m * n));
int *h_B = (int*)malloc(sizeof(int) * (n * k));
int *h_BT =(int*)malloc(sizeof(int) * (n * k));
int *h_C = (int*)malloc(sizeof(int) * (m * k));
int *h_odata = (int*)malloc(sizeof(int) * (m * k));
// Initialize
initialDataInt(h_A, m * n);
initialDataInt(h_B, n * k);
matrixTranspose(h_B, h_BT, n, k);
// Allocate memory space on the device
int *d_A, *d_B, *d_BT, *d_C;
cudaMalloc((void**)&d_A, sizeof(int) * (m * n));
cudaMalloc((void**)&d_B, sizeof(int) * (n * k));
cudaMalloc((void**)&d_BT,sizeof(int) * (n * k));
cudaMalloc((void**)&d_C, sizeof(int) * (m * k));
// Copy matrix A and B from host to device memory
cudaMemcpy(d_A, h_A, sizeof(int) * (m * n), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeof(int) * (n * k), cudaMemcpyHostToDevice);
cudaMemcpy(d_BT,h_BT,sizeof(int) * (n * k), cudaMemcpyHostToDevice);
// CPU Matrix multiplication
double iStart = cpuSecond();
cpuMatrixMul(h_A, h_B, h_C, m, n, k);
double iElaps = cpuSecond() - iStart;
printf("cpu Matrix multiplication\t\telapsed %f sec.\n", iElaps);
// CPU Matrix multiplication by Strassen
Matrix a(h_A, n), b(h_B, n);
iStart = cpuSecond();
Matrix c = strassen(a, b);
iElaps = cpuSecond() - iStart;
printf("cpu Matrix multiplication by Strassen\telapsed %f sec.\n", iElaps);
c.checkResult(h_C);
// GPU Matrix Benchmark
float alpha = 1.0f, beta = 0.0f;
gpuMatrixCublas(h_A, h_B, h_C, m, n, k, m, n, k, alpha, beta);
// GPU Matrix multiplication
unsigned int gridRows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int gridCols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(gridRows, gridCols);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
iStart = cpuSecond();
gpuMatrixMul<< <grid, block >> > (d_A, d_B, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) * (m * k), cudaMemcpyDeviceToHost));
printf("gpu Matrix multiplication\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
// Check result
checkResult(h_C, h_odata, m * k);
//cublas(d_A, d_B, d_C, m, n, k);
// GPU Matrix multiplication by tile
block.x = TILE_SIZE, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
if(grid.x == 0 || grid.y == 0){
unsigned int gridRows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int gridCols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(gridRows, gridCols);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
iStart = cpuSecond();
gpuMatrixMul<< <grid, block >> > (d_A, d_B, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
}
else{
iStart = cpuSecond();
gpuMatrixMulTile<<<grid, block>>>(d_A, d_B, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) *(m * k), cudaMemcpyDeviceToHost));
}
printf("gpu Matrix multiplication2\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by Coalescing
block.x = TILE_SIZE, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
iStart = cpuSecond();
gpuMatrixMulCoalescing<<<grid, block>>>(d_A, d_BT, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) *(m * k), cudaMemcpyDeviceToHost));
printf("gpu Matrix multiplication3\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by avoiding share memory bank conflict
block.x = TILE_SIZE, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
iStart = cpuSecond();
gpuMatrixMulBankConflict<<<grid, block>>>(d_A, d_BT, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) *(m * k), cudaMemcpyDeviceToHost));
printf("gpu Matrix multiplication4\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by tile, optimized by WPT = 4
block.x = TILE_SIZE / 4, block.y = TILE_SIZE;
grid.x = k / TILE_SIZE, grid.y = m / TILE_SIZE;
iStart = cpuSecond();
gpuMatrixMulTileWPTop4<<<grid, block>>>(d_A, d_B, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) *(m * k), cudaMemcpyDeviceToHost));
printf("gpu Matrix multiplication5(WPT = 4)\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
// GPU Matrix multiplication by tile, optimized by Computational optimization4
if(m > 32){
block.x = TILE_SIZE, block.y = VEC_SIZE;
grid.x = k / (TILE_SIZE * VEC_SIZE), grid.y = m / TILE_SIZE;
//grid.x = (k + TILE_SIZE - 1) / TILE_SIZE, grid.y = (m + TILE_SIZE * VEC_SIZE - 1) / (TILE_SIZE * VEC_SIZE);
iStart = cpuSecond();
gpuMatrixComOpt<<<grid, block>>>(d_A, d_B, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) *(m * k), cudaMemcpyDeviceToHost));
printf("gpu Matrix multiplication6\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
}
// GPU Matrix multiplication by prefetching
//block.x = TILE_SIZE, block.y = 8;
//grid.x = k / (TILE_SIZE * 8), grid.y = m / TILE_SIZE;
if(m > 32){
block.x = TILE_SIZE, block.y = VEC_SIZE;
grid.x = k / (TILE_SIZE * VEC_SIZE), grid.y = m / TILE_SIZE;
iStart = cpuSecond();
gpuMatrixMulPrefetch<<<grid, block>>>(d_A, d_B, d_C, m, n, k);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = cpuSecond() - iStart;
CHECK(cudaMemcpy(h_odata, d_C, sizeof(int) *(m * k), cudaMemcpyDeviceToHost));
printf("gpu Matrix multiplication7\t\telapsed %f sec. <<<grid %d block "
"%d>>>\n", iElaps, grid.x, block.x);
checkResult(h_C, h_odata, m * k);
}
free(h_A);
free(h_B);
free(h_BT);
free(h_C);
free(h_odata);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_BT);
cudaFree(d_C);
return 0;
}
|
3120cfe4c637d3b8fc554891b0cbb3e766085281.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include "gpu_color.h"
// Decodes one sRGB-encoded channel to linear light.
// Below the 0.04045 knee the sRGB curve is a straight ramp; above it the
// standard gamma-2.4 power segment applies.
__device__ float srgb2linear( float x ) {
    if (x > 0.04045f) {
        return __powf((x + 0.055f) / 1.055f, 2.4f);
    }
    return x / 12.92f;
}
// Encodes one linear-light channel back to sRGB (inverse of srgb2linear).
// Values at or below 0.0031308 use the linear segment; larger values use the
// gamma-1/2.4 power segment.
__device__ float linear2srgb( float x ) {
    if (x > 0.0031308f) {
        return 1.055f * __powf(x, 1.0f / 2.4f) - 0.055f;
    }
    return 12.92f * x;
}
// Converts a 4-channel pixel to CIE XYZ. Channels are stored blue-first in
// this file (b is read from c.x, r from c.z — see the assignments below).
// Output is scaled by 100, the conventional XYZ range (white Y = 100).
__device__ float3 rgb2xyz( float4 c ) {
#if 1
// sRGB -> linear per channel (same transfer curve as srgb2linear above).
float b = ( c.x > 0.04045f ) ? __powf( ( c.x + 0.055f ) / 1.055f, 2.4f ) : c.x / 12.92f;
float g = ( c.y > 0.04045f ) ? __powf( ( c.y + 0.055f ) / 1.055f, 2.4f ) : c.y / 12.92f;
float r = ( c.z > 0.04045f ) ? __powf( ( c.z + 0.055f ) / 1.055f, 2.4f ) : c.z / 12.92f;
#else
// Alternative (disabled) path: treat the input as already linear.
float b = c.x;
float g = c.y;
float r = c.z;
#endif
// Standard linear-sRGB -> XYZ matrix.
return make_float3(
100 * (0.4124f * r + 0.3576f * g + 0.1805f * b),
100 * (0.2126f * r + 0.7152f * g + 0.0722f * b),
100 * (0.0193f * r + 0.1192f * g + 0.9505f * b)
);
}
// Inverse of rgb2xyz: converts CIE XYZ (0..100 scale) back to an
// sRGB-encoded pixel with the same blue-first channel layout
// (result.x = blue, result.z = red) and alpha forced to 1.
__device__ float4 xyz2rgb( float x, float y, float z ) {
// Standard XYZ -> linear-sRGB matrix; the /100 undoes rgb2xyz's scaling.
float r = ( 3.2406f * x - 1.5372f * y - 0.4986f * z ) / 100.0f;
float g = (-0.9689f * x + 1.8758f * y + 0.0415f * z ) / 100.0f;
float b = ( 0.0557f * x - 0.2040f * y + 1.0570f * z ) / 100.0f;
#if 1
// linear -> sRGB encoding per channel (same curve as linear2srgb above).
return make_float4(
( b > 0.0031308f ) ? (( 1.055f * __powf( b, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * b,
( g > 0.0031308f ) ? (( 1.055f * __powf( g, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * g,
( r > 0.0031308f ) ? (( 1.055f * __powf( r, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * r,
1
);
#else
// Alternative (disabled) path: return linear values without encoding.
return make_float4(b, g, r, 1);
#endif
}
__global__ void imp_srgb2linear( const gpu_plm2<float> src, gpu_plm2<float> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
dst(ix, iy) = srgb2linear(src(ix, iy));
}
gpu_image<float> gpu_srgb2linear( const gpu_image<float>& src) {
gpu_image<float> dst(src.size());
hipLaunchKernelGGL(( imp_srgb2linear), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_linear2srgb( const gpu_plm2<float> src, gpu_plm2<float> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
dst(ix, iy) = linear2srgb(src(ix, iy));
}
gpu_image<float> gpu_linear2srgb( const gpu_image<float>& src) {
gpu_image<float> dst(src.size());
hipLaunchKernelGGL(( imp_linear2srgb), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_srgb2linear( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = make_float4(srgb2linear(c.x), srgb2linear(c.y), srgb2linear(c.z), c.w);
}
gpu_image<float4> gpu_srgb2linear( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_srgb2linear), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_linear2srgb( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = make_float4(linear2srgb(c.x), linear2srgb(c.y), linear2srgb(c.z), c.w);
}
gpu_image<float4> gpu_linear2srgb( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_linear2srgb), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
// Kernel: convert one pixel (blue-first channel order, sRGB-encoded — see
// rgb2xyz) to CIELAB. One thread per pixel; out-of-range threads return.
__global__ void imp_rgb2lab( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
// Normalize XYZ against the D65 reference white (Xn=95.047, Yn=100, Zn=108.883).
float3 c = rgb2xyz( src(ix, iy) );
c.x /= 95.047f;
c.y /= 100.0f;
c.z /= 108.883f;
// CIELAB f() transfer: cube root above the 0.008856 threshold, linear below.
// Use the explicitly single-precision powf, consistent with the float math
// (__powf, f-suffixed literals) used throughout this file.
float x = ( c.x > 0.008856f ) ? powf( c.x, 1.0f / 3.0f ) : ( 7.787f * c.x ) + ( 16.0f / 116.0f );
float y = ( c.y > 0.008856f ) ? powf( c.y, 1.0f / 3.0f ) : ( 7.787f * c.y ) + ( 16.0f / 116.0f );
float z = ( c.z > 0.008856f ) ? powf( c.z, 1.0f / 3.0f ) : ( 7.787f * c.z ) + ( 16.0f / 116.0f );
// L in [0,100]; a/b are the usual chroma axes; alpha forced to 1.
dst(ix, iy) = make_float4(
( 116 * y ) - 16,
500 * ( x - y ),
200 * ( y - z ),
1
);
}
gpu_image<float4> gpu_rgb2lab( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_rgb2lab), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_lab2rgb( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
float fy = ( c.x + 16.0f ) / 116.0f;
float fx = c.y / 500.0f + fy;
float fz = fy - c.z / 200.0f;
dst(ix, iy) = xyz2rgb(
95.047f * (( fx > 0.206897f ) ? fx * fx * fx : ( fx - 16.0f / 116.0f ) / 7.787f),
100.000f * (( fy > 0.206897f ) ? fy * fy * fy : ( fy - 16.0f / 116.0f ) / 7.787f),
108.883f * (( fz > 0.206897f ) ? fz * fz * fz : ( fz - 16.0f / 116.0f ) / 7.787f)
);
}
gpu_image<float4> gpu_lab2rgb( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_lab2rgb), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_l2rgb( const gpu_plm2<float> src, gpu_plm2<float4> dst )
{
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float L = src(ix, iy);
float fy = ( L + 16.0f ) / 116.0f;
float fx = fy;
float fz = fy;
dst(ix, iy) = xyz2rgb(
95.047f * (( fx > 0.206897f ) ? fx * fx * fx : ( fx - 16.0f / 116.0f ) / 7.787f),
100.000f * (( fy > 0.206897f ) ? fy * fy * fy : ( fy - 16.0f / 116.0f ) / 7.787f),
108.883f * (( fz > 0.206897f ) ? fz * fz * fz : ( fz - 16.0f / 116.0f ) / 7.787f)
);
}
gpu_image<float4> gpu_l2rgb( const gpu_image<float>& src ) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_l2rgb), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
// Kernel: replicate a single grayscale channel into an opaque 4-channel
// pixel. When `saturate` is set, the value is clamped to [0,1] first.
__global__ void imp_gray2rgb( const gpu_plm2<float> src, gpu_plm2<float4> dst, bool saturate )
{
    const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
    const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
    if ((ix < dst.w) && (iy < dst.h)) {
        float gray = src(ix, iy);
        if (saturate) {
            gray = __saturatef(gray);
        }
        dst(ix, iy) = make_float4(gray, gray, gray, 1);
    }
}
gpu_image<float4> gpu_gray2rgb( const gpu_image<float>& src, bool saturate ) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_gray2rgb), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst, saturate);
GPU_CHECK_ERROR();
return dst;
}
// Kernel: convert a 4-channel pixel to luma with the 0.299/0.587/0.114
// (Rec. 601) weights. Channels are stored blue-first in this file (see
// rgb2xyz), so the red weight is applied to c.z and the blue weight to c.x.
// Each channel is clamped to [0,1] before weighting.
__global__ void imp_rgb2gray( const gpu_plm2<float4> src, gpu_plm2<float> dst )
{
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if(ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = 0.299f * __saturatef(c.z) +
0.587f * __saturatef(c.y) +
0.114f * __saturatef(c.x);
}
gpu_image<float> gpu_rgb2gray( const gpu_image<float4>& src ) {
gpu_image<float> dst(src.size());
hipLaunchKernelGGL(( imp_rgb2gray), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, src, dst);
GPU_CHECK_ERROR();
return dst;
}
// Kernel: swap the first and third channels of every pixel (BGRA <-> RGBA),
// keeping the second channel and alpha unchanged.
__global__ void imp_swap_rgba( gpu_plm2<float4> dst, const gpu_plm2<float4> src )
{
    const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
    const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
    if ((ix < dst.w) && (iy < dst.h)) {
        const float4 p = src(ix, iy);
        dst(ix, iy) = make_float4(p.z, p.y, p.x, p.w);
    }
}
gpu_image<float4> gpu_swap_rgba( const gpu_image<float4>& src ) {
gpu_image<float4> dst(src.size());
hipLaunchKernelGGL(( imp_swap_rgba), dim3(dst.blocks()), dim3(dst.threads()), 0, 0, dst, src);
GPU_CHECK_ERROR();
return dst;
}
| 3120cfe4c637d3b8fc554891b0cbb3e766085281.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include "gpu_color.h"
__device__ float srgb2linear( float x ) {
return ( x > 0.04045f ) ? __powf( ( x + 0.055f ) / 1.055f, 2.4f ) : x / 12.92f;
}
__device__ float linear2srgb( float x ) {
return ( x > 0.0031308f ) ? (( 1.055f * __powf( x, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * x;
}
__device__ float3 rgb2xyz( float4 c ) {
#if 1
float b = ( c.x > 0.04045f ) ? __powf( ( c.x + 0.055f ) / 1.055f, 2.4f ) : c.x / 12.92f;
float g = ( c.y > 0.04045f ) ? __powf( ( c.y + 0.055f ) / 1.055f, 2.4f ) : c.y / 12.92f;
float r = ( c.z > 0.04045f ) ? __powf( ( c.z + 0.055f ) / 1.055f, 2.4f ) : c.z / 12.92f;
#else
float b = c.x;
float g = c.y;
float r = c.z;
#endif
return make_float3(
100 * (0.4124f * r + 0.3576f * g + 0.1805f * b),
100 * (0.2126f * r + 0.7152f * g + 0.0722f * b),
100 * (0.0193f * r + 0.1192f * g + 0.9505f * b)
);
}
__device__ float4 xyz2rgb( float x, float y, float z ) {
float r = ( 3.2406f * x - 1.5372f * y - 0.4986f * z ) / 100.0f;
float g = (-0.9689f * x + 1.8758f * y + 0.0415f * z ) / 100.0f;
float b = ( 0.0557f * x - 0.2040f * y + 1.0570f * z ) / 100.0f;
#if 1
return make_float4(
( b > 0.0031308f ) ? (( 1.055f * __powf( b, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * b,
( g > 0.0031308f ) ? (( 1.055f * __powf( g, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * g,
( r > 0.0031308f ) ? (( 1.055f * __powf( r, ( 1.0f / 2.4f ))) - 0.055f ) : 12.92f * r,
1
);
#else
return make_float4(b, g, r, 1);
#endif
}
__global__ void imp_srgb2linear( const gpu_plm2<float> src, gpu_plm2<float> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
dst(ix, iy) = srgb2linear(src(ix, iy));
}
gpu_image<float> gpu_srgb2linear( const gpu_image<float>& src) {
gpu_image<float> dst(src.size());
imp_srgb2linear<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_linear2srgb( const gpu_plm2<float> src, gpu_plm2<float> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
dst(ix, iy) = linear2srgb(src(ix, iy));
}
gpu_image<float> gpu_linear2srgb( const gpu_image<float>& src) {
gpu_image<float> dst(src.size());
imp_linear2srgb<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_srgb2linear( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = make_float4(srgb2linear(c.x), srgb2linear(c.y), srgb2linear(c.z), c.w);
}
gpu_image<float4> gpu_srgb2linear( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
imp_srgb2linear<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_linear2srgb( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = make_float4(linear2srgb(c.x), linear2srgb(c.y), linear2srgb(c.z), c.w);
}
gpu_image<float4> gpu_linear2srgb( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
imp_linear2srgb<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
// Kernel: convert one pixel (blue-first channel order, sRGB-encoded — see
// rgb2xyz) to CIELAB. One thread per pixel; out-of-range threads return.
__global__ void imp_rgb2lab( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
// Normalize XYZ against the D65 reference white (Xn=95.047, Yn=100, Zn=108.883).
float3 c = rgb2xyz( src(ix, iy) );
c.x /= 95.047f;
c.y /= 100.0f;
c.z /= 108.883f;
// CIELAB f() transfer: cube root above the 0.008856 threshold, linear below.
// Use the explicitly single-precision powf, consistent with the float math
// (__powf, f-suffixed literals) used throughout this file.
float x = ( c.x > 0.008856f ) ? powf( c.x, 1.0f / 3.0f ) : ( 7.787f * c.x ) + ( 16.0f / 116.0f );
float y = ( c.y > 0.008856f ) ? powf( c.y, 1.0f / 3.0f ) : ( 7.787f * c.y ) + ( 16.0f / 116.0f );
float z = ( c.z > 0.008856f ) ? powf( c.z, 1.0f / 3.0f ) : ( 7.787f * c.z ) + ( 16.0f / 116.0f );
// L in [0,100]; a/b are the usual chroma axes; alpha forced to 1.
dst(ix, iy) = make_float4(
( 116 * y ) - 16,
500 * ( x - y ),
200 * ( y - z ),
1
);
}
gpu_image<float4> gpu_rgb2lab( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
imp_rgb2lab<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_lab2rgb( const gpu_plm2<float4> src, gpu_plm2<float4> dst ) {
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
float fy = ( c.x + 16.0f ) / 116.0f;
float fx = c.y / 500.0f + fy;
float fz = fy - c.z / 200.0f;
dst(ix, iy) = xyz2rgb(
95.047f * (( fx > 0.206897f ) ? fx * fx * fx : ( fx - 16.0f / 116.0f ) / 7.787f),
100.000f * (( fy > 0.206897f ) ? fy * fy * fy : ( fy - 16.0f / 116.0f ) / 7.787f),
108.883f * (( fz > 0.206897f ) ? fz * fz * fz : ( fz - 16.0f / 116.0f ) / 7.787f)
);
}
gpu_image<float4> gpu_lab2rgb( const gpu_image<float4>& src) {
gpu_image<float4> dst(src.size());
imp_lab2rgb<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_l2rgb( const gpu_plm2<float> src, gpu_plm2<float4> dst )
{
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float L = src(ix, iy);
float fy = ( L + 16.0f ) / 116.0f;
float fx = fy;
float fz = fy;
dst(ix, iy) = xyz2rgb(
95.047f * (( fx > 0.206897f ) ? fx * fx * fx : ( fx - 16.0f / 116.0f ) / 7.787f),
100.000f * (( fy > 0.206897f ) ? fy * fy * fy : ( fy - 16.0f / 116.0f ) / 7.787f),
108.883f * (( fz > 0.206897f ) ? fz * fz * fz : ( fz - 16.0f / 116.0f ) / 7.787f)
);
}
gpu_image<float4> gpu_l2rgb( const gpu_image<float>& src ) {
gpu_image<float4> dst(src.size());
imp_l2rgb<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_gray2rgb( const gpu_plm2<float> src, gpu_plm2<float4> dst, bool saturate )
{
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if (ix >= dst.w || iy >= dst.h)
return;
float c = src(ix, iy);
if (saturate) c = __saturatef(c);
dst(ix, iy) = make_float4(c, c, c, 1);
}
gpu_image<float4> gpu_gray2rgb( const gpu_image<float>& src, bool saturate ) {
gpu_image<float4> dst(src.size());
imp_gray2rgb<<<dst.blocks(), dst.threads()>>>(src, dst, saturate);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_rgb2gray( const gpu_plm2<float4> src, gpu_plm2<float> dst )
{
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if(ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = 0.299f * __saturatef(c.z) +
0.587f * __saturatef(c.y) +
0.114f * __saturatef(c.x);
}
gpu_image<float> gpu_rgb2gray( const gpu_image<float4>& src ) {
gpu_image<float> dst(src.size());
imp_rgb2gray<<<dst.blocks(), dst.threads()>>>(src, dst);
GPU_CHECK_ERROR();
return dst;
}
__global__ void imp_swap_rgba( gpu_plm2<float4> dst, const gpu_plm2<float4> src )
{
const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
if(ix >= dst.w || iy >= dst.h)
return;
float4 c = src(ix, iy);
dst(ix, iy) = make_float4(c.z, c.y, c.x, c.w);
}
gpu_image<float4> gpu_swap_rgba( const gpu_image<float4>& src ) {
gpu_image<float4> dst(src.size());
imp_swap_rgba<<<dst.blocks(), dst.threads()>>>(dst, src);
GPU_CHECK_ERROR();
return dst;
}
|
3bcdff05e201a92ba2fe0569ffb78db148bd50e5.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////////////////////////////////////
// File: GameOfLife.h
// Revision: 1
// Date Creation: 13.11.2019
// Last Change: 13.11.2019
// Author: Christian Steinbrecher
// Descrition: Conway's Game of Life
///////////////////////////////////////////////////////////////////////////////
#include "GameOfLife.h"
#include <math_extended.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// One Game-of-Life generation: each thread computes the next state of a
// single cell from dp_fieldOld into dp_fieldNew (row-major, index y*n + x,
// m rows by n columns). Only interior cells (1 <= y < m-1, 1 <= x < n-1)
// are written; the outermost border is never updated — presumably it acts
// as a fixed frame so isAlive() can read all eight neighbours without
// bounds checks (TODO(review): confirm against isAlive's implementation).
__global__ void kernel(TElem const dp_fieldOld[], TElem dp_fieldNew[],
size_t const m, size_t const n)
{
assert(dp_fieldOld != nullptr);
assert(dp_fieldNew != nullptr);
size_t const x = blockIdx.x * blockDim.x + threadIdx.x;
size_t const y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= 1 && y < m-1 && x >= 1 && x < n-1)
{
dp_fieldNew[y * n + x] = isAlive(dp_fieldOld, m, n, y, x);
}
}
// Host wrapper: advances the m x n field one generation on the GPU.
// Launches a 2D grid of 128x1 thread blocks — columns are ceil-divided over
// 128-wide blocks, with one block row per field row. Null pointers are
// silently ignored.
// NOTE(review): the launch is on the default stream with no error check or
// synchronization here — callers must synchronize before reading dp_fieldNew.
void gameOfLife_gpu(TElem const dp_fieldOld[], TElem dp_fieldNew[],
size_t const m, size_t const n)
{
if (dp_fieldOld == nullptr || dp_fieldNew == nullptr)
{
return;
}
size_t const block_size = 128;
unsigned int bigX = static_cast<unsigned int>(ceil_div(n, block_size));
unsigned int bigY = static_cast<unsigned int>(ceil_div(m, 1));
unsigned int tibX = static_cast<unsigned int>(block_size);
unsigned int tibY = static_cast<unsigned int>(1);
dim3 const big(bigX, bigY); // blocks in grid
dim3 const tib(tibX, tibY); // threads in block
kernel << < big, tib >> > (dp_fieldOld, dp_fieldNew, m, n);
}
__global__ void kernel_drawField(TElem const dp_field[], Vertex dp_vertexBuffer[],
size_t const height, size_t const width, size_t const strideSize,
size_t const m, size_t const n)
{
assert(dp_field != nullptr);
assert(dp_vertexBuffer != nullptr);
assert(height * strideSize <= m);
assert(width * strideSize <= n);
size_t const x = blockIdx.x * blockDim.x + threadIdx.x;
size_t const y = blockIdx.y * blockDim.y + threadIdx.y;
size_t const fieldStartX = (n - width * strideSize) / 2;
size_t const fieldStartY = (m - height * strideSize) / 2;
if (y < height && x < width)
{
if (dp_field[(fieldStartY + y * strideSize) * n + (fieldStartX + x * strideSize)])
{
dp_vertexBuffer[y * width + x].color = { 1.0f, 1.0f, 1.0f };
}
else
{
dp_vertexBuffer[y * width + x].color = { 0.0f, 0.0f, 0.0f };
}
}
}
void drawField_gpu(TElem const dp_field[], Vertex dp_vertexBuffer[],
size_t const height, size_t const width, size_t const strideSize,
size_t const m, size_t const n)
{
if (dp_field == nullptr || dp_vertexBuffer == nullptr)
{
return;
}
if (height * strideSize > m || width * strideSize > n)
{
return;
}
size_t const block_size = 128;
unsigned int bigX = static_cast<unsigned int>(ceil_div(width, block_size));
unsigned int bigY = static_cast<unsigned int>(ceil_div(height, 1));
unsigned int tibX = static_cast<unsigned int>(block_size);
unsigned int tibY = static_cast<unsigned int>(1);
dim3 const big(bigX, bigY); // blocks in grid
dim3 const tib(tibX, tibY); // threads in block
kernel_drawField << < big, tib >> > (dp_field, dp_vertexBuffer, height, width, strideSize, m, n);
} | 3bcdff05e201a92ba2fe0569ffb78db148bd50e5.cu | ///////////////////////////////////////////////////////////////////////////////
// File: GameOfLife.h
// Revision: 1
// Date Creation: 13.11.2019
// Last Change: 13.11.2019
// Author: Christian Steinbrecher
// Descrition: Conway's Game of Life
///////////////////////////////////////////////////////////////////////////////
#include "GameOfLife.h"
#include <math_extended.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// One Game-of-Life generation: each thread computes the next state of a
// single cell from dp_fieldOld into dp_fieldNew (row-major, index y*n + x,
// m rows by n columns). Only interior cells (1 <= y < m-1, 1 <= x < n-1)
// are written; the outermost border is never updated — presumably it acts
// as a fixed frame so isAlive() can read all eight neighbours without
// bounds checks (TODO(review): confirm against isAlive's implementation).
__global__ void kernel(TElem const dp_fieldOld[], TElem dp_fieldNew[],
size_t const m, size_t const n)
{
assert(dp_fieldOld != nullptr);
assert(dp_fieldNew != nullptr);
size_t const x = blockIdx.x * blockDim.x + threadIdx.x;
size_t const y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= 1 && y < m-1 && x >= 1 && x < n-1)
{
dp_fieldNew[y * n + x] = isAlive(dp_fieldOld, m, n, y, x);
}
}
void gameOfLife_gpu(TElem const dp_fieldOld[], TElem dp_fieldNew[],
size_t const m, size_t const n)
{
if (dp_fieldOld == nullptr || dp_fieldNew == nullptr)
{
return;
}
size_t const block_size = 128;
unsigned int bigX = static_cast<unsigned int>(ceil_div(n, block_size));
unsigned int bigY = static_cast<unsigned int>(ceil_div(m, 1));
unsigned int tibX = static_cast<unsigned int>(block_size);
unsigned int tibY = static_cast<unsigned int>(1);
dim3 const big(bigX, bigY); // blocks in grid
dim3 const tib(tibX, tibY); // threads in block
kernel << < big, tib >> > (dp_fieldOld, dp_fieldNew, m, n);
}
__global__ void kernel_drawField(TElem const dp_field[], Vertex dp_vertexBuffer[],
size_t const height, size_t const width, size_t const strideSize,
size_t const m, size_t const n)
{
assert(dp_field != nullptr);
assert(dp_vertexBuffer != nullptr);
assert(height * strideSize <= m);
assert(width * strideSize <= n);
size_t const x = blockIdx.x * blockDim.x + threadIdx.x;
size_t const y = blockIdx.y * blockDim.y + threadIdx.y;
size_t const fieldStartX = (n - width * strideSize) / 2;
size_t const fieldStartY = (m - height * strideSize) / 2;
if (y < height && x < width)
{
if (dp_field[(fieldStartY + y * strideSize) * n + (fieldStartX + x * strideSize)])
{
dp_vertexBuffer[y * width + x].color = { 1.0f, 1.0f, 1.0f };
}
else
{
dp_vertexBuffer[y * width + x].color = { 0.0f, 0.0f, 0.0f };
}
}
}
void drawField_gpu(TElem const dp_field[], Vertex dp_vertexBuffer[],
size_t const height, size_t const width, size_t const strideSize,
size_t const m, size_t const n)
{
if (dp_field == nullptr || dp_vertexBuffer == nullptr)
{
return;
}
if (height * strideSize > m || width * strideSize > n)
{
return;
}
size_t const block_size = 128;
unsigned int bigX = static_cast<unsigned int>(ceil_div(width, block_size));
unsigned int bigY = static_cast<unsigned int>(ceil_div(height, 1));
unsigned int tibX = static_cast<unsigned int>(block_size);
unsigned int tibY = static_cast<unsigned int>(1);
dim3 const big(bigX, bigY); // blocks in grid
dim3 const tib(tibX, tibY); // threads in block
kernel_drawField << < big, tib >> > (dp_field, dp_vertexBuffer, height, width, strideSize, m, n);
} |
kernal.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _KERNEL_CU_
#define _KERNEL_CU_
#include "GpuMethods.h"
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
// Grid-wide spin barrier: syncs within the block, then busy-waits until
// flags[0] — an arrival counter the caller increments with atomicAdd before
// calling in — reaches the expected total thread count.
// NOTE(review): this hand-rolled barrier is only safe when every block of
// the grid is resident on the device simultaneously; if the grid exceeds
// what the GPU can co-schedule, unscheduled blocks never arrive and the
// spin loop deadlocks. A cooperative launch with grid.sync() would be the
// robust alternative — confirm the launch configuration guarantees full
// residency, and that every launched thread reaches the barrier.
__device__ void syncAllThreads(int *flags, int threads)
{
__syncthreads();
while (true) {
//__threadfence_system();
__threadfence();
if (flags[0] == threads) break;
}
}
// K-means clustering kernel: runs assignment + mean-update iterations
// entirely on the GPU, using d_Flags as grid-wide arrival counters (four
// per loop iteration, consumed by syncAllThreads).
//
// Layout / preconditions visible in this code and its launch site:
//   - each thread handles `iter` consecutive points; iter1 = ceil(K/blockDim)
//     rows of the mean table are copied to shared memory per thread
//   - meansSHM is sized for d_K <= 256 and d_D <= 4
//   - d_Kmeans holds the current means on entry; during an iteration the
//     per-cluster point sums are atomically accumulated ON TOP of the old
//     mean, which is subtracted back out in the update stage
// NOTE(review): d_Flags and d_Counts must be zero-initialized by the host,
// and d_totalThreads must equal the number of threads that pass the
// `threadId >= d_N` guard, or the spin barrier hangs — confirm at call site.
__global__ void kernel(
float *d_Points,
float *d_Kmeans,
int *d_Clusters,
int *d_Counts,
int *d_Flags,
unsigned int iter,
unsigned int iter1,
int d_N,
int d_K,
int d_D,
int d_totalThreads //,
// float *d_debug
)
{
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
// suppose this is only one iteration first
if (threadId >= d_N) return;
// REQUIREMENT: (K * D) < (48KB (shared memory size in chip) / 4 (sizeof float)/ 2 (blocks)) = 6000, K < ParallelThreads
__shared__ float meansSHM[256 * 4];
// here for the algorithm loop
for (int loop = 0; loop < MAXLOOP; loop ++) {
// Stage 1: cooperatively stage the current K means into shared memory
// (each thread of the block copies up to iter1 mean rows).
for (int i = 0; i < iter1; i++) {
int tempTid = threadIdx.x * iter1 + i;
if (tempTid < d_K)
for (int dim = 0; dim < d_D; dim++) {
meansSHM[tempTid * d_D + dim] = d_Kmeans[tempTid * d_D + dim];
}
}
// SYNC ALL THREADS
__syncthreads();
atomicAdd(d_Flags + loop * 4 + 0, 1);
syncAllThreads(d_Flags + loop * 4 + 0, d_totalThreads);
// Stage 2 (assignment): for each of this thread's points, find the
// nearest mean by squared Euclidean distance.
for (int i = 0; i < iter; i++) {
int tempTid = threadId * iter + i;
if (tempTid >= d_N) break;
// REQUIREMENT: D <= 4
float data[4];
// fetch point data for this thread from global memory
for (int dim = 0; dim < d_D; dim++)
data[dim] = d_Points[tempTid * d_D + dim];
// find the minimal cluster for this point
float minimal = 1E8;
int minIdx = -1;
for (int cluster = 0; cluster < d_K; cluster++) {
float length = 0;
for (int dim = 0; dim < d_D; dim++) {
float t = (data[dim] - meansSHM[cluster * d_D + dim]);
length += t * t;
}
if (length < minimal) {
minimal = length;
minIdx = cluster;
}
}
// d_Flags[loop*4+1] counts reassignments; zero means convergence.
if (d_Clusters[tempTid] != minIdx) {
d_Clusters[tempTid] = minIdx;
atomicAdd(d_Flags + loop * 4 + 1, 1);
}
// write results to global memory for the next iteration
// (accumulates point sums on top of the old mean stored in d_Kmeans;
// the old mean is subtracted back out in the update stage below)
for (int dim = 0; dim < d_D; dim++)
atomicAdd(d_Kmeans + minIdx * d_D + dim, data[dim]);
atomicAdd(d_Counts + minIdx, 1);
}
// SYNC ALL THREADS again
__syncthreads();
atomicAdd(d_Flags + loop * 4 + 2, 1);
syncAllThreads(d_Flags + loop * 4 + 2, d_totalThreads);
// after syncing all threads here
// check if we need to iterate again
if (d_Flags[loop * 4 + 1] == 0) break;
// Stage 3 (update): global thread k recomputes mean k. meansSHM still
// holds the old mean, so (accumulated value - old mean) is the pure sum
// of assigned points; dividing by the count yields the new mean.
// Empty clusters get a zero mean (value1 == 0).
// update the cluster data in global memory
if (threadId < d_K) {
int count = d_Counts[threadId];
float value1 = (count != 0) ? (1.0f / (float)count) : 0;
for (int dim = 0; dim < d_D; dim++) {
float value = d_Kmeans[threadId * d_D + dim] - meansSHM[threadId * d_D + dim];
d_Kmeans[threadId * d_D + dim] = value * value1;
}
d_Counts[threadId] = 0;
}
// SYNC ALL THREADS again
__syncthreads();
atomicAdd(d_Flags + loop * 4 + 3, 1);
syncAllThreads(d_Flags + loop * 4 + 3, d_totalThreads);
}
}
// Host launcher for the k-means kernel.
// gridX/blockX give the 1D launch shape; `iter` is the number of points each
// thread processes. iter1, computed here as ceil(K / blockX) clamped to at
// least 1, is how many mean rows each thread of a block copies into shared
// memory. Launch errors are printed but not propagated to the caller.
extern "C"
void kmeansKernal(unsigned int gridX, unsigned int blockX, unsigned iter)
{
hipError_t error = hipSuccess;
dim3 block(blockX, 1, 1);
dim3 grid(gridX, 1, 1);
//cout << "blockX " << blockX << " gridX " << gridX << " iter " << iter << endl;
int iter1 = (K % blockX == 0) ? (K / blockX) : (K / blockX + 1);
iter1 = (iter1 == 0 ? 1 : iter1);
//kernel << <grid, block >> >(GPU::d_points, GPU::d_kmeans, GPU::d_clusters, GPU::d_counts, GPU::d_flags, iter, iter1, N, K, D, GPU::ParallelThreads, GPU::d_debug);
kernel << <grid, block >> >(GPU::d_points, GPU::d_kmeans, GPU::d_clusters, GPU::d_counts, GPU::d_flags, iter, iter1, N, K, D, GPU::ParallelThreads);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("kernel() failed to launch error = %d\n", error);
}
}
#endif _KERNEL_CU_ | kernal.cu | #ifndef _KERNEL_CU_
#define _KERNEL_CU_
#include "GpuMethods.h"
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
// Grid-wide spin barrier: syncs within the block, then busy-waits until
// flags[0] — an arrival counter the caller increments with atomicAdd before
// calling in — reaches the expected total thread count.
// NOTE(review): this hand-rolled barrier is only safe when every block of
// the grid is resident on the device simultaneously; if the grid exceeds
// what the GPU can co-schedule, unscheduled blocks never arrive and the
// spin loop deadlocks. A cooperative launch with grid.sync() would be the
// robust alternative — confirm the launch configuration guarantees full
// residency, and that every launched thread reaches the barrier.
__device__ void syncAllThreads(int *flags, int threads)
{
__syncthreads();
while (true) {
//__threadfence_system();
__threadfence();
if (flags[0] == threads) break;
}
}
__global__ void kernel(
float *d_Points,
float *d_Kmeans,
int *d_Clusters,
int *d_Counts,
int *d_Flags,
unsigned int iter,
unsigned int iter1,
int d_N,
int d_K,
int d_D,
int d_totalThreads //,
// float *d_debug
)
{
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
// suppose this is only one iteration first
if (threadId >= d_N) return;
// REQUIREMENT: (K * D) < (48KB (shared memory size in chip) / 4 (sizeof float)/ 2 (blocks)) = 6000, K < ParallelThreads
__shared__ float meansSHM[256 * 4];
// here for the algorithm loop
for (int loop = 0; loop < MAXLOOP; loop ++) {
for (int i = 0; i < iter1; i++) {
int tempTid = threadIdx.x * iter1 + i;
if (tempTid < d_K)
for (int dim = 0; dim < d_D; dim++) {
meansSHM[tempTid * d_D + dim] = d_Kmeans[tempTid * d_D + dim];
}
}
// SYNC ALL THREADS
__syncthreads();
atomicAdd(d_Flags + loop * 4 + 0, 1);
syncAllThreads(d_Flags + loop * 4 + 0, d_totalThreads);
for (int i = 0; i < iter; i++) {
int tempTid = threadId * iter + i;
if (tempTid >= d_N) break;
// REQUIREMENT: D <= 4
float data[4];
// fetch point data for this thread from global memory
for (int dim = 0; dim < d_D; dim++)
data[dim] = d_Points[tempTid * d_D + dim];
// find the minimal cluster for this point
float minimal = 1E8;
int minIdx = -1;
for (int cluster = 0; cluster < d_K; cluster++) {
float length = 0;
for (int dim = 0; dim < d_D; dim++) {
float t = (data[dim] - meansSHM[cluster * d_D + dim]);
length += t * t;
}
if (length < minimal) {
minimal = length;
minIdx = cluster;
}
}
if (d_Clusters[tempTid] != minIdx) {
d_Clusters[tempTid] = minIdx;
atomicAdd(d_Flags + loop * 4 + 1, 1);
}
// write results to global memory for the next iteration
for (int dim = 0; dim < d_D; dim++)
atomicAdd(d_Kmeans + minIdx * d_D + dim, data[dim]);
atomicAdd(d_Counts + minIdx, 1);
}
// SYNC ALL THREADS again
__syncthreads();
atomicAdd(d_Flags + loop * 4 + 2, 1);
syncAllThreads(d_Flags + loop * 4 + 2, d_totalThreads);
// after syncing all threads here
// check if we need to iterate again
if (d_Flags[loop * 4 + 1] == 0) break;
// update the cluster data in global memory
if (threadId < d_K) {
int count = d_Counts[threadId];
float value1 = (count != 0) ? (1.0f / (float)count) : 0;
for (int dim = 0; dim < d_D; dim++) {
float value = d_Kmeans[threadId * d_D + dim] - meansSHM[threadId * d_D + dim];
d_Kmeans[threadId * d_D + dim] = value * value1;
}
d_Counts[threadId] = 0;
}
// SYNC ALL THREADS again
__syncthreads();
atomicAdd(d_Flags + loop * 4 + 3, 1);
syncAllThreads(d_Flags + loop * 4 + 3, d_totalThreads);
}
}
extern "C"
void kmeansKernal(unsigned int gridX, unsigned int blockX, unsigned iter)
{
cudaError_t error = cudaSuccess;
dim3 block(blockX, 1, 1);
dim3 grid(gridX, 1, 1);
//cout << "blockX " << blockX << " gridX " << gridX << " iter " << iter << endl;
int iter1 = (K % blockX == 0) ? (K / blockX) : (K / blockX + 1);
iter1 = (iter1 == 0 ? 1 : iter1);
//kernel << <grid, block >> >(GPU::d_points, GPU::d_kmeans, GPU::d_clusters, GPU::d_counts, GPU::d_flags, iter, iter1, N, K, D, GPU::ParallelThreads, GPU::d_debug);
kernel << <grid, block >> >(GPU::d_points, GPU::d_kmeans, GPU::d_clusters, GPU::d_counts, GPU::d_flags, iter, iter1, N, K, D, GPU::ParallelThreads);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("kernel() failed to launch error = %d\n", error);
}
}
#endif _KERNEL_CU_ |
b8c84e1d34e6ce720f8e388d7031f7a163e8e79f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THCS_GENERIC_FILE
#define THCS_GENERIC_FILE "generic/THCSTensorMath.cu"
#else
#include "THHThrustAllocator.cuh"
#include "THHNumerics.cuh"
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#define ROW_PTR2(t, r) (THCTensor_(data)(THCState *state, t) + (r) * (t)->stride[0])
#define COL_PTR2(t, c) (THCTensor_(data)(THCState *state, t) + (c) * (t)->stride[1])
#define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor)
#define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor)
// Builds a CSR row-pointer array (length dim+1, int32, caller-owned) from
// COO row indices. The nnz int64 row ids are narrowed to int32 first, since
// the cuSPARSE coo2csr conversion operates on int buffers.
// NOTE(review): assumes the row indices are ordered as Xcoo2csr requires —
// confirm at the call sites.
THCudaIntTensor *THCSTensor_(toCSR)(THCState *state, THCIndexTensor *rowIndices, int64_t dim, int64_t nnz) {
THCudaIntTensor *csr = THCudaIntTensor_newWithSize1d(state, dim + 1);
THCudaIntTensor *rowIndicesInt = THCudaIntTensor_newWithSize1d(state, rowIndices->size[0]);
THCudaIntTensor_copyCudaLong(state, rowIndicesInt, rowIndices);
THCudaSparse_Xcoo2csr(
state, THCudaIntTensor_data(state, rowIndicesInt), nnz, dim, THCudaIntTensor_data(state, csr));
THCudaIntTensor_free(state, rowIndicesInt);
return csr;
}
void THCSTensor_(zero)(THCState *state, THCSTensor *self) {
if (self->indices->_dim()) {
THCIndexTensor_(resizeNdLegacy)(state, self->indices, 0, NULL, NULL);
}
if (self->values->_dim()) {
THCTensor_(resizeNdLegacy)(state, self->values, 0, NULL, NULL);
}
self->nnz = 0;
}
void THCSTensor_(zeros)(THCState *state, THCSTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 1, r_));
THCSTensor_(resizeLegacy)(state, r_, size);
THCSTensor_(zero)(state, r_);
}
void THCSTensor_(zerosLike)(THCState *state, THCSTensor *r_, THCSTensor *input)
{
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 2, r_, input));
THCSTensor_(resizeAs)(state, r_, input);
THCSTensor_(zero)(state, r_);
}
void THCTensor_(spaddcmul)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) {
THError("WARNING: Sparse Cuda Tensor op spaddcmul is not implemented");
}
void THCTensor_(spaddcdiv)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) {
THError("WARNING: Sparse Cuda Tensor op spaddcdiv is not implemented");
}
void THCSTensor_(spaddmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCSTensor *sparse_, THCTensor *dense) {
#if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE)
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 4, sparse_, r_, t, dense));
THCudaIntTensor *csr;
THCIndexTensor *indices;
THCTensor *values, *r__, *dense_;
THArgCheck(sparse_->nDimensionI == 2, 2,
"matrices expected, got %dD tensor", sparse_->nDimensionI);
THArgCheck(sparse_->nDimensionV == 0, 2,
"scalar values expected, got %dD values", sparse_->nDimensionV);
THArgCheck(dense->_dim() == 2, 2,
"matrices expected, got %dD tensor", dense->_dim());
int64_t m = THCSTensor_(size)(state, sparse_, 0);
int64_t k = THCSTensor_(size)(state, sparse_, 1);
int64_t n = THCTensor_(size)(state, dense, 1);
THCTensor_(resize2d)(state, r_, m, n);
THArgCheck(THCTensor_(size)(state, t, 0) == m, 1,
"Expected dim 0 size %d, got %d", m, THCTensor_(size)(state, t, 0));
THArgCheck(THCTensor_(size)(state, t, 1) == n, 1,
"Expected dim 1 size %d, got %d", n, THCTensor_(size)(state, t, 1));
THArgCheck(THCTensor_(size)(state, dense, 0) == k, 3,
"Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0));
THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_);
int64_t nnz = THCSTensor_(nnz)(state, sparse);
indices = THCSTensor_(newIndices)(state, sparse);
values = THCSTensor_(newValues)(state, sparse);
THCIndexTensor *rowIndices = THCIndexTensor_(newSelect)(state, indices, 0, 0);
THCIndexTensor *colIndices = THCIndexTensor_(newSelect)(state, indices, 0, 1);
csr = THCSTensor_(toCSR)(state, rowIndices, m, nnz);
THCudaIntTensor *colIndicesInt = THCudaIntTensor_newWithSize1d(state, colIndices->size[0]);
THCudaIntTensor_copyCudaLong(state, colIndicesInt, colIndices);
char transpose_dense;
if (beta == 0) {
THCTensor_(zero)(state, r_);
} else if (beta == ScalarConvert<int, real>::to(1)) {
if (t != r_) {
THCTensor_(copy)(state, r_, t);
}
} else {
THCTensor_(mul)(state, r_, t, beta);
}
/* r_ */
if(r_->stride[0] == 1 && r_->stride[1] == r_->size[0]) {
r__ = r_;
THCTensor_(retain)(state, r__);
} else {
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1);
r__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, r__, NULL, 0, 1);
}
/* dense */
if(dense->stride[0] == 1 && dense->stride[1] == dense->size[0]) {
transpose_dense = 'n';
dense_ = dense;
THCTensor_(retain)(state, dense_);
} else if(dense->stride[1] == 1 && dense->stride[0] != dense->size[1]) {
transpose_dense = 't';
dense_ = dense;
THCTensor_(retain)(state, dense_);
} else {
transpose_dense = 't';
dense_ = THCTensor_(newContiguous)(state, dense);
}
#if defined(THCS_REAL_IS_FLOAT)
THCudaSparse_Scsrmm2(
#elif defined(THCS_REAL_IS_DOUBLE)
THCudaSparse_Dcsrmm2(
#endif
state,
'n',
transpose_dense,
m,
n,
k,
nnz,
alpha,
THCTensor_(data)(state, values),
THCudaIntTensor_data(state, csr),
THCudaIntTensor_data(state, colIndicesInt),
THCTensor_(data)(state, dense_),
(transpose_dense == 'n' ? dense_->stride[1] : dense_->stride[0]),
beta,
THCTensor_(data)(state, r__),
r__->stride[1]);
/* free intermediate variables */
THCTensor_(free)(state, dense_);
THCTensor_(freeCopyTo)(state, r__, r_);
THCudaIntTensor_free(state, colIndicesInt);
THCudaIntTensor_free(state, csr);
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, rowIndices);
THCIndexTensor_(free)(state, colIndices);
THCTensor_(free)(state, values);
THCSTensor_(free)(state, sparse);
#else
THError("unimplemented data type");
#endif
}
void THCSTensor_(sspaddmm)(THCState *state, THCSTensor *r_, real beta, THCSTensor *t, real alpha, THCSTensor *sparse, THCTensor *dense) {
THError("WARNING: Sparse Cuda Tensor op sspaddmm is not implemented");
// TODO Write some kernels
}
void THCSTensor_(hspmm)(THCState *state, THCSTensor *r_, real alpha, THCSTensor *sparse_, THCTensor *dense) {
#if TORCH_HIP_VERSION >= 7000 || defined(__HIP_PLATFORM_HCC__)
THCThrustAllocator thrustAlloc(state);
#define THRUST_EXEC(fn, ...) fn(thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__)
#else
#define THRUST_EXEC(fn, ...) fn(##__VA_ARGS__)
#endif
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 3, r_, sparse_, dense));
THArgCheck(sparse_->nDimensionI == 2, 3,
"matrices expected, got %dD tensor", sparse_->nDimensionI);
THArgCheck(sparse_->nDimensionV == 0, 3,
"scalar values expected, got %dD values", sparse_->nDimensionV);
THArgCheck(dense->_dim() == 2, 4,
"matrices expected, got %dD tensor", dense->_dim());
int64_t m = THCSTensor_(size)(state, sparse_, 0);
int64_t k = THCSTensor_(size)(state, sparse_, 1);
int64_t n = THCTensor_(size)(state, dense, 1);
THArgCheck(THCTensor_(size)(state, dense, 0) == k, 4,
"Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0));
int64_t size[2] = {m, n};
THCSTensor_(rawResize)(state, r_, 1, 1, size);
THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_);
int64_t nnz = THCSTensor_(nnz)(state, sparse);
THCIndexTensor *indices = THCIndexTensor_(newWithSize2d)(state, 1, nnz);
// create values in column-major format to avoid copying in spaddmm
THCTensor *values = THCTensor_(newWithSize2d)(state, n, nnz);
THCTensor_(transpose)(state, values, NULL, 0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
THCSTensor *newSparse = THCSTensor_(newClone)(state, sparse);
THCIndexTensor *spIndices = THCSTensor_(newIndices)(state, newSparse);
THCIndexTensor *dstIndices = THCIndexTensor_(newSelect)(state, spIndices, 0, 0);
// Save destination indices to output hybrid tensor
THCIndexTensor_(copy)(state, indices, dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<indexT> indicesIter(THCIndexTensor_(data)(state, dstIndices));
THRUST_EXEC(thrust::sequence, indicesIter, indicesIter + nnz);
newSparse->size[0] = nnz;
THCSTensor_(spaddmm)(state, values, ScalarConvert<int, real>::to(0), values, alpha, newSparse, dense);
THCSTensor_(_move)(state, r_, indices, values);
THCSTensor_(free)(state, newSparse);
THCIndexTensor_(free)(state, spIndices);
THCIndexTensor_(free)(state, dstIndices);
THCSTensor_(free)(state, sparse);
#undef THRUST_EXEC
}
void THCSTensor_(spcadd)(THCState *state, THCTensor *r_, THCTensor *dense, real value, THCSTensor *sparse) {
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 3, sparse, r_, dense));
const ptrdiff_t nnz = THCSTensor_(nnz)(state, sparse);
if (nnz == 0) {
THCTensor_(resizeAs)(state, r_, dense);
THCTensor_(copy)(state, r_, dense);
return;
}
THCTensor *r = r_;
if (r != dense) {
THCTensor_(retain)(state, r);
THCTensor_(resizeAs)(state, r, dense);
THCTensor_(copy)(state, r, dense);
} else {
if (!THCTensor_(isContiguous)(state, r_)) {
THError("CUDA sparse spcadd: known bug");
}
r = THCTensor_(newContiguous)(state, r_);
}
THCIndexTensor *indices = THCSTensor_(newIndices)(state, sparse);
THCTensor *values = THCSTensor_(newValues)(state, sparse);
int64_t nDim = THCTensor_(nDimension)(state, dense);
int64_t nDimI = THCSTensor_(nDimensionI)(state, sparse);
if (THCSTensor_(isCoalesced)(state, sparse)) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
if (sparse->nDimensionV == 0) {
THArgCheck(getApplyGrid(state, nnz, grid, curDevice), 1, CUTORCH_DIM_WARNING);
hipLaunchKernelGGL(( THCSTensor_sparseElementwiseKernelScalar<TensorCAddOp<real>, uint64_t, real>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice),
TensorCAddOp<real>(value),
V_INFO(r_), I_INFO(indices), V_INFO(values),
(uint64_t) nnz);
} else {
THArgCheck(getApplyGrid(state, nnz * block.x, grid, curDevice), 1, CUTORCH_DIM_WARNING);
hipLaunchKernelGGL(( THCSTensor_sparseElementwiseKernel<TensorCAddOp<real>, uint64_t, real>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice),
TensorCAddOp<real>(value),
V_INFO(r_), I_INFO(indices), V_INFO(values),
(uint64_t) nnz);
}
} else {
THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, sparse, 0);
THCIndexTensor_(resize1d)(state, indices1D, nnz);
if (value != ScalarConvert<int, real>::to(1)) {
// FIXME: at some point we can wrap the scale into indexAdd
THCTensor *scaled = THCTensor_(new)(state);
THCTensor_(mul)(state, scaled, values, value);
THCTensor_(free)(state, values);
values = scaled;
}
int64_t view_rows = 1;
int64_t view_columns = 1;
THLongStorage *r_size = THCTensor_(newSizeOf)(state, r);
for (int i = 0; i < nDimI; i++)
view_rows *= THLongStorage_data(r_size)[i];
for (int i = nDimI; i < nDim; i++)
view_columns *= THLongStorage_data(r_size)[i];
THLongStorage *r_view_size = THLongStorage_newWithSize2(view_rows, view_columns);
THCTensor *r_view = THCTensor_(newView)(state, r, r_view_size);
THCTensor_(resize2d)(state, values, nnz, view_columns);
THCTensor_(indexAdd)(state, r_view, 0, indices1D, values);
THCIndexTensor_(free)(state, indices1D);
THLongStorage_free(r_size);
THLongStorage_free(r_view_size);
THCTensor_(free)(state, r_view);
}
THCudaCheck(hipGetLastError());
THCIndexTensor_(free)(state, indices);
THCTensor_(free)(state, values);
THCTensor_(free)(state, r);
}
void THCSTensor_(mul)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) {
if (r_ == t) {
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCTensor_(mul)(state, r_values_, r_values_, value);
THCTensor_(free)(state, r_values_);
} else {
THCSTensor_(resizeAs)(state, r_, t);
THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_);
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_);
THCIndexTensor_(copy)(state, r_indices_, t_indices_);
THCTensor_(mul)(state, r_values_, t_values_, value);
r_->nnz = t->nnz;
r_->coalesced = t->coalesced;
THCIndexTensor_(free)(state, r_indices_);
THCTensor_(free)(state, r_values_);
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
}
}
void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) {
if (r_ == t) {
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCTensor_(div)(state, r_values_, r_values_, value);
THCTensor_(free)(state, r_values_);
} else {
THCSTensor_(resizeAs)(state, r_, t);
THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_);
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_);
THCIndexTensor_(copy)(state, r_indices_, t_indices_);
THCTensor_(div)(state, r_values_, t_values_, value);
r_->nnz = t->nnz;
r_->coalesced = t->coalesced;
THCIndexTensor_(free)(state, r_indices_);
THCTensor_(free)(state, r_values_);
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
}
}
int THCSTensor_(isSameSizeIgnoringDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
int d;
if (self->nDimensionI + self->nDimensionV != src->nDimensionI + src->nDimensionV) {
return 0;
}
for(d = 0; d < self->nDimensionI + self->nDimensionV; ++d) {
if(self->size[d] != src->size[d]) {
return 0;
}
}
return 1;
}
int THCSTensor_(isSameDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
return self->nDimensionI == src->nDimensionI &&
self->nDimensionV == src->nDimensionV;
}
void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) {
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src));
if (!THCSTensor_(isSameSizeIgnoringDensity)(state, t, src)) {
THError("cadd operands have incompatible sizes");
}
if (src->nnz == 0) {
THCSTensor_(copy)(state, r_, t);
return;
}
if (t->nnz == 0) {
THCSTensor_(mul)(state, r_, src, value);
return;
}
if(!THCSTensor_(isSameDensity)(state, t, src)) {
THError("cadd operands have incompatible densities");
}
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src);
THCTensor *s_values_ = THCSTensor_(newValues)(state, src);
if (value != ScalarConvert<int, real>::to(1)) {
THCTensor *s_values_orig = s_values_;
s_values_ = THCTensor_(new)(state);
THCTensor_(mul)(state, s_values_, s_values_orig, value);
THCTensor_(free)(state, s_values_orig);
}
THCIndexTensor *r_indices_ = THCIndexTensor_(new)(state);
THCTensor *r_values_ = THCTensor_(new)(state);
THCIndexTensor_(cat)(state, r_indices_, t_indices_, s_indices_, 1);
THCTensor_(cat)(state, r_values_, t_values_, s_values_, 0);
THCSTensor_(resizeAs)(state, r_, src);
THCSTensor_(_move)(state, r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
// THCSTensor_(contiguous)(r);
// r->minUnique = r->nnz;
// }
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
THCIndexTensor_(free)(state, s_indices_);
THCTensor_(free)(state, s_values_);
}
void THCSTensor_(csub)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) {
THCSTensor_(cadd)(state, r_, t, ScalarNegate<real>::to(value), src);
}
void THCSTensor_(cmul)(THCState *state, THCSTensor *r_, THCSTensor *t_, THCSTensor *src_) {
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t_, src_));
if(!THCSTensor_(isSameSizeAs)(state, t_, src_)) {
THError("cmul operands have incompatible sizes or dimension types");
}
THCSTensor *t = THCSTensor_(newCoalesce)(state, t_);
THCSTensor *src = THCSTensor_(newCoalesce)(state, src_);
if (t->nnz == 0 || src->nnz == 0) {
THCSTensor_(zero)(state, r_);
return;
}
// saving those because they can be overwritten when doing in-place operations
ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz;
ptrdiff_t max_nnz = t_nnz < s_nnz ? t_nnz : s_nnz;
int64_t nDimI = src->nDimensionI;
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src);
THCTensor *s_values_ = THCSTensor_(newValues)(state, src);
THCIndexTensor *r_indices_ = THCIndexTensor_(newWithSize2d)(state, nDimI, max_nnz);
THCTensor *r_values_ = THCSTensor_(newValuesWithSizeOf)(state, s_values_, max_nnz);
THCTensor_(zero)(state, r_values_);
THCSTensor_(resizeAs)(state, r_, src);
THCSTensor_(_move)(state, r_, r_indices_, r_values_);
int64_t valueSize = t_values_->stride[0];
const dim3 block = dim3(min((int64_t) getApplyBlock().x, valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, valueSize, grid, curDevice), 1, CUTORCH_DIM_WARNING);
hipLaunchKernelGGL(( THCSTensor_valueSparseIntersectionKernel<TensorMulOp<real>, uint64_t, real>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice),
TensorMulOp<real>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
(uint64_t)t_nnz, (uint64_t)s_nnz);
THCudaCheck(hipGetLastError());
THCudaLongStorage *resultNnz = THCudaLongStorage_newWithSize(state, 1);
hipLaunchKernelGGL(( THCSTensor_indexSparseIntersectionKernel<uint64_t, real>)
, dim3(1), dim3(1), 0, THCState_getCurrentStreamOnDevice(state, curDevice),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
(uint64_t)t_nnz, (uint64_t)s_nnz, (uint64_t*)THCudaLongStorage_data(state, resultNnz));
THCudaCheck(hipGetLastError());
r_->nnz = THCudaLongStorage_get(state, resultNnz, 0);
THCudaLongStorage_free(state, resultNnz);
r_->coalesced = 1;
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
THCIndexTensor_(free)(state, s_indices_);
THCTensor_(free)(state, s_values_);
THCSTensor_(free)(state, t);
THCSTensor_(free)(state, src);
}
void THCSTensor_(pow)(THCState *state, THCSTensor *r_, THCSTensor *t_, real value) {
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(0))) {
THError("cannot raise to zeroth power on sparse tensor");
}
THCSTensor *t = THCSTensor_(newCoalesce)(state, t_);
THCSTensor_(resizeAs)(state, r_, t);
THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_);
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_);
THCIndexTensor_(copy)(state, r_indices_, t_indices_);
THCTensor_(pow)(state, r_values_, t_values_, value);
r_->nnz = t->nnz;
r_->coalesced = t->coalesced;
THCIndexTensor_(free)(state, r_indices_);
THCTensor_(free)(state, r_values_);
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
THCSTensor_(free)(state, t);
}
#if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) || defined(THCS_REAL_IS_HALF)
accreal THCSTensor_(normall)(THCState *state, THCSTensor *self, real value) {
THCSTensor* self_coalesced = THCSTensor_(newCoalesce)(state, self);
accreal result = THCTensor_(normall)(state, self_coalesced->values, value);
THCSTensor_(free)(state, self_coalesced);
return result;
}
#endif
#undef ROW_PTR2
#undef COL_PTR2
#endif
| b8c84e1d34e6ce720f8e388d7031f7a163e8e79f.cu | #ifndef THCS_GENERIC_FILE
#define THCS_GENERIC_FILE "generic/THCSTensorMath.cu"
#else
#include "THCThrustAllocator.cuh"
#include "THCNumerics.cuh"
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#define ROW_PTR2(t, r) (THCTensor_(data)(THCState *state, t) + (r) * (t)->stride[0])
#define COL_PTR2(t, c) (THCTensor_(data)(THCState *state, t) + (c) * (t)->stride[1])
#define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor)
#define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor)
THCudaIntTensor *THCSTensor_(toCSR)(THCState *state, THCIndexTensor *rowIndices, int64_t dim, int64_t nnz) {
THCudaIntTensor *csr = THCudaIntTensor_newWithSize1d(state, dim + 1);
THCudaIntTensor *rowIndicesInt = THCudaIntTensor_newWithSize1d(state, rowIndices->size[0]);
THCudaIntTensor_copyCudaLong(state, rowIndicesInt, rowIndices);
THCudaSparse_Xcoo2csr(
state, THCudaIntTensor_data(state, rowIndicesInt), nnz, dim, THCudaIntTensor_data(state, csr));
THCudaIntTensor_free(state, rowIndicesInt);
return csr;
}
void THCSTensor_(zero)(THCState *state, THCSTensor *self) {
if (self->indices->_dim()) {
THCIndexTensor_(resizeNdLegacy)(state, self->indices, 0, NULL, NULL);
}
if (self->values->_dim()) {
THCTensor_(resizeNdLegacy)(state, self->values, 0, NULL, NULL);
}
self->nnz = 0;
}
void THCSTensor_(zeros)(THCState *state, THCSTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 1, r_));
THCSTensor_(resizeLegacy)(state, r_, size);
THCSTensor_(zero)(state, r_);
}
void THCSTensor_(zerosLike)(THCState *state, THCSTensor *r_, THCSTensor *input)
{
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 2, r_, input));
THCSTensor_(resizeAs)(state, r_, input);
THCSTensor_(zero)(state, r_);
}
void THCTensor_(spaddcmul)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) {
THError("WARNING: Sparse Cuda Tensor op spaddcmul is not implemented");
}
void THCTensor_(spaddcdiv)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) {
THError("WARNING: Sparse Cuda Tensor op spaddcdiv is not implemented");
}
void THCSTensor_(spaddmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCSTensor *sparse_, THCTensor *dense) {
#if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE)
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 4, sparse_, r_, t, dense));
THCudaIntTensor *csr;
THCIndexTensor *indices;
THCTensor *values, *r__, *dense_;
THArgCheck(sparse_->nDimensionI == 2, 2,
"matrices expected, got %dD tensor", sparse_->nDimensionI);
THArgCheck(sparse_->nDimensionV == 0, 2,
"scalar values expected, got %dD values", sparse_->nDimensionV);
THArgCheck(dense->_dim() == 2, 2,
"matrices expected, got %dD tensor", dense->_dim());
int64_t m = THCSTensor_(size)(state, sparse_, 0);
int64_t k = THCSTensor_(size)(state, sparse_, 1);
int64_t n = THCTensor_(size)(state, dense, 1);
THCTensor_(resize2d)(state, r_, m, n);
THArgCheck(THCTensor_(size)(state, t, 0) == m, 1,
"Expected dim 0 size %d, got %d", m, THCTensor_(size)(state, t, 0));
THArgCheck(THCTensor_(size)(state, t, 1) == n, 1,
"Expected dim 1 size %d, got %d", n, THCTensor_(size)(state, t, 1));
THArgCheck(THCTensor_(size)(state, dense, 0) == k, 3,
"Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0));
THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_);
int64_t nnz = THCSTensor_(nnz)(state, sparse);
indices = THCSTensor_(newIndices)(state, sparse);
values = THCSTensor_(newValues)(state, sparse);
THCIndexTensor *rowIndices = THCIndexTensor_(newSelect)(state, indices, 0, 0);
THCIndexTensor *colIndices = THCIndexTensor_(newSelect)(state, indices, 0, 1);
csr = THCSTensor_(toCSR)(state, rowIndices, m, nnz);
THCudaIntTensor *colIndicesInt = THCudaIntTensor_newWithSize1d(state, colIndices->size[0]);
THCudaIntTensor_copyCudaLong(state, colIndicesInt, colIndices);
char transpose_dense;
if (beta == 0) {
THCTensor_(zero)(state, r_);
} else if (beta == ScalarConvert<int, real>::to(1)) {
if (t != r_) {
THCTensor_(copy)(state, r_, t);
}
} else {
THCTensor_(mul)(state, r_, t, beta);
}
/* r_ */
if(r_->stride[0] == 1 && r_->stride[1] == r_->size[0]) {
r__ = r_;
THCTensor_(retain)(state, r__);
} else {
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1);
r__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, r__, NULL, 0, 1);
}
/* dense */
if(dense->stride[0] == 1 && dense->stride[1] == dense->size[0]) {
transpose_dense = 'n';
dense_ = dense;
THCTensor_(retain)(state, dense_);
} else if(dense->stride[1] == 1 && dense->stride[0] != dense->size[1]) {
transpose_dense = 't';
dense_ = dense;
THCTensor_(retain)(state, dense_);
} else {
transpose_dense = 't';
dense_ = THCTensor_(newContiguous)(state, dense);
}
#if defined(THCS_REAL_IS_FLOAT)
THCudaSparse_Scsrmm2(
#elif defined(THCS_REAL_IS_DOUBLE)
THCudaSparse_Dcsrmm2(
#endif
state,
'n',
transpose_dense,
m,
n,
k,
nnz,
alpha,
THCTensor_(data)(state, values),
THCudaIntTensor_data(state, csr),
THCudaIntTensor_data(state, colIndicesInt),
THCTensor_(data)(state, dense_),
(transpose_dense == 'n' ? dense_->stride[1] : dense_->stride[0]),
beta,
THCTensor_(data)(state, r__),
r__->stride[1]);
/* free intermediate variables */
THCTensor_(free)(state, dense_);
THCTensor_(freeCopyTo)(state, r__, r_);
THCudaIntTensor_free(state, colIndicesInt);
THCudaIntTensor_free(state, csr);
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, rowIndices);
THCIndexTensor_(free)(state, colIndices);
THCTensor_(free)(state, values);
THCSTensor_(free)(state, sparse);
#else
THError("unimplemented data type");
#endif
}
void THCSTensor_(sspaddmm)(THCState *state, THCSTensor *r_, real beta, THCSTensor *t, real alpha, THCSTensor *sparse, THCTensor *dense) {
THError("WARNING: Sparse Cuda Tensor op sspaddmm is not implemented");
// TODO Write some kernels
}
void THCSTensor_(hspmm)(THCState *state, THCSTensor *r_, real alpha, THCSTensor *sparse_, THCTensor *dense) {
#if CUDA_VERSION >= 7000 || defined(__HIP_PLATFORM_HCC__)
THCThrustAllocator thrustAlloc(state);
#define THRUST_EXEC(fn, ...) fn(thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__)
#else
#define THRUST_EXEC(fn, ...) fn(##__VA_ARGS__)
#endif
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 3, r_, sparse_, dense));
THArgCheck(sparse_->nDimensionI == 2, 3,
"matrices expected, got %dD tensor", sparse_->nDimensionI);
THArgCheck(sparse_->nDimensionV == 0, 3,
"scalar values expected, got %dD values", sparse_->nDimensionV);
THArgCheck(dense->_dim() == 2, 4,
"matrices expected, got %dD tensor", dense->_dim());
int64_t m = THCSTensor_(size)(state, sparse_, 0);
int64_t k = THCSTensor_(size)(state, sparse_, 1);
int64_t n = THCTensor_(size)(state, dense, 1);
THArgCheck(THCTensor_(size)(state, dense, 0) == k, 4,
"Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0));
int64_t size[2] = {m, n};
THCSTensor_(rawResize)(state, r_, 1, 1, size);
THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_);
int64_t nnz = THCSTensor_(nnz)(state, sparse);
THCIndexTensor *indices = THCIndexTensor_(newWithSize2d)(state, 1, nnz);
// create values in column-major format to avoid copying in spaddmm
THCTensor *values = THCTensor_(newWithSize2d)(state, n, nnz);
THCTensor_(transpose)(state, values, NULL, 0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
THCSTensor *newSparse = THCSTensor_(newClone)(state, sparse);
THCIndexTensor *spIndices = THCSTensor_(newIndices)(state, newSparse);
THCIndexTensor *dstIndices = THCIndexTensor_(newSelect)(state, spIndices, 0, 0);
// Save destination indices to output hybrid tensor
THCIndexTensor_(copy)(state, indices, dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<indexT> indicesIter(THCIndexTensor_(data)(state, dstIndices));
THRUST_EXEC(thrust::sequence, indicesIter, indicesIter + nnz);
newSparse->size[0] = nnz;
THCSTensor_(spaddmm)(state, values, ScalarConvert<int, real>::to(0), values, alpha, newSparse, dense);
THCSTensor_(_move)(state, r_, indices, values);
THCSTensor_(free)(state, newSparse);
THCIndexTensor_(free)(state, spIndices);
THCIndexTensor_(free)(state, dstIndices);
THCSTensor_(free)(state, sparse);
#undef THRUST_EXEC
}
void THCSTensor_(spcadd)(THCState *state, THCTensor *r_, THCTensor *dense, real value, THCSTensor *sparse) {
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 3, sparse, r_, dense));
const ptrdiff_t nnz = THCSTensor_(nnz)(state, sparse);
if (nnz == 0) {
THCTensor_(resizeAs)(state, r_, dense);
THCTensor_(copy)(state, r_, dense);
return;
}
THCTensor *r = r_;
if (r != dense) {
THCTensor_(retain)(state, r);
THCTensor_(resizeAs)(state, r, dense);
THCTensor_(copy)(state, r, dense);
} else {
if (!THCTensor_(isContiguous)(state, r_)) {
THError("CUDA sparse spcadd: known bug");
}
r = THCTensor_(newContiguous)(state, r_);
}
THCIndexTensor *indices = THCSTensor_(newIndices)(state, sparse);
THCTensor *values = THCSTensor_(newValues)(state, sparse);
int64_t nDim = THCTensor_(nDimension)(state, dense);
int64_t nDimI = THCSTensor_(nDimensionI)(state, sparse);
if (THCSTensor_(isCoalesced)(state, sparse)) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
if (sparse->nDimensionV == 0) {
THArgCheck(getApplyGrid(state, nnz, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCSTensor_sparseElementwiseKernelScalar<TensorCAddOp<real>, uint64_t, real>
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>(
TensorCAddOp<real>(value),
V_INFO(r_), I_INFO(indices), V_INFO(values),
(uint64_t) nnz);
} else {
THArgCheck(getApplyGrid(state, nnz * block.x, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCSTensor_sparseElementwiseKernel<TensorCAddOp<real>, uint64_t, real>
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>(
TensorCAddOp<real>(value),
V_INFO(r_), I_INFO(indices), V_INFO(values),
(uint64_t) nnz);
}
} else {
THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, sparse, 0);
THCIndexTensor_(resize1d)(state, indices1D, nnz);
if (value != ScalarConvert<int, real>::to(1)) {
// FIXME: at some point we can wrap the scale into indexAdd
THCTensor *scaled = THCTensor_(new)(state);
THCTensor_(mul)(state, scaled, values, value);
THCTensor_(free)(state, values);
values = scaled;
}
int64_t view_rows = 1;
int64_t view_columns = 1;
THLongStorage *r_size = THCTensor_(newSizeOf)(state, r);
for (int i = 0; i < nDimI; i++)
view_rows *= THLongStorage_data(r_size)[i];
for (int i = nDimI; i < nDim; i++)
view_columns *= THLongStorage_data(r_size)[i];
THLongStorage *r_view_size = THLongStorage_newWithSize2(view_rows, view_columns);
THCTensor *r_view = THCTensor_(newView)(state, r, r_view_size);
THCTensor_(resize2d)(state, values, nnz, view_columns);
THCTensor_(indexAdd)(state, r_view, 0, indices1D, values);
THCIndexTensor_(free)(state, indices1D);
THLongStorage_free(r_size);
THLongStorage_free(r_view_size);
THCTensor_(free)(state, r_view);
}
THCudaCheck(cudaGetLastError());
THCIndexTensor_(free)(state, indices);
THCTensor_(free)(state, values);
THCTensor_(free)(state, r);
}
void THCSTensor_(mul)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) {
if (r_ == t) {
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCTensor_(mul)(state, r_values_, r_values_, value);
THCTensor_(free)(state, r_values_);
} else {
THCSTensor_(resizeAs)(state, r_, t);
THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_);
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_);
THCIndexTensor_(copy)(state, r_indices_, t_indices_);
THCTensor_(mul)(state, r_values_, t_values_, value);
r_->nnz = t->nnz;
r_->coalesced = t->coalesced;
THCIndexTensor_(free)(state, r_indices_);
THCTensor_(free)(state, r_values_);
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
}
}
void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) {
if (r_ == t) {
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCTensor_(div)(state, r_values_, r_values_, value);
THCTensor_(free)(state, r_values_);
} else {
THCSTensor_(resizeAs)(state, r_, t);
THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_);
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_);
THCIndexTensor_(copy)(state, r_indices_, t_indices_);
THCTensor_(div)(state, r_values_, t_values_, value);
r_->nnz = t->nnz;
r_->coalesced = t->coalesced;
THCIndexTensor_(free)(state, r_indices_);
THCTensor_(free)(state, r_values_);
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
}
}
// True (1) when self and src have identical total dimensionality and equal
// sizes in every dimension, regardless of how the dims split between the
// sparse (nDimensionI) and dense (nDimensionV) parts.
int THCSTensor_(isSameSizeIgnoringDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
  int selfDims = self->nDimensionI + self->nDimensionV;
  int srcDims = src->nDimensionI + src->nDimensionV;
  if (selfDims != srcDims) {
    return 0;
  }
  for (int d = 0; d < selfDims; ++d) {
    if (self->size[d] != src->size[d]) {
      return 0;
    }
  }
  return 1;
}
// True (1) when self and src agree on the sparse/dense dimension split.
int THCSTensor_(isSameDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
  if (self->nDimensionI != src->nDimensionI) {
    return 0;
  }
  return self->nDimensionV == src->nDimensionV;
}
// Sparse addition: r_ = t + value * src.
// Sizes must match (ignoring the sparse/dense split); when both operands are
// non-empty their densities must also match. The result is built by
// concatenating indices/values, so it is generally NOT coalesced.
void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) {
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src));
if (!THCSTensor_(isSameSizeIgnoringDensity)(state, t, src)) {
THError("cadd operands have incompatible sizes");
}
// Trivial cases: one empty operand reduces to a copy or a scalar multiply.
if (src->nnz == 0) {
THCSTensor_(copy)(state, r_, t);
return;
}
if (t->nnz == 0) {
THCSTensor_(mul)(state, r_, src, value);
return;
}
if(!THCSTensor_(isSameDensity)(state, t, src)) {
THError("cadd operands have incompatible densities");
}
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src);
THCTensor *s_values_ = THCSTensor_(newValues)(state, src);
// Pre-scale src's values only when the scalar is not 1, into a fresh tensor
// so src itself is left untouched.
if (value != ScalarConvert<int, real>::to(1)) {
THCTensor *s_values_orig = s_values_;
s_values_ = THCTensor_(new)(state);
THCTensor_(mul)(state, s_values_, s_values_orig, value);
THCTensor_(free)(state, s_values_orig);
}
THCIndexTensor *r_indices_ = THCIndexTensor_(new)(state);
THCTensor *r_values_ = THCTensor_(new)(state);
// indices concat along dim 1 (per-entry columns), values along dim 0 (rows)
THCIndexTensor_(cat)(state, r_indices_, t_indices_, s_indices_, 1);
THCTensor_(cat)(state, r_values_, t_values_, s_values_, 0);
THCSTensor_(resizeAs)(state, r_, src);
// _move transfers ownership of r_indices_/r_values_ into r_
THCSTensor_(_move)(state, r_, r_indices_, r_values_);
// FIXME: add some heuristic about when to call coalesce() here, so that
// tensors don't totally blow up in size by concatenation; e.g.
// r->minUnique = max(a->minUnique + b->minUnique);
// if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) {
//   THCSTensor_(contiguous)(r);
//   r->minUnique = r->nnz;
// }
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
THCIndexTensor_(free)(state, s_indices_);
THCTensor_(free)(state, s_values_);
}
// Sparse subtraction: r_ = t - value * src, delegated to cadd with the
// scalar negated.
void THCSTensor_(csub)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) {
THCSTensor_(cadd)(state, r_, t, ScalarNegate<real>::to(value), src);
}
// Sparse-sparse elementwise multiply: r_ = t_ * src_ over the intersection
// of their supports. Both operands are coalesced first so their indices are
// sorted and unique; one kernel multiplies matching values, a second
// single-thread kernel counts the intersection to produce the result nnz.
void THCSTensor_(cmul)(THCState *state, THCSTensor *r_, THCSTensor *t_, THCSTensor *src_) {
  THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t_, src_));
  if(!THCSTensor_(isSameSizeAs)(state, t_, src_)) {
    THError("cmul operands have incompatible sizes or dimension types");
  }
  THCSTensor *t = THCSTensor_(newCoalesce)(state, t_);
  THCSTensor *src = THCSTensor_(newCoalesce)(state, src_);
  if (t->nnz == 0 || src->nnz == 0) {
    // Empty intersection: result is all zero. Release the coalesced copies
    // before returning — the original code leaked both here.
    THCSTensor_(zero)(state, r_);
    THCSTensor_(free)(state, t);
    THCSTensor_(free)(state, src);
    return;
  }
  // saving those because they can be overwritten when doing in-place operations
  ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz;
  // the intersection can have at most min(t_nnz, s_nnz) entries
  ptrdiff_t max_nnz = t_nnz < s_nnz ? t_nnz : s_nnz;
  int64_t nDimI = src->nDimensionI;
  THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
  THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
  THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src);
  THCTensor *s_values_ = THCSTensor_(newValues)(state, src);
  THCIndexTensor *r_indices_ = THCIndexTensor_(newWithSize2d)(state, nDimI, max_nnz);
  THCTensor *r_values_ = THCSTensor_(newValuesWithSizeOf)(state, s_values_, max_nnz);
  THCTensor_(zero)(state, r_values_);
  THCSTensor_(resizeAs)(state, r_, src);
  // _move transfers ownership of r_indices_/r_values_ into r_
  THCSTensor_(_move)(state, r_, r_indices_, r_values_);
  // one "apply" element per value slot
  int64_t valueSize = t_values_->stride[0];
  const dim3 block = dim3(min((int64_t) getApplyBlock().x, valueSize));
  dim3 grid;
  int curDevice = -1;
  cudaGetDevice(&curDevice);
  THArgCheck(getApplyGrid(state, valueSize, grid, curDevice), 1, CUTORCH_DIM_WARNING);
  // multiply values at matching indices into r_values_
  THCSTensor_valueSparseIntersectionKernel<TensorMulOp<real>, uint64_t, real>
    <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>(
      TensorMulOp<real>(),
      I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
      V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
      (uint64_t)t_nnz, (uint64_t)s_nnz);
  THCudaCheck(cudaGetLastError());
  // count the intersection size on-device to obtain the result nnz
  THCudaLongStorage *resultNnz = THCudaLongStorage_newWithSize(state, 1);
  THCSTensor_indexSparseIntersectionKernel<uint64_t, real>
    <<<1, 1, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>(
      I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
      (uint64_t)t_nnz, (uint64_t)s_nnz, (uint64_t*)THCudaLongStorage_data(state, resultNnz));
  THCudaCheck(cudaGetLastError());
  // THCudaLongStorage_get synchronizes to fetch the device-side count
  r_->nnz = THCudaLongStorage_get(state, resultNnz, 0);
  THCudaLongStorage_free(state, resultNnz);
  // both inputs were coalesced, so the intersection is too
  r_->coalesced = 1;
  THCIndexTensor_(free)(state, t_indices_);
  THCTensor_(free)(state, t_values_);
  THCIndexTensor_(free)(state, s_indices_);
  THCTensor_(free)(state, s_values_);
  THCSTensor_(free)(state, t);
  THCSTensor_(free)(state, src);
}
// Elementwise power: r_ = t_ ** value, applied to the stored values only.
// A zero exponent is rejected — presumably because 0**0 == 1 would turn
// every implicit zero into a stored value and densify the tensor
// (NOTE(review): rationale inferred from the error text; confirm).
void THCSTensor_(pow)(THCState *state, THCSTensor *r_, THCSTensor *t_, real value) {
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(0))) {
THError("cannot raise to zeroth power on sparse tensor");
}
// coalesce first so duplicate index entries are summed before exponentiation
THCSTensor *t = THCSTensor_(newCoalesce)(state, t_);
THCSTensor_(resizeAs)(state, r_, t);
THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_);
THCTensor *r_values_ = THCSTensor_(newValues)(state, r_);
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t);
THCTensor *t_values_ = THCSTensor_(newValues)(state, t);
THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_);
THCIndexTensor_(copy)(state, r_indices_, t_indices_);
THCTensor_(pow)(state, r_values_, t_values_, value);
r_->nnz = t->nnz;
r_->coalesced = t->coalesced;
THCIndexTensor_(free)(state, r_indices_);
THCTensor_(free)(state, r_values_);
THCIndexTensor_(free)(state, t_indices_);
THCTensor_(free)(state, t_values_);
THCSTensor_(free)(state, t);
}
#if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) || defined(THCS_REAL_IS_HALF)
// p-norm of a sparse tensor, computed over the stored values only.
// Coalesces first so duplicate index entries are summed before the norm.
accreal THCSTensor_(normall)(THCState *state, THCSTensor *self, real value) {
THCSTensor* self_coalesced = THCSTensor_(newCoalesce)(state, self);
accreal result = THCTensor_(normall)(state, self_coalesced->values, value);
THCSTensor_(free)(state, self_coalesced);
return result;
}
#endif
#undef ROW_PTR2
#undef COL_PTR2
#endif
|
floyd_sm.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
extern "C" {
#include "MatUtil.h"
}
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
// Device helper: return the smaller of two ints.
__device__
int Min(int a, int b) { return b < a ? b : a; }
// One Floyd–Warshall relaxation step for intermediate vertex k over an
// N x N matrix mat, where -1 encodes "no path". Launch layout:
// blockDim = (TILE_HEIGHT, TILE_WIDTH), one thread per cell (i, j).
// Row-k and column-k entries for the tile are staged in shared memory.
__global__
void SharedMemoryFloydWarshall(int* mat, int k, int N) {
    __shared__ int dist_i_k[TILE_HEIGHT];
    __shared__ int dist_k_j[TILE_WIDTH];
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    bool in_bounds = (i < N && j < N);
    // Stage mat[k][j] and mat[i][k] for this tile. The loads stay guarded,
    // but the barrier below must be reached by every thread of the block:
    // the original kernel placed __syncthreads() inside the bounds check,
    // which is undefined behavior (potential deadlock) whenever N is not a
    // multiple of the tile size.
    if (in_bounds && i % TILE_HEIGHT == 0) {
        dist_k_j[j % TILE_WIDTH] = mat[k*N + j];
    }
    if (in_bounds && j % TILE_WIDTH == 0) {
        dist_i_k[i % TILE_HEIGHT] = mat[i*N + k];
    }
    __syncthreads();
    if (in_bounds) {
        int dist_i_j = mat[i*N + j];
        // relax only when both legs through k exist (-1 = unreachable)
        if (dist_i_k[i % TILE_HEIGHT] != -1 && dist_k_j[j % TILE_WIDTH] != -1) {
            int new_dist = dist_i_k[i % TILE_HEIGHT] + dist_k_j[j % TILE_WIDTH];
            if (dist_i_j != -1) {
                new_dist = Min(new_dist, dist_i_j);
            }
            mat[i*N + j] = new_dist;
        }
    }
}
// Host driver: copies mat to the device, launches one relaxation kernel per
// intermediate vertex k (the per-k dependency forces serialized launches),
// and copies the result back into mat.
void SharedMemoryFloydWarshallDriver(int* mat, int N, dim3 thread_per_block) {
    int* cuda_mat;
    // size_t: the original used int, which overflows for N >= 23171
    // (4*N*N > INT_MAX) and silently corrupts the allocation/copy size.
    size_t size = sizeof(int) * (size_t)N * (size_t)N;
    hipMalloc((void**) &cuda_mat, size);
    hipMemcpy(cuda_mat, mat, size, hipMemcpyHostToDevice);
    dim3 num_block(ceil(1.0*N/thread_per_block.x),
        ceil(1.0*N/thread_per_block.y));
    for (int k = 0; k < N; ++k) {
        hipLaunchKernelGGL(( SharedMemoryFloydWarshall), dim3(num_block), dim3(thread_per_block), 0, 0, cuda_mat, k, N);
    }
    // blocking copy also synchronizes with the last kernel launch
    hipMemcpy(mat, cuda_mat, size, hipMemcpyDeviceToHost);
    hipFree(cuda_mat);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: ./test {N} {run_sequential_check: 'T' or 'F'}
// Generates a random N x N adjacency matrix, runs the GPU Floyd-Warshall,
// and optionally validates against the sequential ST_APSP reference.
int main(int argc, char **argv)
{
if(argc != 3) {
printf("Usage: test {N} {run_sequential_check: 'T' or 'F'}\n");
exit(-1);
}
char run_sequential_check = argv[2][0];
// one thread per matrix cell, tiled TILE_HEIGHT x TILE_WIDTH
dim3 thread_per_block(TILE_HEIGHT, TILE_WIDTH);
//generate a random matrix.
// NOTE(review): atoi() yields 0 on non-numeric input; no further validation.
size_t N = atoi(argv[1]);
int *mat = (int*)malloc(sizeof(int)*N*N);
GenMatrix(mat, N);
//compute your results
int *result = (int*)malloc(sizeof(int)*N*N);
memcpy(result, mat, sizeof(int)*N*N);
//replace by parallel algorithm
SharedMemoryFloydWarshallDriver(result, N, thread_per_block);
//compare your result with reference result
if (run_sequential_check == 'T') {
int *ref = (int*)malloc(sizeof(int)*N*N);
memcpy(ref, mat, sizeof(int)*N*N);
ST_APSP(ref, N);
if(CmpArray(result, ref, N*N))
printf("Your result is correct.\n");
else
printf("Your result is wrong.\n");
#ifdef PRINT_MATRIX
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%d ", ref[i*N+j]);
}
printf("\n");
}
#endif
}
#ifdef PRINT_MATRIX
printf("==RESULT==\n");
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%d ", result[i*N+j]);
}
printf("\n");
}
#endif
// NOTE(review): mat/result/ref are never freed — tolerable for a run-once
// benchmark, but worth cleaning up.
}
| floyd_sm.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
extern "C" {
#include "MatUtil.h"
}
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
// Device helper: return the smaller of two ints.
__device__
int Min(int a, int b) { return a < b ? a : b; }
// One Floyd–Warshall relaxation step for intermediate vertex k over an
// N x N matrix mat, where -1 encodes "no path". Launch layout:
// blockDim = (TILE_HEIGHT, TILE_WIDTH), one thread per cell (i, j).
// Row-k and column-k entries for the tile are staged in shared memory.
__global__
void SharedMemoryFloydWarshall(int* mat, int k, int N) {
    __shared__ int dist_i_k[TILE_HEIGHT];
    __shared__ int dist_k_j[TILE_WIDTH];
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    bool in_bounds = (i < N && j < N);
    // Stage mat[k][j] and mat[i][k] for this tile. The loads stay guarded,
    // but the barrier below must be reached by every thread of the block:
    // the original kernel placed __syncthreads() inside the bounds check,
    // which is undefined behavior (potential deadlock) whenever N is not a
    // multiple of the tile size.
    if (in_bounds && i % TILE_HEIGHT == 0) {
        dist_k_j[j % TILE_WIDTH] = mat[k*N + j];
    }
    if (in_bounds && j % TILE_WIDTH == 0) {
        dist_i_k[i % TILE_HEIGHT] = mat[i*N + k];
    }
    __syncthreads();
    if (in_bounds) {
        int dist_i_j = mat[i*N + j];
        // relax only when both legs through k exist (-1 = unreachable)
        if (dist_i_k[i % TILE_HEIGHT] != -1 && dist_k_j[j % TILE_WIDTH] != -1) {
            int new_dist = dist_i_k[i % TILE_HEIGHT] + dist_k_j[j % TILE_WIDTH];
            if (dist_i_j != -1) {
                new_dist = Min(new_dist, dist_i_j);
            }
            mat[i*N + j] = new_dist;
        }
    }
}
// Host driver: copies mat to the device, launches one relaxation kernel per
// intermediate vertex k (the per-k dependency forces serialized launches),
// and copies the result back into mat.
void SharedMemoryFloydWarshallDriver(int* mat, int N, dim3 thread_per_block) {
    int* cuda_mat;
    // size_t: the original used int, which overflows for N >= 23171
    // (4*N*N > INT_MAX) and silently corrupts the allocation/copy size.
    size_t size = sizeof(int) * (size_t)N * (size_t)N;
    cudaMalloc((void**) &cuda_mat, size);
    cudaMemcpy(cuda_mat, mat, size, cudaMemcpyHostToDevice);
    dim3 num_block(ceil(1.0*N/thread_per_block.x),
        ceil(1.0*N/thread_per_block.y));
    for (int k = 0; k < N; ++k) {
        SharedMemoryFloydWarshall<<<num_block, thread_per_block>>>(cuda_mat, k, N);
    }
    // blocking copy also synchronizes with the last kernel launch
    cudaMemcpy(mat, cuda_mat, size, cudaMemcpyDeviceToHost);
    cudaFree(cuda_mat);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: ./test {N} {run_sequential_check: 'T' or 'F'}
// Generates a random N x N adjacency matrix, runs the GPU Floyd-Warshall,
// and optionally validates against the sequential ST_APSP reference.
int main(int argc, char **argv)
{
if(argc != 3) {
printf("Usage: test {N} {run_sequential_check: 'T' or 'F'}\n");
exit(-1);
}
char run_sequential_check = argv[2][0];
// one thread per matrix cell, tiled TILE_HEIGHT x TILE_WIDTH
dim3 thread_per_block(TILE_HEIGHT, TILE_WIDTH);
//generate a random matrix.
// NOTE(review): atoi() yields 0 on non-numeric input; no further validation.
size_t N = atoi(argv[1]);
int *mat = (int*)malloc(sizeof(int)*N*N);
GenMatrix(mat, N);
//compute your results
int *result = (int*)malloc(sizeof(int)*N*N);
memcpy(result, mat, sizeof(int)*N*N);
//replace by parallel algorithm
SharedMemoryFloydWarshallDriver(result, N, thread_per_block);
//compare your result with reference result
if (run_sequential_check == 'T') {
int *ref = (int*)malloc(sizeof(int)*N*N);
memcpy(ref, mat, sizeof(int)*N*N);
ST_APSP(ref, N);
if(CmpArray(result, ref, N*N))
printf("Your result is correct.\n");
else
printf("Your result is wrong.\n");
#ifdef PRINT_MATRIX
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%d ", ref[i*N+j]);
}
printf("\n");
}
#endif
}
#ifdef PRINT_MATRIX
printf("==RESULT==\n");
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%d ", result[i*N+j]);
}
printf("\n");
}
#endif
// NOTE(review): mat/result/ref are never freed — tolerable for a run-once
// benchmark, but worth cleaning up.
}
|
3321af4e0bf90511f48189048da7c652f0750ba7.hip | // !!! This is a file automatically generated by hipify!!!
#define COUNT_H
#include <cstdio>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <sys\timeb.h>
#define MAX_VRIJEDNOST 512
#define BLOCKS 16
#define THREADS 128
void ispis(const char*, int*, int);
void ispis2(const char *, int *, int , int );
// CPU reference counting sort: tallies occurrences of each value of A
// (values must lie in [0, MAX_VRIJEDNOST)) and writes the sorted sequence
// into C. A is left untouched.
void seqCount(int *A, int *C, int n) {
    int counter[MAX_VRIJEDNOST];
    memset(counter, 0, sizeof(counter));
    // Tally occurrences of each value.
    for (int idx = 0; idx < n; idx++) {
        counter[A[idx]]++;
    }
    // Emit values in ascending order, one copy per occurrence.
    int out = 0;
    for (int v = 0; v < MAX_VRIJEDNOST; v++) {
        for (int c = counter[v]; c > 0; c--) {
            C[out++] = v;
        }
    }
}
// Per-block histogram kernel: each block tallies its contiguous slice of A
// (values in [0, MAX_VRIJEDNOST)) into a shared-memory histogram, then
// writes that histogram to B[block_id * MAX_VRIJEDNOST .. +MAX_VRIJEDNOST).
// NOTE(review): slice sizes use integer division (n / gridDim.x,
// block_size / blockDim.x), so trailing elements are skipped when the
// counts do not divide evenly — confirm callers always pass divisible sizes
// (main uses n = 2^20 with BLOCKS=16, THREADS=128, which divides cleanly).
__global__ void parCount(int *A, int *B, int n) {
int block_id = blockIdx.x,
block_num = gridDim.x,
block_size,
block_offset,
thread_id = threadIdx.x,
thread_num = blockDim.x,
thread_size,
thread_offset,
offset;
__shared__ int count[MAX_VRIJEDNOST];
// each thread zeroes its slice of the shared histogram
thread_size = (thread_num > MAX_VRIJEDNOST ? 1 : MAX_VRIJEDNOST / thread_num);
offset = thread_id * thread_size;
for (int i = offset; i < offset + thread_size && i < MAX_VRIJEDNOST; ++i)
count[i] = 0;
__syncthreads();
// count occurrences: each thread tallies its own chunk of the unsorted array
block_size = (block_num > n ? 1 : n / block_num);
block_offset = block_id * block_size;
thread_size = (thread_num > block_size ? 1 : block_size / thread_num);
offset = block_offset + thread_id * thread_size;
// tally elements (atomic: many threads may hit the same bin)
for (int i = offset; i < offset + thread_size && i < block_offset + block_size && i < n; ++i)
atomicAdd(&count[A[i]], 1);
__syncthreads();
// each thread copies its slice of the block histogram to global memory
thread_size = (thread_num > MAX_VRIJEDNOST ? 1 : MAX_VRIJEDNOST / thread_num);
thread_offset = thread_id * thread_size;
offset = block_id * MAX_VRIJEDNOST + thread_offset;
if (offset + thread_size <= (block_id + 1) * MAX_VRIJEDNOST)
memcpy(&B[offset], &count[thread_offset], sizeof(int) * thread_size);
}
// Tree-reduction of the per-block histograms stored back-to-back in B:
// halves the number of sub-histograms each round until B[0..MAX_VRIJEDNOST)
// holds the global histogram.
// NOTE(review): __syncthreads() only synchronizes within one block, but
// successive rounds read values written by OTHER blocks — there is no
// grid-wide barrier here. This relies on unguaranteed cross-block ordering;
// verify, or split the rounds into separate kernel launches.
__global__ void merge(int *B) {
int block_id = blockIdx.x,
block_num = gridDim.x,
block_size,
block_offset,
thread_id = threadIdx.x,
thread_num = blockDim.x,
thread_size,
thread_offset,
offset;
// walk the concatenated histograms, folding each pair into the first half
for (int i = block_num, j = 2; i != 1; i /= 2, j *= 2) {
// compute this block's destination region (size and offset) for the round
block_size = i * MAX_VRIJEDNOST / block_num / 2;
block_offset = (block_id / j) * (j * MAX_VRIJEDNOST) + block_size * (block_id % j);
thread_size = (thread_num > block_size ? 1 : block_size / thread_num);
// per-thread offset: add the partner sub-histogram element-wise
offset = block_offset + thread_id * thread_size;
for (int k = offset, l = offset + (MAX_VRIJEDNOST * (j / 2));
k < offset + thread_size && k < block_offset + block_size; ++k, ++l)
B[k] += B[l];
__syncthreads();
}
}
// Counting sort of n random ints in [0, MAX_VRIJEDNOST): GPU version
// (parCount histogram + merge reduction) timed against the sequential
// reference (seqCount).
int main(int argc, const char **argv) {
    int n = pow(2, 20);
    printf("Broj elemenata: %d\n", n);
    printf("Max element: %d\n", MAX_VRIJEDNOST);
    printf("Broj threadova: %d\n", THREADS);
    printf("Broj blokova: %d\n\n", BLOCKS);
    int *dA, *dB;
    int *A = (int*)calloc(n, sizeof(int));
    int *B = (int*)calloc(MAX_VRIJEDNOST, sizeof(int));
    int *C = (int*)calloc(n, sizeof(int));
    // device buffers: dA holds the input, dB one histogram per block
    hipMalloc((void**)&dA, sizeof(int) * n);
    hipMalloc((void**)&dB, sizeof(int) * BLOCKS * MAX_VRIJEDNOST);
    srand(time(NULL));
    // fill the input with random values
    for (int i = 0; i < n; ++i)
        A[i] = rand() % MAX_VRIJEDNOST;
    hipMemcpy(dA, A, sizeof(int) * n, hipMemcpyHostToDevice);
    // --- parallel version ---
    struct timeb start, end, start1;
    int diff;
    ftime(&start1);
    printf("Paralelni: \n");
    parCount << <BLOCKS, THREADS >> >(dA, dB, n);
    merge << <BLOCKS, THREADS >> >(dB);
    // kernel launches are asynchronous: wait before stopping the clock,
    // otherwise only launch overhead is measured
    hipDeviceSynchronize();
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start1.time)
        + (end.millitm - start1.millitm));
    // fixed invalid "\P" escape in the original format string -> "\n"
    printf("\nParalelno prebrojavanje elemenata je trajalo %u milisekundi", diff);
    ftime(&start);
    // only the first (fully merged) histogram is needed on the host
    hipMemcpy(B, dB, sizeof(int) * MAX_VRIJEDNOST, hipMemcpyDeviceToHost);
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start.time)
        + (end.millitm - start.millitm));
    printf("\nKopiranje iz graficke u radnu memoriju je trajalo %u milisekundi", diff);
    ftime(&start);
    // expand the histogram into the sorted output array (consumes B)
    int j = 0;
    for (int i = 0; i < MAX_VRIJEDNOST; i++) {
        while (B[i] > 0) {
            C[j] = i;
            j++;
            B[i]--;
        }
    }
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start.time)
        + (end.millitm - start.millitm));
    printf("\nKreiranje sortiranog niza trajalo %u milisekundi", diff);
    diff = (int)(1000.0 * (end.time - start1.time)
        + (end.millitm - start1.millitm));
    printf("\nUkupno je trajalo %u milisekundi\n", diff);
    //ispis2("Paralelni", C, n, 100000);
    //ispis("Paralelni", C, n);
    // --- sequential reference ---
    ftime(&start);
    seqCount(A, C, n);
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start.time)
        + (end.millitm - start.millitm));
    printf("\nSekvencijalni: ");
    printf("\nSortiranje je trajalo %u milisekundi\n", diff);
    //ispis2("Sekvencijalni", C, n, 1000);
    //ispis("Sekvencijalni", C, n);
    // wait for a keypress before exiting
    char str[60];
    fgets(str, 60, stdin);
    hipFree(dA);
    hipFree(dB);
    // A/B/C were allocated with calloc, so they must be released with
    // free(); the original used delete[], which is undefined behavior
    free(A);
    free(B);
    free(C);
    return EXIT_SUCCESS;
}
// Print an int array as "name = [a0, a1, ...]".
// Fix: the original loop started at i = 0 after the header had already
// printed niz[0], duplicating the first element.
void ispis(const char *naziv, int *niz, int velicina) {
printf("%s = [%d", naziv, niz[0]);
for (int i = 1; i < velicina; ++i) printf(", %d", niz[i]);
printf("]\n");
}
// Print every jump-th element as "name = [a0, a_jump, a_2jump, ...]".
// Fix: the original loop started at i = 0, duplicating niz[0], which the
// header had already printed.
void ispis2(const char *naziv, int *niz, int velicina, int jump) {
printf("%s = [%d", naziv, niz[0]);
for (int i = jump; i < velicina; i=i+jump) printf(", %d", niz[i]);
printf("]\n");
} | 3321af4e0bf90511f48189048da7c652f0750ba7.cu | #define COUNT_H
#include <cstdio>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <sys\timeb.h>
#define MAX_VRIJEDNOST 512
#define BLOCKS 16
#define THREADS 128
void ispis(const char*, int*, int);
void ispis2(const char *, int *, int , int );
// CPU reference counting sort: tallies occurrences of each value of A
// (values must lie in [0, MAX_VRIJEDNOST)) and writes the sorted sequence
// into C. A is left untouched.
void seqCount(int *A, int *C, int n) {
int i, j = 0, counter[MAX_VRIJEDNOST];
memset(counter, 0, sizeof(counter));
// Tally occurrences of each element into counter
for (i = 0; i<n; i++)
counter[A[i]]++;
i = 0;
// Write the counted elements into C in ascending order
for (i = 0; i < MAX_VRIJEDNOST; i++) {
while (counter[i] > 0) {
C[j] = i;
j++;
counter[i]--;
}
}
}
// Per-block histogram kernel: each block tallies its contiguous slice of A
// (values in [0, MAX_VRIJEDNOST)) into a shared-memory histogram, then
// writes that histogram to B[block_id * MAX_VRIJEDNOST .. +MAX_VRIJEDNOST).
// NOTE(review): slice sizes use integer division (n / gridDim.x,
// block_size / blockDim.x), so trailing elements are skipped when the
// counts do not divide evenly — confirm callers always pass divisible sizes
// (main uses n = 2^20 with BLOCKS=16, THREADS=128, which divides cleanly).
__global__ void parCount(int *A, int *B, int n) {
int block_id = blockIdx.x,
block_num = gridDim.x,
block_size,
block_offset,
thread_id = threadIdx.x,
thread_num = blockDim.x,
thread_size,
thread_offset,
offset;
__shared__ int count[MAX_VRIJEDNOST];
// each thread zeroes its slice of the shared histogram
thread_size = (thread_num > MAX_VRIJEDNOST ? 1 : MAX_VRIJEDNOST / thread_num);
offset = thread_id * thread_size;
for (int i = offset; i < offset + thread_size && i < MAX_VRIJEDNOST; ++i)
count[i] = 0;
__syncthreads();
// count occurrences: each thread tallies its own chunk of the unsorted array
block_size = (block_num > n ? 1 : n / block_num);
block_offset = block_id * block_size;
thread_size = (thread_num > block_size ? 1 : block_size / thread_num);
offset = block_offset + thread_id * thread_size;
// tally elements (atomic: many threads may hit the same bin)
for (int i = offset; i < offset + thread_size && i < block_offset + block_size && i < n; ++i)
atomicAdd(&count[A[i]], 1);
__syncthreads();
// each thread copies its slice of the block histogram to global memory
thread_size = (thread_num > MAX_VRIJEDNOST ? 1 : MAX_VRIJEDNOST / thread_num);
thread_offset = thread_id * thread_size;
offset = block_id * MAX_VRIJEDNOST + thread_offset;
if (offset + thread_size <= (block_id + 1) * MAX_VRIJEDNOST)
memcpy(&B[offset], &count[thread_offset], sizeof(int) * thread_size);
}
// Tree-reduction of the per-block histograms stored back-to-back in B:
// halves the number of sub-histograms each round until B[0..MAX_VRIJEDNOST)
// holds the global histogram.
// NOTE(review): __syncthreads() only synchronizes within one block, but
// successive rounds read values written by OTHER blocks — there is no
// grid-wide barrier here. This relies on unguaranteed cross-block ordering;
// verify, or split the rounds into separate kernel launches.
__global__ void merge(int *B) {
int block_id = blockIdx.x,
block_num = gridDim.x,
block_size,
block_offset,
thread_id = threadIdx.x,
thread_num = blockDim.x,
thread_size,
thread_offset,
offset;
// walk the concatenated histograms, folding each pair into the first half
for (int i = block_num, j = 2; i != 1; i /= 2, j *= 2) {
// compute this block's destination region (size and offset) for the round
block_size = i * MAX_VRIJEDNOST / block_num / 2;
block_offset = (block_id / j) * (j * MAX_VRIJEDNOST) + block_size * (block_id % j);
thread_size = (thread_num > block_size ? 1 : block_size / thread_num);
// per-thread offset: add the partner sub-histogram element-wise
offset = block_offset + thread_id * thread_size;
for (int k = offset, l = offset + (MAX_VRIJEDNOST * (j / 2));
k < offset + thread_size && k < block_offset + block_size; ++k, ++l)
B[k] += B[l];
__syncthreads();
}
}
// Counting sort of n random ints in [0, MAX_VRIJEDNOST): GPU version
// (parCount histogram + merge reduction) timed against the sequential
// reference (seqCount).
int main(int argc, const char **argv) {
    int n = pow(2, 20);
    printf("Broj elemenata: %d\n", n);
    printf("Max element: %d\n", MAX_VRIJEDNOST);
    printf("Broj threadova: %d\n", THREADS);
    printf("Broj blokova: %d\n\n", BLOCKS);
    int *dA, *dB;
    int *A = (int*)calloc(n, sizeof(int));
    int *B = (int*)calloc(MAX_VRIJEDNOST, sizeof(int));
    int *C = (int*)calloc(n, sizeof(int));
    // device buffers: dA holds the input, dB one histogram per block
    cudaMalloc((void**)&dA, sizeof(int) * n);
    cudaMalloc((void**)&dB, sizeof(int) * BLOCKS * MAX_VRIJEDNOST);
    srand(time(NULL));
    // fill the input with random values
    for (int i = 0; i < n; ++i)
        A[i] = rand() % MAX_VRIJEDNOST;
    cudaMemcpy(dA, A, sizeof(int) * n, cudaMemcpyHostToDevice);
    // --- parallel version ---
    struct timeb start, end, start1;
    int diff;
    ftime(&start1);
    printf("Paralelni: \n");
    parCount << <BLOCKS, THREADS >> >(dA, dB, n);
    merge << <BLOCKS, THREADS >> >(dB);
    // kernel launches are asynchronous: wait before stopping the clock,
    // otherwise only launch overhead is measured
    cudaDeviceSynchronize();
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start1.time)
        + (end.millitm - start1.millitm));
    // fixed invalid "\P" escape in the original format string -> "\n"
    printf("\nParalelno prebrojavanje elemenata je trajalo %u milisekundi", diff);
    ftime(&start);
    // only the first (fully merged) histogram is needed on the host
    cudaMemcpy(B, dB, sizeof(int) * MAX_VRIJEDNOST, cudaMemcpyDeviceToHost);
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start.time)
        + (end.millitm - start.millitm));
    printf("\nKopiranje iz graficke u radnu memoriju je trajalo %u milisekundi", diff);
    ftime(&start);
    // expand the histogram into the sorted output array (consumes B)
    int j = 0;
    for (int i = 0; i < MAX_VRIJEDNOST; i++) {
        while (B[i] > 0) {
            C[j] = i;
            j++;
            B[i]--;
        }
    }
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start.time)
        + (end.millitm - start.millitm));
    printf("\nKreiranje sortiranog niza trajalo %u milisekundi", diff);
    diff = (int)(1000.0 * (end.time - start1.time)
        + (end.millitm - start1.millitm));
    printf("\nUkupno je trajalo %u milisekundi\n", diff);
    //ispis2("Paralelni", C, n, 100000);
    //ispis("Paralelni", C, n);
    // --- sequential reference ---
    ftime(&start);
    seqCount(A, C, n);
    ftime(&end);
    diff = (int)(1000.0 * (end.time - start.time)
        + (end.millitm - start.millitm));
    printf("\nSekvencijalni: ");
    printf("\nSortiranje je trajalo %u milisekundi\n", diff);
    //ispis2("Sekvencijalni", C, n, 1000);
    //ispis("Sekvencijalni", C, n);
    // wait for a keypress before exiting
    char str[60];
    fgets(str, 60, stdin);
    cudaFree(dA);
    cudaFree(dB);
    // A/B/C were allocated with calloc, so they must be released with
    // free(); the original used delete[], which is undefined behavior
    free(A);
    free(B);
    free(C);
    return EXIT_SUCCESS;
}
// Print an int array as "name = [a0, a1, ...]".
// Fix: the original loop started at i = 0 after the header had already
// printed niz[0], duplicating the first element.
void ispis(const char *naziv, int *niz, int velicina) {
printf("%s = [%d", naziv, niz[0]);
for (int i = 1; i < velicina; ++i) printf(", %d", niz[i]);
printf("]\n");
}
// Print every jump-th element as "name = [a0, a_jump, a_2jump, ...]".
// Fix: the original loop started at i = 0, duplicating niz[0], which the
// header had already printed.
void ispis2(const char *naziv, int *niz, int velicina, int jump) {
printf("%s = [%d", naziv, niz[0]);
for (int i = jump; i < velicina; i=i+jump) printf(", %d", niz[i]);
printf("]\n");
} |
b5f2be38aeb2c79cefcf5f93ce526bb29ef7d626.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Contributors: Yizhao Gao (yizhaotsccsj@gmail.com)
*/
#include <stdio.h>
#include "kde.cuh"
#include "cudaErrorCheck.cu"
#define BLOCKSIZE 16
// Kernel density estimation over a raster grid; one thread per output cell
// (i = row, j = col). Points are pre-binned by data-grid block on the host
// (see kde()); dPoints[g] is the cumulative end offset of block g in the
// reordered point arrays dX/dY/dP/dC. For each of the 2*blockBandwidth+1
// rows of data blocks in range, the thread block streams the candidate
// points through shared memory in BLOCKSIZE*BLOCKSIZE batches and
// accumulates a quadratic (1 - d^2/h^2) weight into the population (dP)
// and case (dC) densities. bandwidth2 is the squared bandwidth.
// NOTE: out-of-raster threads still run the staging loop so that every
// __syncthreads() is reached uniformly; only the final write is guarded.
__global__ void kdeKernel(float * dPop, float * dCase, int nRow, int nCol, float xMin, float yMax, float cellSize, float * dX, float * dY, float * dP, float * dC, int * dPoints, float bandwidth2, int blockBandwidth)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int idInThread = threadIdx.y * blockDim.x + threadIdx.x;
// center coordinates of this output cell
float cellX = xMin + cellSize * (j + 0.5);
float cellY = yMax - cellSize * (i + 0.5);
float denPop = 0.0f;
float denCase = 0.0f;
float dist2;
float weight;
int pointProcessed;
int pointToProcess;
int endPoint;
__shared__ float sX[BLOCKSIZE*BLOCKSIZE];
__shared__ float sY[BLOCKSIZE*BLOCKSIZE];
__shared__ float sP[BLOCKSIZE*BLOCKSIZE];
__shared__ float sC[BLOCKSIZE*BLOCKSIZE];
// scan one horizontal run of 2*blockBandwidth+1 data blocks per iteration
for(int k = 0; k < 1 + 2 * blockBandwidth; k ++)
{
// dPoints is indexed over the padded data grid (gridDim.x + 2*blockBandwidth wide)
int dataBID = (blockIdx.y + k) * (gridDim.x + 2 * blockBandwidth)+ blockIdx.x;
if(dataBID < 1)
{
pointProcessed = 0;
}
else
{
// start offset = end offset of the previous data block
pointProcessed = dPoints[dataBID - 1];
}
endPoint = dPoints[dataBID + 2 * blockBandwidth];
pointToProcess = BLOCKSIZE * BLOCKSIZE;
// stream [pointProcessed, endPoint) through shared memory in batches
for(; pointProcessed < endPoint; pointProcessed += BLOCKSIZE * BLOCKSIZE)
{
if(pointProcessed + pointToProcess > endPoint)
{
pointToProcess = endPoint - pointProcessed;
}
// cooperative load: one point per thread
if(idInThread < pointToProcess)
{
sX[idInThread] = dX[pointProcessed + idInThread];
sY[idInThread] = dY[pointProcessed + idInThread];
sP[idInThread] = dP[pointProcessed + idInThread];
sC[idInThread] = dC[pointProcessed + idInThread];
}
__syncthreads();
// accumulate kernel weights for every staged point within bandwidth
for(int m = 0; m < pointToProcess; m++)
{
dist2 = (cellX - sX[m]) * (cellX - sX[m]) + (cellY - sY[m]) * (cellY - sY[m]);
if(dist2 < bandwidth2)
{
weight = (1 - dist2 / bandwidth2);
denPop += weight * sP[m];
denCase += weight * sC[m];
}
}
// protect shared buffers before the next batch overwrites them
__syncthreads();
}
}
// write guarded: only threads mapping to a real raster cell store results
if(i < nRow && j < nCol && i > -1 && j > -1)
{
dPop[i * nCol + j] = denPop;
dCase[i * nCol + j] = denCase;
}
}
void kde(float * caseDen, float * popDen, int nRow, int nCol, float cellSize, float xMin, float yMax, float * xCol, float * yCol, float * pCount, float * cCount, int nHH, float bandwidth)
{
int gridX = ceil((float) nCol / BLOCKSIZE);
int gridY = ceil((float) nRow / BLOCKSIZE);
float blockSizeE = BLOCKSIZE * cellSize;
int blockBandwidth = ceil(bandwidth / blockSizeE);
printf("block bandwidth: %d\n", blockBandwidth);
int dataGridX = gridX + 2 * blockBandwidth;
int dataGridY = gridY + 2 * blockBandwidth;
float xMinData = xMin - blockSizeE * blockBandwidth;
float yMaxData = yMax + blockSizeE * blockBandwidth;
int rowID, colID, gridID;
int * nPointsB;
if(NULL == (nPointsB = (int *) malloc(sizeof(int) * dataGridX * dataGridY)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
for(int i = 0; i < dataGridX * dataGridY; i++)
{
nPointsB[i] = 0;
}
int * dGridID;
if(NULL == (dGridID = (int *) malloc(sizeof(int) * nHH)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
for(int i = 0; i < nHH; i++)
{
colID = (int)((xCol[i] - xMinData) / blockSizeE);
rowID = (int)((yMaxData - yCol[i]) / blockSizeE);
gridID = rowID * dataGridX + colID;
if(colID < 0 || colID >= dataGridX || rowID < 0 || rowID >= dataGridY)
{
dGridID[i] = -1;
}
else
{
nPointsB[gridID] ++;
dGridID[i] = gridID;
}
}
int nPointsIn = 0;
int * startIDB;
if(NULL == (startIDB = (int *) malloc(sizeof(int) * dataGridX * dataGridY)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
for(int i = 0; i < dataGridX * dataGridY; i++)
{
startIDB[i] = nPointsIn;
nPointsIn += nPointsB[i];
}
float * xColOrd;
float * yColOrd;
float * pCOrd;
float * cCOrd;
if(NULL == (xColOrd = (float *) malloc(sizeof(float) * nPointsIn)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
if(NULL == (yColOrd = (float *) malloc(sizeof(float) * nPointsIn)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
if(NULL == (pCOrd = (float *) malloc(sizeof(float) * nPointsIn)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
if(NULL == (cCOrd = (float *) malloc(sizeof(float) * nPointsIn)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
for(int i = 0; i < dataGridX * dataGridY; i++)
{
nPointsB[i] = startIDB[i];
}
for(int i = 0; i < nHH; i++)
{
gridID = dGridID[i];
if(gridID < 0)
continue;
xColOrd[nPointsB[gridID]] = xCol[i];
yColOrd[nPointsB[gridID]] = yCol[i];
pCOrd[nPointsB[gridID]] = pCount[i];
cCOrd[nPointsB[gridID]] = cCount[i];
nPointsB[gridID] ++;
}
float * dX;
float * dY;
float * dP;
float * dC;
int * dPoints;
float * dPop;
float * dCase;
dim3 dimBlock (BLOCKSIZE, BLOCKSIZE);
dim3 dimGrid (gridX, gridY);
CudaSafeCall(hipMalloc((void **) &dPop, sizeof(float) * nRow * nCol));
CudaSafeCall(hipMalloc((void **) &dCase, sizeof(float) * nRow * nCol));
CudaSafeCall(hipMalloc((void **) &dX, sizeof(float) * nPointsIn));
CudaSafeCall(hipMalloc((void **) &dY, sizeof(float) * nPointsIn));
CudaSafeCall(hipMalloc((void **) &dP, sizeof(float) * nPointsIn));
CudaSafeCall(hipMalloc((void **) &dC, sizeof(float) * nPointsIn));
CudaSafeCall(hipMalloc((void **) &dPoints, sizeof(int) * dataGridX * dataGridY));
CudaSafeCall(hipMemcpy(dX, xColOrd, sizeof(float) * nPointsIn, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(dY, yColOrd, sizeof(float) * nPointsIn, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(dP, pCOrd, sizeof(float) * nPointsIn, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(dC, cCOrd, sizeof(float) * nPointsIn, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(dPoints, nPointsB, sizeof(int) * dataGridX * dataGridY, hipMemcpyHostToDevice));
CudaCheckError();
//Kernel Goes here
hipLaunchKernelGGL(( kdeKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dPop, dCase, nRow, nCol, xMin, yMax, cellSize, dX, dY, dP, dC, dPoints, bandwidth * bandwidth, blockBandwidth);
CudaSafeCall(hipMemcpy(popDen, dPop, sizeof(float) * nRow * nCol, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy(caseDen, dCase, sizeof(float) * nRow * nCol, hipMemcpyDeviceToHost));
hipFree(dPop);
hipFree(dCase);
hipFree(dX);
hipFree(dY);
hipFree(dP);
hipFree(dC);
hipFree(dPoints);
free(xColOrd);
free(yColOrd);
free(pCOrd);
free(cCOrd);
free(dGridID);
free(nPointsB);
free(startIDB);
}
| b5f2be38aeb2c79cefcf5f93ce526bb29ef7d626.cu | /*
Contributors: Yizhao Gao (yizhaotsccsj@gmail.com)
*/
#include <stdio.h>
#include "kde.cuh"
#include "cudaErrorCheck.cu"
#define BLOCKSIZE 16
// Kernel density estimation over a raster grid; one thread per output cell
// (i = row, j = col). Points are pre-binned by data-grid block on the host
// (see kde()); dPoints[g] is the cumulative end offset of block g in the
// reordered point arrays dX/dY/dP/dC. For each of the 2*blockBandwidth+1
// rows of data blocks in range, the thread block streams the candidate
// points through shared memory in BLOCKSIZE*BLOCKSIZE batches and
// accumulates a quadratic (1 - d^2/h^2) weight into the population (dP)
// and case (dC) densities. bandwidth2 is the squared bandwidth.
// NOTE: out-of-raster threads still run the staging loop so that every
// __syncthreads() is reached uniformly; only the final write is guarded.
__global__ void kdeKernel(float * dPop, float * dCase, int nRow, int nCol, float xMin, float yMax, float cellSize, float * dX, float * dY, float * dP, float * dC, int * dPoints, float bandwidth2, int blockBandwidth)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int idInThread = threadIdx.y * blockDim.x + threadIdx.x;
// center coordinates of this output cell
float cellX = xMin + cellSize * (j + 0.5);
float cellY = yMax - cellSize * (i + 0.5);
float denPop = 0.0f;
float denCase = 0.0f;
float dist2;
float weight;
int pointProcessed;
int pointToProcess;
int endPoint;
__shared__ float sX[BLOCKSIZE*BLOCKSIZE];
__shared__ float sY[BLOCKSIZE*BLOCKSIZE];
__shared__ float sP[BLOCKSIZE*BLOCKSIZE];
__shared__ float sC[BLOCKSIZE*BLOCKSIZE];
// scan one horizontal run of 2*blockBandwidth+1 data blocks per iteration
for(int k = 0; k < 1 + 2 * blockBandwidth; k ++)
{
// dPoints is indexed over the padded data grid (gridDim.x + 2*blockBandwidth wide)
int dataBID = (blockIdx.y + k) * (gridDim.x + 2 * blockBandwidth)+ blockIdx.x;
if(dataBID < 1)
{
pointProcessed = 0;
}
else
{
// start offset = end offset of the previous data block
pointProcessed = dPoints[dataBID - 1];
}
endPoint = dPoints[dataBID + 2 * blockBandwidth];
pointToProcess = BLOCKSIZE * BLOCKSIZE;
// stream [pointProcessed, endPoint) through shared memory in batches
for(; pointProcessed < endPoint; pointProcessed += BLOCKSIZE * BLOCKSIZE)
{
if(pointProcessed + pointToProcess > endPoint)
{
pointToProcess = endPoint - pointProcessed;
}
// cooperative load: one point per thread
if(idInThread < pointToProcess)
{
sX[idInThread] = dX[pointProcessed + idInThread];
sY[idInThread] = dY[pointProcessed + idInThread];
sP[idInThread] = dP[pointProcessed + idInThread];
sC[idInThread] = dC[pointProcessed + idInThread];
}
__syncthreads();
// accumulate kernel weights for every staged point within bandwidth
for(int m = 0; m < pointToProcess; m++)
{
dist2 = (cellX - sX[m]) * (cellX - sX[m]) + (cellY - sY[m]) * (cellY - sY[m]);
if(dist2 < bandwidth2)
{
weight = (1 - dist2 / bandwidth2);
denPop += weight * sP[m];
denCase += weight * sC[m];
}
}
// protect shared buffers before the next batch overwrites them
__syncthreads();
}
}
// write guarded: only threads mapping to a real raster cell store results
if(i < nRow && j < nCol && i > -1 && j > -1)
{
dPop[i * nCol + j] = denPop;
dCase[i * nCol + j] = denCase;
}
}
// Host driver for kdeKernel.
// Bins the nHH points (xCol, yCol) with weights pCount/cCount into the blocks
// of an expanded "data grid" (the output block grid padded by a halo wide
// enough to cover `bandwidth`), reorders the points into block-contiguous
// arrays, uploads everything and launches the density kernel.
// Results are written into popDen/caseDen (nRow x nCol, row-major).
void kde(float * caseDen, float * popDen, int nRow, int nCol, float cellSize, float xMin, float yMax, float * xCol, float * yCol, float * pCount, float * cCount, int nHH, float bandwidth)
{
	// Thread-block grid covering the output raster.
	int gridX = ceil((float) nCol / BLOCKSIZE);
	int gridY = ceil((float) nRow / BLOCKSIZE);
	// Edge length of one thread block in map units, and the halo width (in
	// blocks) needed so every point within `bandwidth` of a cell is found.
	float blockSizeE = BLOCKSIZE * cellSize;
	int blockBandwidth = ceil(bandwidth / blockSizeE);
	printf("block bandwidth: %d\n", blockBandwidth);
	// Data grid = output block grid padded by the halo on all four sides.
	int dataGridX = gridX + 2 * blockBandwidth;
	int dataGridY = gridY + 2 * blockBandwidth;
	float xMinData = xMin - blockSizeE * blockBandwidth;
	float yMaxData = yMax + blockSizeE * blockBandwidth;
	int rowID, colID, gridID;
	// Per-data-block point counts (later reused as write cursors/end offsets).
	int * nPointsB;
	if(NULL == (nPointsB = (int *) malloc(sizeof(int) * dataGridX * dataGridY)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	for(int i = 0; i < dataGridX * dataGridY; i++)
	{
		nPointsB[i] = 0;
	}
	// Pass 1: assign each point to a data-grid block and count.  Points
	// outside the padded extent are marked -1 and dropped.
	int * dGridID;
	if(NULL == (dGridID = (int *) malloc(sizeof(int) * nHH)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	for(int i = 0; i < nHH; i++)
	{
		colID = (int)((xCol[i] - xMinData) / blockSizeE);
		rowID = (int)((yMaxData - yCol[i]) / blockSizeE);
		gridID = rowID * dataGridX + colID;
		if(colID < 0 || colID >= dataGridX || rowID < 0 || rowID >= dataGridY)
		{
			dGridID[i] = -1;
		}
		else
		{
			nPointsB[gridID] ++;
			dGridID[i] = gridID;
		}
	}
	// Exclusive prefix sum of the counts -> start offset of each block's
	// points; nPointsIn ends up as the number of points kept.
	int nPointsIn = 0;
	int * startIDB;
	if(NULL == (startIDB = (int *) malloc(sizeof(int) * dataGridX * dataGridY)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	for(int i = 0; i < dataGridX * dataGridY; i++)
	{
		startIDB[i] = nPointsIn;
		nPointsIn += nPointsB[i];
	}
	// Block-contiguous copies of the point data.
	float * xColOrd;
	float * yColOrd;
	float * pCOrd;
	float * cCOrd;
	if(NULL == (xColOrd = (float *) malloc(sizeof(float) * nPointsIn)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	if(NULL == (yColOrd = (float *) malloc(sizeof(float) * nPointsIn)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	if(NULL == (pCOrd = (float *) malloc(sizeof(float) * nPointsIn)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	if(NULL == (cCOrd = (float *) malloc(sizeof(float) * nPointsIn)))
	{
		printf("ERROR: Out of memory in %d!\n", __LINE__);
		exit(1);
	}
	// Pass 2: scatter the points into block order.  nPointsB is reused as a
	// running write cursor, so afterwards nPointsB[b] holds the END offset of
	// block b -- exactly the layout kdeKernel expects in dPoints.
	for(int i = 0; i < dataGridX * dataGridY; i++)
	{
		nPointsB[i] = startIDB[i];
	}
	for(int i = 0; i < nHH; i++)
	{
		gridID = dGridID[i];
		if(gridID < 0)
			continue;
		xColOrd[nPointsB[gridID]] = xCol[i];
		yColOrd[nPointsB[gridID]] = yCol[i];
		pCOrd[nPointsB[gridID]] = pCount[i];
		cCOrd[nPointsB[gridID]] = cCount[i];
		nPointsB[gridID] ++;
	}
	// Device buffers: point data, per-block end offsets, and the two rasters.
	float * dX;
	float * dY;
	float * dP;
	float * dC;
	int * dPoints;
	float * dPop;
	float * dCase;
	dim3 dimBlock (BLOCKSIZE, BLOCKSIZE);
	dim3 dimGrid (gridX, gridY);
	CudaSafeCall(cudaMalloc((void **) &dPop, sizeof(float) * nRow * nCol));
	CudaSafeCall(cudaMalloc((void **) &dCase, sizeof(float) * nRow * nCol));
	CudaSafeCall(cudaMalloc((void **) &dX, sizeof(float) * nPointsIn));
	CudaSafeCall(cudaMalloc((void **) &dY, sizeof(float) * nPointsIn));
	CudaSafeCall(cudaMalloc((void **) &dP, sizeof(float) * nPointsIn));
	CudaSafeCall(cudaMalloc((void **) &dC, sizeof(float) * nPointsIn));
	CudaSafeCall(cudaMalloc((void **) &dPoints, sizeof(int) * dataGridX * dataGridY));
	CudaSafeCall(cudaMemcpy(dX, xColOrd, sizeof(float) * nPointsIn, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(dY, yColOrd, sizeof(float) * nPointsIn, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(dP, pCOrd, sizeof(float) * nPointsIn, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(dC, cCOrd, sizeof(float) * nPointsIn, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(dPoints, nPointsB, sizeof(int) * dataGridX * dataGridY, cudaMemcpyHostToDevice));
	CudaCheckError();
	//Kernel Goes here
	kdeKernel<<<dimGrid, dimBlock>>>(dPop, dCase, nRow, nCol, xMin, yMax, cellSize, dX, dY, dP, dC, dPoints, bandwidth * bandwidth, blockBandwidth);
	// The blocking copies below synchronize with the kernel; any asynchronous
	// kernel failure surfaces through CudaSafeCall here.
	CudaSafeCall(cudaMemcpy(popDen, dPop, sizeof(float) * nRow * nCol, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpy(caseDen, dCase, sizeof(float) * nRow * nCol, cudaMemcpyDeviceToHost));
	cudaFree(dPop);
	cudaFree(dCase);
	cudaFree(dX);
	cudaFree(dY);
	cudaFree(dP);
	cudaFree(dC);
	cudaFree(dPoints);
	free(xColOrd);
	free(yColOrd);
	free(pCOrd);
	free(cCOrd);
	free(dGridID);
	free(nPointsB);
	free(startIDB);
}
|
3a2ec0fe9c1af8ecd7cda749230398c1630d5366.hip | // !!! This is a file automatically generated by hipify!!!
/*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
*/
#include "generate_dmrs_pusch_hip.cuh"
// Test harness: generates the PUSCH demodulation reference signals for one
// fixed parameter set (N_subfr=0, N_id_cell=2, "fixed" w_config, 6 PRBs,
// layer 0), prints them, then redirects stdout into a MATLAB script
// (dmrs_Results.m) so the values can be compared with the MATLAB model.
int main(int argc, char **argv) {
	//For output
	hipfftComplex *dmrs1_h;
	hipfftComplex *dmrs2_h;
	//Call the generate_dmrs_pusch Function
	generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, "fixed", 6, 0, &dmrs1_h, &dmrs2_h);
	//Print results (human-readable dump of both reference-signal slots)
	for (int i = 0; i < 72 ; i++)
	{
		printf("idx = %d \t %f \t %f \n", i + 1, dmrs1_h[i].x, dmrs1_h[i].y);
	}
	for (int i = 0; i < 72; i++)
	{
		printf("idx = %d \t %f \t %f \n", i + 1, dmrs2_h[i].x, dmrs2_h[i].y);
	}
	//To compare with MATLAB results
	//Run the file (Demapper_Results.m)
	// From here on, stdout is redirected: everything printed below lands in
	// dmrs_Results.m as executable MATLAB code.
	FILE *results;
	if ((results = freopen("dmrs_Results.m", "w+", stdout)) == NULL) {
		printf("Cannot open file.\n");
		exit(1);
	}
	//output file: real/imag parts of each signal as MATLAB row vectors
	printf("clear; clc;\ndmrs1_real = [ ");
	for (int i = 0; i < (72); i++)
	{
		printf("%10f", dmrs1_h[i].x);
		if (i != ((72) - 1))
			printf(",");
	}
	printf(" ];\ndmrs1_imag = [ ");
	for (int i = 0; i < (72); i++)
	{
		printf("%10f", dmrs1_h[i].y);
		if (i != ((72) - 1))
			printf(",");
	}
	printf(" ];\n");
	printf("dmrs1_CUDA = dmrs1_real + 1i * dmrs1_imag;\n");
	printf("\ndmrs2_real = [ ");
	for (int i = 0; i < (72); i++)
	{
		printf("%10f", dmrs2_h[i].x);
		if (i != ((72) - 1))
			printf(",");
	}
	printf(" ];\ndmrs2_imag = [ ");
	for (int i = 0; i < (72); i++)
	{
		printf("%10f", dmrs2_h[i].y);
		if (i != ((72) - 1))
			printf(",");
	}
	printf(" ];\n");
	printf("dmrs2_CUDA = dmrs2_real + 1i * dmrs2_imag;\n");
	fclose(results);
	return 0;
} | 3a2ec0fe9c1af8ecd7cda749230398c1630d5366.cu | /*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
*/
#include "generate_dmrs_pusch.cuh"
// Emits "<name> = [ v0,v1,...,v(len-1) ];" using the %10f format the MATLAB
// comparison script expects.  imagPart selects the .y component instead of .x.
static void printMatlabVector(const char *name, cufftComplex *data, int len, bool imagPart)
{
	printf("%s = [ ", name);
	for (int k = 0; k < len; k++)
	{
		printf("%10f", imagPart ? data[k].y : data[k].x);
		if (k != len - 1)
			printf(",");
	}
	printf(" ];\n");
}

// Test harness: generates the PUSCH demodulation reference signals for one
// fixed parameter set, prints them, then redirects stdout into a MATLAB
// script (dmrs_Results.m) so the values can be compared with the MATLAB model.
int main(int argc, char **argv) {
	// Output buffers filled by the generator.
	cufftComplex *dmrs1_h;
	cufftComplex *dmrs2_h;
	// N_subfr=0, N_id_cell=2, "fixed" w_config, 6 PRBs, layer 0.
	generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, "fixed", 6, 0, &dmrs1_h, &dmrs2_h);
	// Human-readable dump of both reference-signal slots.
	for (int k = 0; k < 72; k++)
	{
		printf("idx = %d \t %f \t %f \n", k + 1, dmrs1_h[k].x, dmrs1_h[k].y);
	}
	for (int k = 0; k < 72; k++)
	{
		printf("idx = %d \t %f \t %f \n", k + 1, dmrs2_h[k].x, dmrs2_h[k].y);
	}
	// From here on, everything printed lands in dmrs_Results.m.
	FILE *results = freopen("dmrs_Results.m", "w+", stdout);
	if (results == NULL)
	{
		printf("Cannot open file.\n");
		exit(1);
	}
	printf("clear; clc;\n");
	printMatlabVector("dmrs1_real", dmrs1_h, 72, false);
	printMatlabVector("dmrs1_imag", dmrs1_h, 72, true);
	printf("dmrs1_CUDA = dmrs1_real + 1i * dmrs1_imag;\n");
	printf("\n");
	printMatlabVector("dmrs2_real", dmrs2_h, 72, false);
	printMatlabVector("dmrs2_imag", dmrs2_h, 72, true);
	printf("dmrs2_CUDA = dmrs2_real + 1i * dmrs2_imag;\n");
	fclose(results);
	return 0;
} |
75d53dca39eaf1a9835771f00fedb6fac04ffec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from https://github.com/caffe2/caffe2/blob/master/caffe2/operators/roi_align_op.cu
// (Ignacio Rocco)
#include "ATen/NativeFunctions.h"
#include <cfloat>
namespace at {
namespace contrib {
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
const int CUDA_MAX_BLOCKS = 65535;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// Branching form of the float minimum; shadows the library fmin so it can be
// inlined in both host and device code.  Ties (and NaN comparisons) return
// the first argument's slot, matching the original ternary.
__host__ __device__ __forceinline__ float fmin(float lhs, float rhs) {
  if (rhs < lhs) return rhs;
  return lhs;
}
// Branching form of the float maximum; shadows the library fmax so it can be
// inlined in both host and device code.  Ties (and NaN comparisons) return
// the second argument's slot, matching the original ternary.
__host__ __device__ __forceinline__ float fmax(float lhs, float rhs) {
  if (rhs < lhs) return lhs;
  return rhs;
}
// Bilinear interpolation of one (height x width) channel plane at the
// continuous location (x, y).  Samples more than one pixel outside the plane
// return 0; coordinates in [-1, 0] are clamped onto the border.  The `index`
// parameter is unused here and kept only for debugging parity with the
// Caffe2 original.
template <typename T>
__device__ T bilinear_interpolate(
    const T* bottom_data,
    const int height,
    const int width,
    T y,
    T x,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    return 0;
  }
  // Clamp slightly-negative coordinates onto the top/left border.
  if (y <= 0) {
    y = 0;
  }
  if (x <= 0) {
    x = 0;
  }
  // Integer corners surrounding (x, y); on the bottom/right border both
  // corners collapse onto the last row/column.
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Interpolation weights: l* is the fractional part, h* its complement.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// RoIAlign forward pass.  One logical thread per output element
// (n, c, ph, pw), covered by a grid-stride loop so any launch size is
// correct.  Each pooled bin averages roi_bin_grid_h x roi_bin_grid_w
// bilinear samples placed at regular offsets inside the bin.
// bottom_rois rows are [batch_index, x1, y1, x2, y2] in input-image
// coordinates; spatial_scale converts them to feature-map coordinates.
template <typename T>
__global__ void roi_align_forward_kernel(
    const int outputElements,
    const T* bottom_data, // input tensor
    const T* bottom_rois, // input rois
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    T* top_data) // output
{
  // CUDA_1D_KERNEL_LOOP(index, nthreads) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < outputElements;
       index += blockDim.x * gridDim.x)
  {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Row n of the ROI table; field 0 selects the input image in the batch.
    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Do not using rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
    // Force malformed ROIs to be 1x1
    T roi_width = fmax(roi_end_w - roi_start_w, (T)1.);
    T roi_height = fmax(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Base of the (roi_batch_ind, c) channel plane.
    const T* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // We use roi_bin_grid to sample the grid and mimic integral;
    // when sampling_ratio <= 0 the density adapts to the bin size.
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceilf(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Sample at the center of each sub-cell of the bin.
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T val = bilinear_interpolate(
            offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;
    top_data[index] = output_val;
  }
}
// Host wrapper (float-only: the .data<float>() calls below assert the dtype).
// Validates layouts, allocates the (num_rois, C, pooled_h, pooled_w) output
// and launches roi_align_forward_kernel on the current stream.
Tensor roi_align_forward_cuda(
    const Tensor& input,
    const Tensor& bottom_rois,
    int64_t pooled_height,
    int64_t pooled_width,
    double spatial_scale,
    int64_t sampling_ratio)
{
  // Input is the output of the last convolutional layer in the Backbone network, so
  // it should be in the format of NCHW
  AT_ASSERT(input.ndimension() == 4, "Input to RoI Align should be a NCHW Tensor");
  // ROIs is the set of region proposals to process. It is a 2D Tensor where the first
  // dim is the # of proposals, and the second dim is the n itself in the form
  // [batch_index startW startH endW endH]
  AT_ASSERT(bottom_rois.ndimension() == 2, "RoI Proposals should be a 2D Tensor, (batch_sz x proposals)");
  AT_ASSERT(bottom_rois.size(1) == 5, "Proposals should be of the form [batch_index startW startH endW enH]");
  auto proposals = bottom_rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  // Output Tensor is (num_rois, C, pooled_height, pooled_width)
  auto output = input.type().tensor({proposals, channels, pooled_height, pooled_width});
  AT_ASSERT(input.is_contiguous(), "input must be contiguous");
  AT_ASSERT(bottom_rois.is_contiguous(), "bottom_rois must be contiguous");
  // dim3 block(512);
  // dim3 grid((output.numel() + 512 - 1) / 512);
  // Cap the grid at CUDA_MAX_BLOCKS; the kernel's grid-stride loop picks up
  // the remainder.  NOTE(review): `fmin` here is the float helper above, so
  // the block count is exact only up to 2^24 -- ample for realistic sizes.
  int64_t total_threads = output.numel();
  int64_t blocks = fmin(GET_BLOCKS(total_threads),CUDA_MAX_BLOCKS);
  hipLaunchKernelGGL(( roi_align_forward_kernel), dim3(blocks), dim3(CUDA_NUM_THREADS), 0, globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
      output.numel(),
      input.data<float>(),
      bottom_rois.data<float>(),
      static_cast<float>(spatial_scale),
      channels,
      height,
      width,
      pooled_height,
      pooled_width,
      sampling_ratio,
      output.data<float>());
  AT_ASSERT(hipGetLastError() == hipSuccess, "roi_align_forward_kernel failed");
  return output;
}
} // at::contrib
} // at | 75d53dca39eaf1a9835771f00fedb6fac04ffec2.cu | // Adapted from https://github.com/caffe2/caffe2/blob/master/caffe2/operators/roi_align_op.cu
// (Ignacio Rocco)
#include "ATen/NativeFunctions.h"
#include <cfloat>
namespace at {
namespace contrib {
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
const int CUDA_MAX_BLOCKS = 65535;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// Branching form of the float minimum; shadows the library fmin so it can be
// inlined in both host and device code.  Ties (and NaN comparisons) return
// the first argument's slot, matching the original ternary.
__host__ __device__ __forceinline__ float fmin(float lhs, float rhs) {
  if (rhs < lhs) return rhs;
  return lhs;
}
// Branching form of the float maximum; shadows the library fmax so it can be
// inlined in both host and device code.  Ties (and NaN comparisons) return
// the second argument's slot, matching the original ternary.
__host__ __device__ __forceinline__ float fmax(float lhs, float rhs) {
  if (rhs < lhs) return lhs;
  return rhs;
}
// Bilinear interpolation of one (height x width) channel plane at the
// continuous location (x, y).  Samples more than one pixel outside the plane
// return 0; coordinates in [-1, 0] are clamped onto the border.  The `index`
// parameter is unused here and kept only for debugging parity with the
// Caffe2 original.
template <typename T>
__device__ T bilinear_interpolate(
    const T* bottom_data,
    const int height,
    const int width,
    T y,
    T x,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    return 0;
  }
  // Clamp slightly-negative coordinates onto the top/left border.
  if (y <= 0) {
    y = 0;
  }
  if (x <= 0) {
    x = 0;
  }
  // Integer corners surrounding (x, y); on the bottom/right border both
  // corners collapse onto the last row/column.
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Interpolation weights: l* is the fractional part, h* its complement.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// RoIAlign forward pass.  One logical thread per output element
// (n, c, ph, pw), covered by a grid-stride loop so any launch size is
// correct.  Each pooled bin averages roi_bin_grid_h x roi_bin_grid_w
// bilinear samples placed at regular offsets inside the bin.
// bottom_rois rows are [batch_index, x1, y1, x2, y2] in input-image
// coordinates; spatial_scale converts them to feature-map coordinates.
template <typename T>
__global__ void roi_align_forward_kernel(
    const int outputElements,
    const T* bottom_data, // input tensor
    const T* bottom_rois, // input rois
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    T* top_data) // output
{
  // CUDA_1D_KERNEL_LOOP(index, nthreads) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < outputElements;
       index += blockDim.x * gridDim.x)
  {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Row n of the ROI table; field 0 selects the input image in the batch.
    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Do not using rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
    // Force malformed ROIs to be 1x1
    T roi_width = fmax(roi_end_w - roi_start_w, (T)1.);
    T roi_height = fmax(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Base of the (roi_batch_ind, c) channel plane.
    const T* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // We use roi_bin_grid to sample the grid and mimic integral;
    // when sampling_ratio <= 0 the density adapts to the bin size.
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceilf(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Sample at the center of each sub-cell of the bin.
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T val = bilinear_interpolate(
            offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;
    top_data[index] = output_val;
  }
}
// Host wrapper (float-only: the .data<float>() calls below assert the dtype).
// Validates layouts, allocates the (num_rois, C, pooled_h, pooled_w) output
// and launches roi_align_forward_kernel on the current stream.
Tensor roi_align_forward_cuda(
    const Tensor& input,
    const Tensor& bottom_rois,
    int64_t pooled_height,
    int64_t pooled_width,
    double spatial_scale,
    int64_t sampling_ratio)
{
  // Input is the output of the last convolutional layer in the Backbone network, so
  // it should be in the format of NCHW
  AT_ASSERT(input.ndimension() == 4, "Input to RoI Align should be a NCHW Tensor");
  // ROIs is the set of region proposals to process. It is a 2D Tensor where the first
  // dim is the # of proposals, and the second dim is the n itself in the form
  // [batch_index startW startH endW endH]
  AT_ASSERT(bottom_rois.ndimension() == 2, "RoI Proposals should be a 2D Tensor, (batch_sz x proposals)");
  AT_ASSERT(bottom_rois.size(1) == 5, "Proposals should be of the form [batch_index startW startH endW enH]");
  auto proposals = bottom_rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  // Output Tensor is (num_rois, C, pooled_height, pooled_width)
  auto output = input.type().tensor({proposals, channels, pooled_height, pooled_width});
  AT_ASSERT(input.is_contiguous(), "input must be contiguous");
  AT_ASSERT(bottom_rois.is_contiguous(), "bottom_rois must be contiguous");
  // dim3 block(512);
  // dim3 grid((output.numel() + 512 - 1) / 512);
  // Cap the grid at CUDA_MAX_BLOCKS; the kernel's grid-stride loop picks up
  // the remainder.  NOTE(review): `fmin` here is the float helper above, so
  // the block count is exact only up to 2^24 -- ample for realistic sizes.
  int64_t total_threads = output.numel();
  int64_t blocks = fmin(GET_BLOCKS(total_threads),CUDA_MAX_BLOCKS);
  roi_align_forward_kernel<<<blocks, CUDA_NUM_THREADS, 0, globalContext().getCurrentCUDAStream()>>>(
      output.numel(),
      input.data<float>(),
      bottom_rois.data<float>(),
      static_cast<float>(spatial_scale),
      channels,
      height,
      width,
      pooled_height,
      pooled_width,
      sampling_ratio,
      output.data<float>());
  AT_ASSERT(cudaGetLastError() == cudaSuccess, "roi_align_forward_kernel failed");
  return output;
}
} // at::contrib
} // at |
b378900b4d8c15e2a141fd0b9ccfccb130af3170.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
// Stringstream is a big hammer, but I want to rely on operator<< for dtype.
#include <sstream>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
// Per-chunk functor consumed by multi_tensor_apply<2>: copies tensor list 0
// (in_t) into tensor list 1 (out_t), multiplying each element by `scale`.
// Used for mixed-precision loss (un)scaling: if any loaded value is
// non-finite, the global no-op flag is set so the whole optimizer step can
// be skipped; anything already written for such a step is simply abandoned.
template<typename in_t, typename out_t>
struct ScaleFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<2>& tl,
    float scale)
  {
    // Cache the global no-op flag once per block and bail out early if some
    // other chunk already observed an overflow.
    __shared__ int noop_smem;
    if(threadIdx.x == 0)
      noop_smem = *noop_gmem;
    __syncthreads();
    if(noop_smem == 1)
      return;
    // Locate this block's chunk inside its tensor.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];
    in_t* in = (in_t*)tl.addresses[0][tensor_loc];
    in += chunk_idx*chunk_size;
    out_t* out = (out_t*)tl.addresses[1][tensor_loc];
    out += chunk_idx*chunk_size;
    n -= chunk_idx*chunk_size;
    // Non-divergent exit condition for the __syncthreads
    float incoming_vals[ILP];
    for(int i_start = 0;
        i_start < n && i_start < chunk_size;
        i_start += blockDim.x*ILP)
    {
      // Stage ILP elements per thread (strided by blockDim.x) in registers.
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        incoming_vals[ii] = 0;
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
          incoming_vals[ii] = static_cast<float>(in[i]);
      }
      // Note: from a pure memory dependency perspective, there's likely no
      // point unrolling the write loop, since writes just fire off once their
      // LDGs arrive.  Put another way, the STGs are dependent on the LDGs,
      // but not on each other.  There is still compute ILP benefit from
      // unrolling the loop though.
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
          if(isfinite(incoming_vals[ii]))
            out[i] = static_cast<out_t>(incoming_vals[ii]*scale);
          else
            *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
      }
      // *noop_gmem = 1 is NOT guaranteed to be seen immediately by thread 0. I wonder if
      // we can rig block-wide and grid-wide short-circuiting with only one syncthreads.
      // It's possible we can just lean on the cache (no smem or syncs) and still be fast.
      if(threadIdx.x == 0)
        noop_smem = *noop_gmem;
      __syncthreads();
      if(noop_smem == 1)
        break;
    }
  }
};
// Scales every tensor in tensor_lists[0] by `scale` into the corresponding
// tensor of tensor_lists[1] (out-of-place, possibly with a dtype change).
// On the device, noop_flag is set to 1 if any input value is non-finite so
// the caller can skip the optimizer step (loss-scaling overflow detection).
// Input dtype is dispatched via AT_DISPATCH_FLOATING_TYPES_AND_HALF; output
// dtype is handled by the explicit switch (Half and Float only).
void multi_tensor_scale_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  float scale)
{
  using namespace at;
  // The output (downscaled) type is always float.
  // If build times suffer, think about where to put this dispatch,
  // and what logic should be moved out of multi_tensor_apply.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(TypeShim(tensor_lists[0][0].type()),
    "multi_tensor_scale_cuda",
    [&]
    {
      // using accscalar_t = acc_type<scalar_t, true>;
      switch(tensor_lists[1][0].scalar_type())
      {
        case at::ScalarType::Half:
          multi_tensor_apply<2>(
            BLOCK_SIZE,
            chunk_size,
            noop_flag,
            tensor_lists,
            ScaleFunctor<scalar_t, at::Half>(),
            scale);
          break;
        case at::ScalarType::Float:
          multi_tensor_apply<2>(
            BLOCK_SIZE,
            chunk_size,
            noop_flag,
            tensor_lists,
            ScaleFunctor<scalar_t, float>(),
            scale);
          break;
        default:
          std::stringstream ss;
          ss << "multi_tensor_scale_cuda not implemented for output type = "
             << tensor_lists[1][0].dtype();
          AT_ERROR(ss.str().c_str());
      }
    });
  // Surface launch-configuration errors; execution errors surface at the
  // next synchronizing call (uncomment the sync below when debugging).
  AT_CUDA_CHECK(hipGetLastError());
  // AT_CUDA_CHECK(hipDeviceSynchronize());
}
| b378900b4d8c15e2a141fd0b9ccfccb130af3170.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
// Stringstream is a big hammer, but I want to rely on operator<< for dtype.
#include <sstream>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
// Per-chunk functor consumed by multi_tensor_apply<2>: copies tensor list 0
// (in_t) into tensor list 1 (out_t), multiplying each element by `scale`.
// Used for mixed-precision loss (un)scaling: if any loaded value is
// non-finite, the global no-op flag is set so the whole optimizer step can
// be skipped; anything already written for such a step is simply abandoned.
template<typename in_t, typename out_t>
struct ScaleFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<2>& tl,
    float scale)
  {
    // Cache the global no-op flag once per block and bail out early if some
    // other chunk already observed an overflow.
    __shared__ int noop_smem;
    if(threadIdx.x == 0)
      noop_smem = *noop_gmem;
    __syncthreads();
    if(noop_smem == 1)
      return;
    // Locate this block's chunk inside its tensor.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];
    in_t* in = (in_t*)tl.addresses[0][tensor_loc];
    in += chunk_idx*chunk_size;
    out_t* out = (out_t*)tl.addresses[1][tensor_loc];
    out += chunk_idx*chunk_size;
    n -= chunk_idx*chunk_size;
    // Non-divergent exit condition for the __syncthreads
    float incoming_vals[ILP];
    for(int i_start = 0;
        i_start < n && i_start < chunk_size;
        i_start += blockDim.x*ILP)
    {
      // Stage ILP elements per thread (strided by blockDim.x) in registers.
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        incoming_vals[ii] = 0;
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
          incoming_vals[ii] = static_cast<float>(in[i]);
      }
      // Note: from a pure memory dependency perspective, there's likely no
      // point unrolling the write loop, since writes just fire off once their
      // LDGs arrive.  Put another way, the STGs are dependent on the LDGs,
      // but not on each other.  There is still compute ILP benefit from
      // unrolling the loop though.
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
          if(isfinite(incoming_vals[ii]))
            out[i] = static_cast<out_t>(incoming_vals[ii]*scale);
          else
            *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
      }
      // *noop_gmem = 1 is NOT guaranteed to be seen immediately by thread 0. I wonder if
      // we can rig block-wide and grid-wide short-circuiting with only one syncthreads.
      // It's possible we can just lean on the cache (no smem or syncs) and still be fast.
      if(threadIdx.x == 0)
        noop_smem = *noop_gmem;
      __syncthreads();
      if(noop_smem == 1)
        break;
    }
  }
};
// Scales every tensor in tensor_lists[0] by `scale` into the corresponding
// tensor of tensor_lists[1] (out-of-place, possibly with a dtype change).
// On the device, noop_flag is set to 1 if any input value is non-finite so
// the caller can skip the optimizer step (loss-scaling overflow detection).
// Input dtype is dispatched via AT_DISPATCH_FLOATING_TYPES_AND_HALF; output
// dtype is handled by the explicit switch (Half and Float only).
void multi_tensor_scale_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  float scale)
{
  using namespace at;
  // The output (downscaled) type is always float.
  // If build times suffer, think about where to put this dispatch,
  // and what logic should be moved out of multi_tensor_apply.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(TypeShim(tensor_lists[0][0].type()),
    "multi_tensor_scale_cuda",
    [&]
    {
      // using accscalar_t = acc_type<scalar_t, true>;
      switch(tensor_lists[1][0].scalar_type())
      {
        case at::ScalarType::Half:
          multi_tensor_apply<2>(
            BLOCK_SIZE,
            chunk_size,
            noop_flag,
            tensor_lists,
            ScaleFunctor<scalar_t, at::Half>(),
            scale);
          break;
        case at::ScalarType::Float:
          multi_tensor_apply<2>(
            BLOCK_SIZE,
            chunk_size,
            noop_flag,
            tensor_lists,
            ScaleFunctor<scalar_t, float>(),
            scale);
          break;
        default:
          std::stringstream ss;
          ss << "multi_tensor_scale_cuda not implemented for output type = "
             << tensor_lists[1][0].dtype();
          AT_ERROR(ss.str().c_str());
      }
    });
  // Surface launch-configuration errors; execution errors surface at the
  // next synchronizing call (uncomment the sync below when debugging).
  AT_CUDA_CHECK(cudaGetLastError());
  // AT_CUDA_CHECK(cudaDeviceSynchronize());
}
|
746c46e5c9779227dd692169531789794673cd10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <nvisii/nvisii.h>
#include <optix_stubs.h>
// In the future, this file can be used for stuff that uses the CUDA Thrust library
// Temporal reprojection / accumulation kernel, launched twice per frame:
//   copy == true : pass 1, snapshot imageBuffer into scratchBuffer (the
//                  "previous frame" that pass 2 samples from)
//   copy == false: pass 2, blend the new sample with the history fetched at
//                  the motion-vector-reprojected pixel of the snapshot.
// History weight is .95; it drops to 0 (the new sample is used unblended)
// when the reprojected pixel falls off-screen, the motion vector is flagged
// invalid (mvec.w < 0), or the albedo at the two pixels differs (a
// disocclusion test via exact component equality).
__global__
void _reproject(glm::vec4 *sampleBuffer, glm::vec4 *t0AlbedoBuffer, glm::vec4 *t1AlbedoBuffer, glm::vec4 *mvecBuffer, glm::vec4 *scratchBuffer, glm::vec4 *imageBuffer, bool copy, int width, int height)
{
    // Pixel handled by this thread.
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard each axis separately: the launch grid is ceil-divided, so threads
    // with c >= width (but small r) exist.  The previous flat-index check
    // (i >= width*height) let those threads alias into the following row,
    // producing wrong/duplicate writes whenever width is not a multiple of
    // the block width.
    if (c >= width || r >= height) return;
    const int i = r * width + c; // 1D flat index
    if (copy == true) {
        scratchBuffer[i] = imageBuffer[i];
        return;
    }
    // Motion vector is stored normalized; scale to pixels and negate to step
    // back to the previous-frame position.
    glm::vec2 mvec = -glm::vec2(mvecBuffer[i]) * glm::vec2(width, height);
    glm::vec2 p = glm::vec2(c, r);
    float weight = .95;
    glm::ivec2 reproj = ivec2(p + mvec);
    // Reject history that comes from outside the image or from an invalid
    // motion vector.
    if (any(greaterThan(reproj, ivec2(width - 1, height - 1)))) weight = 0.0f;
    if (any(lessThan(reproj, ivec2(0, 0)))) weight = 0.0f;
    if (mvecBuffer[i].w < 0) weight = 0.f;
    const int i_reproj = reproj.y * width + reproj.x; // 1D flat index
    glm::vec4 oldCol = scratchBuffer[i_reproj];
    glm::vec4 curCol = sampleBuffer[i];
    // Disocclusion test: any albedo change invalidates the history.
    glm::vec4 oldAlb = t0AlbedoBuffer[i_reproj];
    glm::vec4 curAlb = t1AlbedoBuffer[i];
    if (!glm::all(glm::equal(oldAlb, curAlb))) weight = 0.f;
    glm::vec4 newCol = glm::mix(curCol, oldCol, glm::vec4(weight));
    imageBuffer[i] = newCol;
}
// Host entry point: temporal reprojection of the current frame's samples
// against the previous frame.  Launches _reproject twice: first to snapshot
// imageBuffer into scratchBuffer, then to blend the new samples with the
// reprojected history from that snapshot.
void reproject(glm::vec4 *sampleBuffer, glm::vec4 *t0AlbedoBuffer, glm::vec4 *t1AlbedoBuffer, glm::vec4 *mvecBuffer, glm::vec4 *scratchBuffer, glm::vec4 *imageBuffer, int width, int height)
{
    // TEMPORARY, reproject
    // One 32x32 block per image tile, ceil-divided so edge pixels are covered.
    dim3 blockSize(32,32);
    int bx = (width + blockSize.x - 1) / blockSize.x;
    int by = (height + blockSize.y - 1) / blockSize.y;
    dim3 gridSize = dim3 (bx, by);
    // Pass 1 (copy=true) then pass 2 (blend, copy=false).
    hipLaunchKernelGGL(( _reproject), dim3(gridSize),dim3(blockSize), 0, 0, sampleBuffer, t0AlbedoBuffer, t1AlbedoBuffer, mvecBuffer, scratchBuffer, imageBuffer, true, width, height);
    hipLaunchKernelGGL(( _reproject), dim3(gridSize),dim3(blockSize), 0, 0, sampleBuffer, t0AlbedoBuffer, t1AlbedoBuffer, mvecBuffer, scratchBuffer, imageBuffer, false, width, height);
}
| 746c46e5c9779227dd692169531789794673cd10.cu | #include <nvisii/nvisii.h>
#include <optix_stubs.h>
// In the future, this file can be used for stuff that uses the CUDA Thrust library
// Temporal reprojection / accumulation kernel, launched twice per frame:
//   copy == true : pass 1, snapshot imageBuffer into scratchBuffer (the
//                  "previous frame" that pass 2 samples from)
//   copy == false: pass 2, blend the new sample with the history fetched at
//                  the motion-vector-reprojected pixel of the snapshot.
// History weight is .95; it drops to 0 (the new sample is used unblended)
// when the reprojected pixel falls off-screen, the motion vector is flagged
// invalid (mvec.w < 0), or the albedo at the two pixels differs (a
// disocclusion test via exact component equality).
__global__
void _reproject(glm::vec4 *sampleBuffer, glm::vec4 *t0AlbedoBuffer, glm::vec4 *t1AlbedoBuffer, glm::vec4 *mvecBuffer, glm::vec4 *scratchBuffer, glm::vec4 *imageBuffer, bool copy, int width, int height)
{
    // Pixel handled by this thread.
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard each axis separately: the launch grid is ceil-divided, so threads
    // with c >= width (but small r) exist.  The previous flat-index check
    // (i >= width*height) let those threads alias into the following row,
    // producing wrong/duplicate writes whenever width is not a multiple of
    // the block width.
    if (c >= width || r >= height) return;
    const int i = r * width + c; // 1D flat index
    if (copy == true) {
        scratchBuffer[i] = imageBuffer[i];
        return;
    }
    // Motion vector is stored normalized; scale to pixels and negate to step
    // back to the previous-frame position.
    glm::vec2 mvec = -glm::vec2(mvecBuffer[i]) * glm::vec2(width, height);
    glm::vec2 p = glm::vec2(c, r);
    float weight = .95;
    glm::ivec2 reproj = ivec2(p + mvec);
    // Reject history that comes from outside the image or from an invalid
    // motion vector.
    if (any(greaterThan(reproj, ivec2(width - 1, height - 1)))) weight = 0.0f;
    if (any(lessThan(reproj, ivec2(0, 0)))) weight = 0.0f;
    if (mvecBuffer[i].w < 0) weight = 0.f;
    const int i_reproj = reproj.y * width + reproj.x; // 1D flat index
    glm::vec4 oldCol = scratchBuffer[i_reproj];
    glm::vec4 curCol = sampleBuffer[i];
    // Disocclusion test: any albedo change invalidates the history.
    glm::vec4 oldAlb = t0AlbedoBuffer[i_reproj];
    glm::vec4 curAlb = t1AlbedoBuffer[i];
    if (!glm::all(glm::equal(oldAlb, curAlb))) weight = 0.f;
    glm::vec4 newCol = glm::mix(curCol, oldCol, glm::vec4(weight));
    imageBuffer[i] = newCol;
}
void reproject(glm::vec4 *sampleBuffer, glm::vec4 *t0AlbedoBuffer, glm::vec4 *t1AlbedoBuffer, glm::vec4 *mvecBuffer, glm::vec4 *scratchBuffer, glm::vec4 *imageBuffer, int width, int height)
{
// TEMPORARY, reproject
dim3 blockSize(32,32);
int bx = (width + blockSize.x - 1) / blockSize.x;
int by = (height + blockSize.y - 1) / blockSize.y;
dim3 gridSize = dim3 (bx, by);
_reproject<<<gridSize,blockSize>>>(sampleBuffer, t0AlbedoBuffer, t1AlbedoBuffer, mvecBuffer, scratchBuffer, imageBuffer, true, width, height);
_reproject<<<gridSize,blockSize>>>(sampleBuffer, t0AlbedoBuffer, t1AlbedoBuffer, mvecBuffer, scratchBuffer, imageBuffer, false, width, height);
}
|
d3359b981ebbe6fa8266b953d877d67179c00fc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_cellx;
int xdim0_initialise_chunk_kernel_cellx_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_cellx;
int ydim0_initialise_chunk_kernel_cellx_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_cellx;
int xdim1_initialise_chunk_kernel_cellx_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_cellx;
int ydim1_initialise_chunk_kernel_cellx_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_cellx;
int xdim2_initialise_chunk_kernel_cellx_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_cellx;
int ydim2_initialise_chunk_kernel_cellx_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_cellx * (y) + \
xdim0_initialise_chunk_kernel_cellx * ydim0_initialise_chunk_kernel_cellx * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_cellx * (y) + \
xdim1_initialise_chunk_kernel_cellx * ydim1_initialise_chunk_kernel_cellx * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_cellx * (y) + \
xdim2_initialise_chunk_kernel_cellx * ydim2_initialise_chunk_kernel_cellx * \
(z))
// user function
__device__
void
initialise_chunk_kernel_cellx_gpu(const double *vertexx, double *cellx,
double *celldx) {
double d_x = (grid.xmax - grid.xmin) / (double)grid.x_cells;
cellx[OPS_ACC1(0, 0, 0)] =
0.5 * (vertexx[OPS_ACC0(0, 0, 0)] + vertexx[OPS_ACC0(1, 0, 0)]);
celldx[OPS_ACC2(0, 0, 0)] = d_x;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_cellx(const double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_initialise_chunk_kernel_cellx +
idx_z * 0 * 1 * xdim0_initialise_chunk_kernel_cellx *
ydim0_initialise_chunk_kernel_cellx;
arg1 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim1_initialise_chunk_kernel_cellx +
idx_z * 0 * 1 * xdim1_initialise_chunk_kernel_cellx *
ydim1_initialise_chunk_kernel_cellx;
arg2 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim2_initialise_chunk_kernel_cellx +
idx_z * 0 * 1 * xdim2_initialise_chunk_kernel_cellx *
ydim2_initialise_chunk_kernel_cellx;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_cellx_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_cellx(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_initialise_chunk_kernel_cellx_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 6))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(6, "initialise_chunk_kernel_cellx");
OPS_kernels[6].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_cellx_h ||
ydim0 != ydim0_initialise_chunk_kernel_cellx_h ||
xdim1 != xdim1_initialise_chunk_kernel_cellx_h ||
ydim1 != ydim1_initialise_chunk_kernel_cellx_h ||
xdim2 != xdim2_initialise_chunk_kernel_cellx_h ||
ydim2 != ydim2_initialise_chunk_kernel_cellx_h) {
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_cellx, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_cellx_h = xdim0;
hipMemcpyToSymbol(ydim0_initialise_chunk_kernel_cellx, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_cellx_h = ydim0;
hipMemcpyToSymbol(xdim1_initialise_chunk_kernel_cellx, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_cellx_h = xdim1;
hipMemcpyToSymbol(ydim1_initialise_chunk_kernel_cellx, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_cellx_h = ydim1;
hipMemcpyToSymbol(xdim2_initialise_chunk_kernel_cellx, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_cellx_h = xdim2;
hipMemcpyToSymbol(ydim2_initialise_chunk_kernel_cellx, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_cellx_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[6].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_cellx), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[6].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[6].mpi_time += t2 - t1;
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_cellx(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 6;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 6;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_cellx_execute;
if (OPS_diags > 1) {
ops_timing_realloc(6, "initialise_chunk_kernel_cellx");
}
ops_enqueue_kernel(desc);
}
#endif
| d3359b981ebbe6fa8266b953d877d67179c00fc1.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_cellx;
int xdim0_initialise_chunk_kernel_cellx_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_cellx;
int ydim0_initialise_chunk_kernel_cellx_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_cellx;
int xdim1_initialise_chunk_kernel_cellx_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_cellx;
int ydim1_initialise_chunk_kernel_cellx_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_cellx;
int xdim2_initialise_chunk_kernel_cellx_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_cellx;
int ydim2_initialise_chunk_kernel_cellx_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_cellx * (y) + \
xdim0_initialise_chunk_kernel_cellx * ydim0_initialise_chunk_kernel_cellx * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_cellx * (y) + \
xdim1_initialise_chunk_kernel_cellx * ydim1_initialise_chunk_kernel_cellx * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_cellx * (y) + \
xdim2_initialise_chunk_kernel_cellx * ydim2_initialise_chunk_kernel_cellx * \
(z))
// user function
__device__
void
initialise_chunk_kernel_cellx_gpu(const double *vertexx, double *cellx,
double *celldx) {
double d_x = (grid.xmax - grid.xmin) / (double)grid.x_cells;
cellx[OPS_ACC1(0, 0, 0)] =
0.5 * (vertexx[OPS_ACC0(0, 0, 0)] + vertexx[OPS_ACC0(1, 0, 0)]);
celldx[OPS_ACC2(0, 0, 0)] = d_x;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_cellx(const double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_initialise_chunk_kernel_cellx +
idx_z * 0 * 1 * xdim0_initialise_chunk_kernel_cellx *
ydim0_initialise_chunk_kernel_cellx;
arg1 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim1_initialise_chunk_kernel_cellx +
idx_z * 0 * 1 * xdim1_initialise_chunk_kernel_cellx *
ydim1_initialise_chunk_kernel_cellx;
arg2 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim2_initialise_chunk_kernel_cellx +
idx_z * 0 * 1 * xdim2_initialise_chunk_kernel_cellx *
ydim2_initialise_chunk_kernel_cellx;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_cellx_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_cellx(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_initialise_chunk_kernel_cellx_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 6))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(6, "initialise_chunk_kernel_cellx");
OPS_kernels[6].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_cellx_h ||
ydim0 != ydim0_initialise_chunk_kernel_cellx_h ||
xdim1 != xdim1_initialise_chunk_kernel_cellx_h ||
ydim1 != ydim1_initialise_chunk_kernel_cellx_h ||
xdim2 != xdim2_initialise_chunk_kernel_cellx_h ||
ydim2 != ydim2_initialise_chunk_kernel_cellx_h) {
cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_cellx, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_cellx_h = xdim0;
cudaMemcpyToSymbol(ydim0_initialise_chunk_kernel_cellx, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_cellx_h = ydim0;
cudaMemcpyToSymbol(xdim1_initialise_chunk_kernel_cellx, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_cellx_h = xdim1;
cudaMemcpyToSymbol(ydim1_initialise_chunk_kernel_cellx, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_cellx_h = ydim1;
cudaMemcpyToSymbol(xdim2_initialise_chunk_kernel_cellx, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_cellx_h = xdim2;
cudaMemcpyToSymbol(ydim2_initialise_chunk_kernel_cellx, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_cellx_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[6].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_cellx<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[6].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[6].mpi_time += t2 - t1;
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_cellx(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 6;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 6;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_cellx_execute;
if (OPS_diags > 1) {
ops_timing_realloc(6, "initialise_chunk_kernel_cellx");
}
ops_enqueue_kernel(desc);
}
#endif
|
31c9cbc349516deca46f8fb0cce18f1c44de136a.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMasked.cuh>
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMasked.cu"
#include <THH/THHGenerateCharType.h>
| 31c9cbc349516deca46f8fb0cce18f1c44de136a.cu | #include <THC/THCTensorMasked.cuh>
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMasked.cu"
#include <THC/THCGenerateCharType.h>
|
6a8ce2cca9df786c888a7f484c88cba7bfa33619.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_erfc_inv (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(erfcinv)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} | 6a8ce2cca9df786c888a7f484c88cba7bfa33619.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_erfc_inv (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(erfcinv)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} |
c15559593131d46de5aed33c65448c09a38925eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* cuda implementation of collatz conjecture
*/
#include <iostream>
#include <sstream>
#include <boost/format.hpp>
#include <time.h>
#include <signal.h>
__global__
void collatz(int n, long long unsigned int threshold, int* status) {
int k = 2 * ((blockIdx.x * blockDim.x) + threadIdx.x);
if (k < n) {
// Use tid to get offset
long long unsigned int number = threshold + k + 1; // +1 since we took off one from currentnum to get threshold
for (int j = 0; j < 2; j++) {
while (number > 1 && number > threshold) {
if (number % 2 == 0)
number = number >> 1;
else
number = ((number * 3) + 1) >> 1;
}
if ((number > 1 && number <= threshold) || (number == 1))
number = 1;
else
*status = 0;
}
}
}
bool keep_going;
void signalHandler(int sig) {
std::cout << "Received SIGINT. Exiting..." << std::endl;
keep_going = false;
}
int main(int argc, char* argv[]) {
// first thing's first, start signal handler to catch SIGINT and exit gracefully
keep_going = true;
signal(SIGINT, signalHandler);
long long unsigned int currentNumber = 1;
// let's have the user specify a start number if they want
std::cout << "Enter a number greater than or equal to 1 to start from [default: 1]: ";
std::string buffer;
getline(std::cin, buffer);
if (!buffer.empty()) {
std::stringstream ss(buffer);
ss >> currentNumber;
}
if (currentNumber < 1)
currentNumber = 1;
int N = 1<<21;
struct timespec tp_start, tp_loopstart, tp_end;
// Create status variable space
int status, *d_status;
hipError_t err = hipMalloc(&d_status, sizeof(int));
if (err != hipSuccess) {
std::cout << "hipMalloc failed, did you forget optirun?" << std::endl;
return 1;
}
// start main loop
clock_gettime(CLOCK_REALTIME, &tp_start);
clock_gettime(CLOCK_REALTIME, &tp_loopstart);
while (keep_going) {
// log timestamp and stats to console
clock_gettime(CLOCK_REALTIME, &tp_end);
if (tp_end.tv_sec - tp_loopstart.tv_sec > 4) {
std::stringstream timestamp;
timestamp << "Uptime: ";
timestamp << boost::str(boost::format("%02d:%02d")
% ((tp_end.tv_sec - tp_start.tv_sec) / 60)
% ((tp_end.tv_sec - tp_start.tv_sec) % 60));
timestamp << ". ";
timestamp << "Current number: ";
timestamp << boost::str(boost::format("%12d") % currentNumber);
timestamp << std::endl;
std::cout << timestamp.str();
clock_gettime(CLOCK_REALTIME, &tp_loopstart);
}
// initialize status to success
status = 1;
hipMemcpy(d_status, &status, sizeof(int), hipMemcpyHostToDevice);
// perform collatz on entire array
hipLaunchKernelGGL(( collatz), dim3(((N/2)+255)/256), dim3(256), 0, 0, N, (currentNumber-1), d_status);
// bring status back
hipMemcpy(&status, d_status, sizeof(int), hipMemcpyDeviceToHost);
// just check status variable
if (status == 0) {
std::cout << "Collatz failed" << std::endl;
break;
}
currentNumber += N;
}
std::cout << "While loop broken. Cleaning up..." << std::endl;
hipFree(d_status);
return 0;
}
| c15559593131d46de5aed33c65448c09a38925eb.cu | /*
* cuda implementation of collatz conjecture
*/
#include <iostream>
#include <sstream>
#include <boost/format.hpp>
#include <time.h>
#include <signal.h>
__global__
void collatz(int n, long long unsigned int threshold, int* status) {
int k = 2 * ((blockIdx.x * blockDim.x) + threadIdx.x);
if (k < n) {
// Use tid to get offset
long long unsigned int number = threshold + k + 1; // +1 since we took off one from currentnum to get threshold
for (int j = 0; j < 2; j++) {
while (number > 1 && number > threshold) {
if (number % 2 == 0)
number = number >> 1;
else
number = ((number * 3) + 1) >> 1;
}
if ((number > 1 && number <= threshold) || (number == 1))
number = 1;
else
*status = 0;
}
}
}
bool keep_going;
void signalHandler(int sig) {
std::cout << "Received SIGINT. Exiting..." << std::endl;
keep_going = false;
}
int main(int argc, char* argv[]) {
// first thing's first, start signal handler to catch SIGINT and exit gracefully
keep_going = true;
signal(SIGINT, signalHandler);
long long unsigned int currentNumber = 1;
// let's have the user specify a start number if they want
std::cout << "Enter a number greater than or equal to 1 to start from [default: 1]: ";
std::string buffer;
getline(std::cin, buffer);
if (!buffer.empty()) {
std::stringstream ss(buffer);
ss >> currentNumber;
}
if (currentNumber < 1)
currentNumber = 1;
int N = 1<<21;
struct timespec tp_start, tp_loopstart, tp_end;
// Create status variable space
int status, *d_status;
cudaError_t err = cudaMalloc(&d_status, sizeof(int));
if (err != cudaSuccess) {
std::cout << "cudaMalloc failed, did you forget optirun?" << std::endl;
return 1;
}
// start main loop
clock_gettime(CLOCK_REALTIME, &tp_start);
clock_gettime(CLOCK_REALTIME, &tp_loopstart);
while (keep_going) {
// log timestamp and stats to console
clock_gettime(CLOCK_REALTIME, &tp_end);
if (tp_end.tv_sec - tp_loopstart.tv_sec > 4) {
std::stringstream timestamp;
timestamp << "Uptime: ";
timestamp << boost::str(boost::format("%02d:%02d")
% ((tp_end.tv_sec - tp_start.tv_sec) / 60)
% ((tp_end.tv_sec - tp_start.tv_sec) % 60));
timestamp << ". ";
timestamp << "Current number: ";
timestamp << boost::str(boost::format("%12d") % currentNumber);
timestamp << std::endl;
std::cout << timestamp.str();
clock_gettime(CLOCK_REALTIME, &tp_loopstart);
}
// initialize status to success
status = 1;
cudaMemcpy(d_status, &status, sizeof(int), cudaMemcpyHostToDevice);
// perform collatz on entire array
collatz<<<((N/2)+255)/256, 256>>>(N, (currentNumber-1), d_status);
// bring status back
cudaMemcpy(&status, d_status, sizeof(int), cudaMemcpyDeviceToHost);
// just check status variable
if (status == 0) {
std::cout << "Collatz failed" << std::endl;
break;
}
currentNumber += N;
}
std::cout << "While loop broken. Cleaning up..." << std::endl;
cudaFree(d_status);
return 0;
}
|
fac41b616c55c4aa73136001df5b0d748a186d5f.hip | // !!! This is a file automatically generated by hipify!!!
//author: Alexandr Khozhanov
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <iterator>
#include <numeric>
#include <stdio.h>
#include <vector>
#include <algorithm>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] - b[i];
}
int main(int argc, char** argv)
{
int array_size = 0;
scanf("%d", &array_size);
getchar();
printf("Your array size: %d", array_size);
srand(NULL);
std::vector<int> a(array_size);
std::vector<int> b(array_size);
std::vector<int> c(array_size);
std::generate(a.begin(), a.end(), rand);
std::generate(b.begin(), b.end(), rand);
int *A = new int[array_size];
int *B = new int[array_size];
int *C = new int[array_size];
std::copy(a.begin(), a.end(), A);
std::copy(b.begin(), b.end(), B);
std::copy(c.begin(), c.end(), C);
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(C, A, B, array_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
getchar();
return 1;
}
printf("\n");
for (int i = 0; i < array_size; i++)
printf("%d ", A[i]);
printf("\n");
for (int i = 0; i < array_size; i++)
printf("%d ", B[i]);
printf("\n");
for (int i = 0; i < array_size; i++)
printf("%d ", C[i]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
getchar();
return 1;
}
getchar();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| fac41b616c55c4aa73136001df5b0d748a186d5f.cu |
//author: Alexandr Khozhanov
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <iterator>
#include <numeric>
#include <stdio.h>
#include <vector>
#include <algorithm>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] - b[i];
}
// Reads a vector length from stdin, fills two random vectors, runs the GPU
// kernel (which computes a - b, see addKernel) via addWithCuda, and prints
// the operands and the result.
//
// Fixes over the original:
//  - srand(NULL) seeded with a null-pointer constant; now seeds from time().
//  - the raw new int[] copies of the vectors leaked on every exit path; the
//    vectors' contiguous storage is passed directly instead.
//  - the scanf result is checked so a garbage/negative size cannot reach the
//    std::vector constructors.
int main(int argc, char** argv)
{
    int array_size = 0;
    if (scanf("%d", &array_size) != 1 || array_size < 0) {
        fprintf(stderr, "Invalid array size\n");
        return 1;
    }
    getchar();
    printf("Your array size: %d", array_size);
    // Seed the C RNG from the wall clock (was srand(NULL)).
    srand((unsigned int)time(NULL));
    std::vector<int> a(array_size);
    std::vector<int> b(array_size);
    std::vector<int> c(array_size);
    std::generate(a.begin(), a.end(), rand);
    std::generate(b.begin(), b.end(), rand);
    // vector storage is contiguous, so hand the buffers to CUDA directly.
    cudaError_t cudaStatus = addWithCuda(c.data(), a.data(), b.data(), array_size);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        getchar();
        return 1;
    }
    printf("\n");
    for (int i = 0; i < array_size; i++)
        printf("%d ", a[i]);
    printf("\n");
    for (int i = 0; i < array_size; i++)
        printf("%d ", b[i]);
    printf("\n");
    for (int i = 0; i < array_size; i++)
        printf("%d ", c[i]);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        getchar();
        return 1;
    }
    getchar();
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// (The kernel it launches actually computes c[i] = a[i] - b[i].)
//
// Allocates three device buffers, copies a and b to the device, launches
// addKernel with one thread per element, and copies the result back into c.
// All failure paths jump to the Error: label so the device buffers are always
// freed; the status of the first failing CUDA call is returned to the caller.
//
// NOTE(review): the launch is <<<1, size>>>, so `size` must not exceed the
// per-block thread limit of the device (1024 on current GPUs); larger sizes
// surface as a launch-configuration error via cudaGetLastError.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output)    .
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel
    // (kernel launches do not return a status; bad configs show up here).
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Shared cleanup for both success and failure (cudaFree(NULL) is a no-op).
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
57d738a577fc81ffac0b6d2a0b66109a55ca0e4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Calib.h"
#include <algorithm>
#include <string>
Calib::Calib(size_t numGenScans){
checkForCUDA();
genStore.setupGenList(numGenScans);
}
Calib::~Calib(void){
}
// Base-class stub: a plain calibration carries no camera model, so it is
// never panoramic. (Was `return NULL;` — a null-pointer constant returned
// from a bool function; behavior is unchanged, the conversion is now explicit.)
bool Calib::getIfPanoramic(size_t idx){
    return false;
}
size_t Calib::getNumPoints(size_t idx){
return moveStore.getNumPoints(idx);
}
size_t Calib::getNumDim(size_t idx){
return moveStore.getNumDim(idx);
}
size_t Calib::getImageDepth(size_t idx){
return baseStore.getDepth(idx);
}
size_t Calib::getNumCh(size_t idx){
return moveStore.getNumCh(scanIdx[idx]);
}
size_t Calib::getNumImages(void){
return baseStore.getNumImages();
}
size_t Calib::getImageWidth(size_t idx){
return baseStore.getWidth(idx);
}
size_t Calib::getImageHeight(size_t idx){
return baseStore.getHeight(idx);
}
void Calib::clearScans(void){
moveStore.removeAllScans();
}
void Calib::clearImages(void){
baseStore.removeAllImages();
}
void Calib::clearTforms(void){
return;
}
void Calib::clearExtras(void){
return;
}
void Calib::clearIndices(void){
tformIdx.clear();
scanIdx.clear();
}
void Calib::addScan(std::vector<thrust::host_vector<float>>& scanLIn, std::vector<thrust::host_vector<float>>& scanIIn){
moveStore.addScan(scanLIn, scanIIn);
}
void Calib::addImage(thrust::host_vector<float>& imageIn, size_t height, size_t width, size_t depth){
baseStore.addImage(imageIn, height, width, depth);
}
/*void Calib::addTform(thrust::host_vector<float>& tformIn, size_t tformSizeX, size_t tformSizeY){
tformStore.addTforms(tformIn, tformSizeX, tformSizeY);
}*/
void Calib::addTform(thrust::host_vector<float>& tformIn){
return;
}
float Calib::evalMetric(void){
return 0;
}
void Calib::addTformIndices(std::vector<size_t>& tformsIdxIn){
tformIdx.insert(tformIdx.end(), tformsIdxIn.begin(), tformsIdxIn.end());
}
void Calib::addScanIndices(std::vector<size_t>& scansIdxIn){
scanIdx.insert(scanIdx.end(),scansIdxIn.begin(), scansIdxIn.end());
}
// --- Metric selection -------------------------------------------------
// Each setter installs a freshly allocated metric implementation.
// NOTE(review): the previously installed `metric` is not deleted here, so
// calling a setter more than once appears to leak the old object — confirm
// ownership semantics before changing.
void Calib::setSSDMetric(void){
    metric = new SSD();
}
// Gradient-orientation measure.
void Calib::setGOMMetric(void){
    metric = new GOM();
}
// GOM variant.
void Calib::setGOMSMetric(void){
    metric = new GOMS();
}
// Mutual information with 50 histogram bins.
void Calib::setMIMetric(void){
    metric = new MI(50);
}
// Normalised mutual information with 50 histogram bins.
void Calib::setNMIMetric(void){
    metric = new NMI(50);
}
void Calib::setLEVMetric(void){
    metric = new LEV();
}
void Calib::addCameraIndices(std::vector<size_t>& cameraIdxIn){
mexErrMsgTxt("Attempted to setup camera for use with non-camera calibration");
return;
}
void Calib::addCamera(thrust::host_vector<float>& cameraIn, boolean panoramic){
mexErrMsgTxt("Attempted to setup camera for use with non-camera calibration");
return;
}
void Calib::getBaseImage(thrust::device_vector<float>& image, size_t idx){
image = baseStore.getImage(idx);
}
void Calib::generateImage(thrust::device_vector<float>& image, size_t width, size_t height, size_t dilate, size_t idx, bool imageColour){
return;
}
void Calib::colourScan(float* scan, size_t idx){
return;
}
CameraCalib::CameraCalib(size_t numGen) : Calib(numGen){}
bool CameraCalib::getIfPanoramic(size_t idx){
return cameraStore.getPanoramic(idx);
}
void CameraCalib::clearTforms(void){
tformStore.removeAllTforms();
}
void CameraCalib::clearExtras(void){
cameraStore.removeAllCameras();
return;
}
void CameraCalib::clearIndices(void){
tformIdx.clear();
scanIdx.clear();
cameraIdx.clear();
}
void CameraCalib::addTform(thrust::host_vector<float>& tformIn){
tformStore.addTforms(tformIn);
}
void CameraCalib::addCameraIndices(std::vector<size_t>& cameraIdxIn){
cameraIdx.insert(cameraIdx.end(),cameraIdxIn.begin(), cameraIdxIn.end());
}
void CameraCalib::addCamera(thrust::host_vector<float>& cameraIn, boolean panoramic){
cameraStore.addCams(cameraIn, panoramic);
}
// Evaluates the active metric over every image/scan pair and returns the sum.
// Work is batched in groups of genStore.getNumGen() concurrent generated
// scans (one HIP stream each): first the generated point/intensity buffers
// are cleared and the scans transformed, then image colours are interpolated,
// then the metric is accumulated.
float CameraCalib::evalMetric(void){
    std::vector<float> metricVal;
    // All three index arrays must have one entry per base image.
    if(tformIdx.size() != baseStore.getNumImages()){
        std::ostringstream err; err << "Transform index has not been correctly set up";
        mexErrMsgTxt(err.str().c_str());
        return 0;
    }
    if(cameraIdx.size() != baseStore.getNumImages()){
        std::ostringstream err; err << "Camera index has not been correctly set up";
        mexErrMsgTxt(err.str().c_str());
        return 0;
    }
    if(scanIdx.size() != baseStore.getNumImages()){
        std::ostringstream err; err << "Scan index has not been correctly set up";
        mexErrMsgTxt(err.str().c_str());
        return 0;
    }
    float out = 0;
    for(size_t j = 0; j < baseStore.getNumImages(); j+= genStore.getNumGen()){
        for(size_t i = 0; i < genStore.getNumGen(); i++){
            if((i+j) >= baseStore.getNumImages()){
                break;
            }
            // NOTE(review): hipMemsetAsync's count is in BYTES, but a point
            // count is passed; if getGLP/getGIP return float buffers only a
            // quarter of each buffer is cleared — confirm intended.
            for(size_t k = 0; k < IMAGE_DIM; k++){
                CudaSafeCall(hipMemsetAsync(genStore.getGLP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
            }
            // NOTE(review): loop bound uses scanIdx[j] while the body uses
            // scanIdx[j+i] — verify this is not meant to be scanIdx[j+i].
            for(size_t k = 0; k < moveStore.getNumCh(scanIdx[j]); k++){
                CudaSafeCall(hipMemsetAsync(genStore.getGIP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
            }
            tformStore.transform(&moveStore, &cameraStore, &genStore, tformIdx[j+i], cameraIdx[j+i], scanIdx[j+i],i);
        }
        // Interpolate base-image colours at the generated scan positions.
        for(size_t i = 0; i < genStore.getNumGen(); i++){
            if((i+j) >= baseStore.getNumImages()){
                break;
            }
            baseStore.interpolateImage(&moveStore, &genStore, j+i, scanIdx[j+i], i, true);
        }
        // Accumulate the metric for each pair in the batch.
        for(size_t i = 0; i < genStore.getNumGen(); i++){
            if((i+j) >= baseStore.getNumImages()){
                break;
            }
            out += metric->evalMetric(&moveStore, &genStore, scanIdx[j+i], i);
        }
    }
    return out;
}
void CameraCalib::generateImage(thrust::device_vector<float>& image, size_t width, size_t height, size_t dilate, size_t idx, bool imageColour){
if(imageColour){
image.resize(baseStore.getDepth(idx)*width*height);
}
else{
image.resize(moveStore.getNumCh(scanIdx[idx])*width*height);
}
tformStore.transform(&moveStore, &cameraStore, &genStore, tformIdx[idx], cameraIdx[idx], scanIdx[idx], 0);
hipDeviceSynchronize();
if(imageColour){
baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
hipDeviceSynchronize();
for(size_t i = 0; i < baseStore.getDepth(idx); i++){
hipLaunchKernelGGL(( generateOutputKernel), dim3(gridSize(moveStore.getNumPoints(scanIdx[idx]))) ,dim3(BLOCK_SIZE), 0, 0,
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGIP(0,i,moveStore.getNumPoints(scanIdx[idx])),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
else{
for(size_t i = 0; i < moveStore.getNumCh(scanIdx[idx]); i++){
hipLaunchKernelGGL(( generateOutputKernel), dim3(gridSize(moveStore.getNumPoints(scanIdx[idx]))) ,dim3(BLOCK_SIZE), 0, 0,
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
moveStore.getIP(scanIdx[idx],i),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
CudaCheckError();
}
void CameraCalib::colourScan(float* scan, size_t idx){
tformStore.transform(&moveStore, &cameraStore, &genStore, tformIdx[idx], cameraIdx[idx], scanIdx[idx], 0);
baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
hipDeviceSynchronize();
for(size_t j = 0; j < moveStore.getNumDim(scanIdx[idx]); j++){
hipMemcpy(&scan[j*moveStore.getNumPoints(scanIdx[idx])],moveStore.getLP(scanIdx[idx],j),moveStore.getNumPoints(scanIdx[idx])*sizeof(float),hipMemcpyDeviceToHost);
}
for(size_t j = 0; j < baseStore.getDepth(idx); j++){
hipMemcpy(&scan[(j+moveStore.getNumDim(scanIdx[idx]))*moveStore.getNumPoints(scanIdx[idx])],genStore.getGIP(0,j,moveStore.getNumPoints(scanIdx[idx])),moveStore.getNumPoints(scanIdx[idx])*sizeof(float),hipMemcpyDeviceToHost);
}
CudaCheckError();
}
ImageCalib::ImageCalib(size_t numGen) : Calib(numGen){}
// Image-to-image calibration has no camera model, so never panoramic.
// (Was `return NULL;` — a null-pointer constant returned from a bool
// function; same runtime value, now spelled correctly.)
bool ImageCalib::getIfPanoramic(size_t idx){
    return false;
}
void ImageCalib::clearTforms(void){
tformStore.removeAllTforms();
}
void ImageCalib::clearExtras(void){
return;
}
void ImageCalib::clearIndices(void){
tformIdx.clear();
scanIdx.clear();
}
void ImageCalib::addTform(thrust::host_vector<float>& tformIn){
tformStore.addTforms(tformIn);
}
float ImageCalib::evalMetric(void){
std::vector<float> metricVal;
if(tformIdx.size() != baseStore.getNumImages()){
std::ostringstream err; err << "Transform index has not been correctly set up";
mexErrMsgTxt(err.str().c_str());
return 0;
}
if(scanIdx.size() != baseStore.getNumImages()){
std::ostringstream err; err << "Scan index has not been correctly set up";
mexErrMsgTxt(err.str().c_str());
return 0;
}
float out = 0;
for(size_t j = 0; j < baseStore.getNumImages(); j+= genStore.getNumGen()){
for(size_t i = 0; i < genStore.getNumGen(); i++){
if((i+j) >= baseStore.getNumImages()){
break;
}
for(size_t k = 0; k < IMAGE_DIM; k++){
CudaSafeCall(hipMemsetAsync(genStore.getGLP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
}
for(size_t k = 0; k < moveStore.getNumCh(scanIdx[j]); k++){
CudaSafeCall(hipMemsetAsync(genStore.getGIP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
}
tformStore.transform(&moveStore, &noCamera, &genStore, tformIdx[j+i], NULL, scanIdx[j+i],i);
}
for(size_t i = 0; i < genStore.getNumGen(); i++){
if((i+j) >= baseStore.getNumImages()){
break;
}
baseStore.interpolateImage(&moveStore, &genStore, j+i, scanIdx[j+i], i, true);
}
for(size_t i = 0; i < genStore.getNumGen(); i++){
if((i+j) >= baseStore.getNumImages()){
break;
}
out += metric->evalMetric(&moveStore, &genStore, scanIdx[j+i], i);
}
}
return out;
}
void ImageCalib::generateImage(thrust::device_vector<float>& image, size_t width, size_t height, size_t dilate, size_t idx, bool imageColour){
if(imageColour){
image.resize(baseStore.getDepth(idx)*width*height);
}
else{
image.resize(moveStore.getNumCh(scanIdx[idx])*width*height);
}
tformStore.transform(&moveStore, &noCamera, &genStore, tformIdx[idx], NULL, scanIdx[idx], 0);
hipDeviceSynchronize();
if(imageColour){
baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
hipDeviceSynchronize();
for(size_t i = 0; i < baseStore.getDepth(idx); i++){
hipLaunchKernelGGL(( generateOutputKernel), dim3(gridSize(moveStore.getNumPoints(scanIdx[idx]))) ,dim3(BLOCK_SIZE), 0, 0,
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGIP(0,i,moveStore.getNumPoints(scanIdx[idx])),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
else{
for(size_t i = 0; i < moveStore.getNumCh(scanIdx[idx]); i++){
hipLaunchKernelGGL(( generateOutputKernel), dim3(gridSize(moveStore.getNumPoints(scanIdx[idx]))) ,dim3(BLOCK_SIZE), 0, 0,
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
moveStore.getIP(scanIdx[idx],i),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
CudaCheckError();
}
// Projects scan scanIdx[idx] into base image idx and writes the scan's own
// channels followed by the interpolated image channels into `scan`
// (channel-major, numPoints floats per channel).
// Bug fix: the original mixed the raw image index `idx` with the mapped scan
// index `scanIdx[idx]` when addressing moveStore/genStore (visibly
// inconsistent within single expressions); CameraCalib::colourScan uses
// scanIdx[idx] throughout, which is followed here.
void ImageCalib::colourScan(float* scan, size_t idx){
    tformStore.transform(&moveStore, &noCamera, &genStore, tformIdx[idx], NULL, scanIdx[idx], 0);
    baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
    hipDeviceSynchronize();
    const size_t scan0 = scanIdx[idx];
    const size_t numPoints = moveStore.getNumPoints(scan0);
    // Copy the scan's own intensity channels first...
    for(size_t j = 0; j < moveStore.getNumCh(scan0); j++){
        hipMemcpy(&scan[j*numPoints],moveStore.getIP(scan0,j),numPoints*sizeof(float),hipMemcpyDeviceToHost);
    }
    // ...then the interpolated image channels behind them.
    for(size_t j = 0; j < baseStore.getDepth(idx); j++){
        hipMemcpy(&scan[(j+moveStore.getNumCh(scan0))*numPoints],genStore.getGIP(0,j,numPoints),numPoints*sizeof(float),hipMemcpyDeviceToHost);
    }
    CudaCheckError();
}
| 57d738a577fc81ffac0b6d2a0b66109a55ca0e4c.cu | #include "Calib.h"
#include <algorithm>
#include <string>
Calib::Calib(size_t numGenScans){
checkForCUDA();
genStore.setupGenList(numGenScans);
}
Calib::~Calib(void){
}
// Base-class stub: a plain calibration carries no camera model, so it is
// never panoramic. (Was `return NULL;` — a null-pointer constant returned
// from a bool function; behavior is unchanged, the conversion is now explicit.)
bool Calib::getIfPanoramic(size_t idx){
    return false;
}
size_t Calib::getNumPoints(size_t idx){
return moveStore.getNumPoints(idx);
}
size_t Calib::getNumDim(size_t idx){
return moveStore.getNumDim(idx);
}
size_t Calib::getImageDepth(size_t idx){
return baseStore.getDepth(idx);
}
size_t Calib::getNumCh(size_t idx){
return moveStore.getNumCh(scanIdx[idx]);
}
size_t Calib::getNumImages(void){
return baseStore.getNumImages();
}
size_t Calib::getImageWidth(size_t idx){
return baseStore.getWidth(idx);
}
size_t Calib::getImageHeight(size_t idx){
return baseStore.getHeight(idx);
}
void Calib::clearScans(void){
moveStore.removeAllScans();
}
void Calib::clearImages(void){
baseStore.removeAllImages();
}
void Calib::clearTforms(void){
return;
}
void Calib::clearExtras(void){
return;
}
void Calib::clearIndices(void){
tformIdx.clear();
scanIdx.clear();
}
void Calib::addScan(std::vector<thrust::host_vector<float>>& scanLIn, std::vector<thrust::host_vector<float>>& scanIIn){
moveStore.addScan(scanLIn, scanIIn);
}
void Calib::addImage(thrust::host_vector<float>& imageIn, size_t height, size_t width, size_t depth){
baseStore.addImage(imageIn, height, width, depth);
}
/*void Calib::addTform(thrust::host_vector<float>& tformIn, size_t tformSizeX, size_t tformSizeY){
tformStore.addTforms(tformIn, tformSizeX, tformSizeY);
}*/
void Calib::addTform(thrust::host_vector<float>& tformIn){
return;
}
float Calib::evalMetric(void){
return 0;
}
void Calib::addTformIndices(std::vector<size_t>& tformsIdxIn){
tformIdx.insert(tformIdx.end(), tformsIdxIn.begin(), tformsIdxIn.end());
}
void Calib::addScanIndices(std::vector<size_t>& scansIdxIn){
scanIdx.insert(scanIdx.end(),scansIdxIn.begin(), scansIdxIn.end());
}
// --- Metric selection -------------------------------------------------
// Each setter installs a freshly allocated metric implementation.
// NOTE(review): the previously installed `metric` is not deleted here, so
// calling a setter more than once appears to leak the old object — confirm
// ownership semantics before changing.
void Calib::setSSDMetric(void){
    metric = new SSD();
}
// Gradient-orientation measure.
void Calib::setGOMMetric(void){
    metric = new GOM();
}
// GOM variant.
void Calib::setGOMSMetric(void){
    metric = new GOMS();
}
// Mutual information with 50 histogram bins.
void Calib::setMIMetric(void){
    metric = new MI(50);
}
// Normalised mutual information with 50 histogram bins.
void Calib::setNMIMetric(void){
    metric = new NMI(50);
}
void Calib::setLEVMetric(void){
    metric = new LEV();
}
void Calib::addCameraIndices(std::vector<size_t>& cameraIdxIn){
mexErrMsgTxt("Attempted to setup camera for use with non-camera calibration");
return;
}
void Calib::addCamera(thrust::host_vector<float>& cameraIn, boolean panoramic){
mexErrMsgTxt("Attempted to setup camera for use with non-camera calibration");
return;
}
void Calib::getBaseImage(thrust::device_vector<float>& image, size_t idx){
image = baseStore.getImage(idx);
}
void Calib::generateImage(thrust::device_vector<float>& image, size_t width, size_t height, size_t dilate, size_t idx, bool imageColour){
return;
}
void Calib::colourScan(float* scan, size_t idx){
return;
}
CameraCalib::CameraCalib(size_t numGen) : Calib(numGen){}
bool CameraCalib::getIfPanoramic(size_t idx){
return cameraStore.getPanoramic(idx);
}
void CameraCalib::clearTforms(void){
tformStore.removeAllTforms();
}
void CameraCalib::clearExtras(void){
cameraStore.removeAllCameras();
return;
}
void CameraCalib::clearIndices(void){
tformIdx.clear();
scanIdx.clear();
cameraIdx.clear();
}
void CameraCalib::addTform(thrust::host_vector<float>& tformIn){
tformStore.addTforms(tformIn);
}
void CameraCalib::addCameraIndices(std::vector<size_t>& cameraIdxIn){
cameraIdx.insert(cameraIdx.end(),cameraIdxIn.begin(), cameraIdxIn.end());
}
void CameraCalib::addCamera(thrust::host_vector<float>& cameraIn, boolean panoramic){
cameraStore.addCams(cameraIn, panoramic);
}
// Evaluates the active metric over every image/scan pair and returns the sum.
// Work is batched in groups of genStore.getNumGen() concurrent generated
// scans (one CUDA stream each): first the generated point/intensity buffers
// are cleared and the scans transformed, then image colours are interpolated,
// then the metric is accumulated.
float CameraCalib::evalMetric(void){
    std::vector<float> metricVal;
    // All three index arrays must have one entry per base image.
    if(tformIdx.size() != baseStore.getNumImages()){
        std::ostringstream err; err << "Transform index has not been correctly set up";
        mexErrMsgTxt(err.str().c_str());
        return 0;
    }
    if(cameraIdx.size() != baseStore.getNumImages()){
        std::ostringstream err; err << "Camera index has not been correctly set up";
        mexErrMsgTxt(err.str().c_str());
        return 0;
    }
    if(scanIdx.size() != baseStore.getNumImages()){
        std::ostringstream err; err << "Scan index has not been correctly set up";
        mexErrMsgTxt(err.str().c_str());
        return 0;
    }
    float out = 0;
    for(size_t j = 0; j < baseStore.getNumImages(); j+= genStore.getNumGen()){
        for(size_t i = 0; i < genStore.getNumGen(); i++){
            if((i+j) >= baseStore.getNumImages()){
                break;
            }
            // NOTE(review): cudaMemsetAsync's count is in BYTES, but a point
            // count is passed; if getGLP/getGIP return float buffers only a
            // quarter of each buffer is cleared — confirm intended.
            for(size_t k = 0; k < IMAGE_DIM; k++){
                CudaSafeCall(cudaMemsetAsync(genStore.getGLP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
            }
            // NOTE(review): loop bound uses scanIdx[j] while the body uses
            // scanIdx[j+i] — verify this is not meant to be scanIdx[j+i].
            for(size_t k = 0; k < moveStore.getNumCh(scanIdx[j]); k++){
                CudaSafeCall(cudaMemsetAsync(genStore.getGIP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
            }
            tformStore.transform(&moveStore, &cameraStore, &genStore, tformIdx[j+i], cameraIdx[j+i], scanIdx[j+i],i);
        }
        // Interpolate base-image colours at the generated scan positions.
        for(size_t i = 0; i < genStore.getNumGen(); i++){
            if((i+j) >= baseStore.getNumImages()){
                break;
            }
            baseStore.interpolateImage(&moveStore, &genStore, j+i, scanIdx[j+i], i, true);
        }
        // Accumulate the metric for each pair in the batch.
        for(size_t i = 0; i < genStore.getNumGen(); i++){
            if((i+j) >= baseStore.getNumImages()){
                break;
            }
            out += metric->evalMetric(&moveStore, &genStore, scanIdx[j+i], i);
        }
    }
    return out;
}
void CameraCalib::generateImage(thrust::device_vector<float>& image, size_t width, size_t height, size_t dilate, size_t idx, bool imageColour){
if(imageColour){
image.resize(baseStore.getDepth(idx)*width*height);
}
else{
image.resize(moveStore.getNumCh(scanIdx[idx])*width*height);
}
tformStore.transform(&moveStore, &cameraStore, &genStore, tformIdx[idx], cameraIdx[idx], scanIdx[idx], 0);
cudaDeviceSynchronize();
if(imageColour){
baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
cudaDeviceSynchronize();
for(size_t i = 0; i < baseStore.getDepth(idx); i++){
generateOutputKernel<<<gridSize(moveStore.getNumPoints(scanIdx[idx])) ,BLOCK_SIZE>>>(
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGIP(0,i,moveStore.getNumPoints(scanIdx[idx])),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
else{
for(size_t i = 0; i < moveStore.getNumCh(scanIdx[idx]); i++){
generateOutputKernel<<<gridSize(moveStore.getNumPoints(scanIdx[idx])) ,BLOCK_SIZE>>>(
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
moveStore.getIP(scanIdx[idx],i),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
CudaCheckError();
}
void CameraCalib::colourScan(float* scan, size_t idx){
tformStore.transform(&moveStore, &cameraStore, &genStore, tformIdx[idx], cameraIdx[idx], scanIdx[idx], 0);
baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
cudaDeviceSynchronize();
for(size_t j = 0; j < moveStore.getNumDim(scanIdx[idx]); j++){
cudaMemcpy(&scan[j*moveStore.getNumPoints(scanIdx[idx])],moveStore.getLP(scanIdx[idx],j),moveStore.getNumPoints(scanIdx[idx])*sizeof(float),cudaMemcpyDeviceToHost);
}
for(size_t j = 0; j < baseStore.getDepth(idx); j++){
cudaMemcpy(&scan[(j+moveStore.getNumDim(scanIdx[idx]))*moveStore.getNumPoints(scanIdx[idx])],genStore.getGIP(0,j,moveStore.getNumPoints(scanIdx[idx])),moveStore.getNumPoints(scanIdx[idx])*sizeof(float),cudaMemcpyDeviceToHost);
}
CudaCheckError();
}
ImageCalib::ImageCalib(size_t numGen) : Calib(numGen){}
// Image-to-image calibration has no camera model, so never panoramic.
// (Was `return NULL;` — a null-pointer constant returned from a bool
// function; same runtime value, now spelled correctly.)
bool ImageCalib::getIfPanoramic(size_t idx){
    return false;
}
void ImageCalib::clearTforms(void){
tformStore.removeAllTforms();
}
void ImageCalib::clearExtras(void){
return;
}
void ImageCalib::clearIndices(void){
tformIdx.clear();
scanIdx.clear();
}
void ImageCalib::addTform(thrust::host_vector<float>& tformIn){
tformStore.addTforms(tformIn);
}
float ImageCalib::evalMetric(void){
std::vector<float> metricVal;
if(tformIdx.size() != baseStore.getNumImages()){
std::ostringstream err; err << "Transform index has not been correctly set up";
mexErrMsgTxt(err.str().c_str());
return 0;
}
if(scanIdx.size() != baseStore.getNumImages()){
std::ostringstream err; err << "Scan index has not been correctly set up";
mexErrMsgTxt(err.str().c_str());
return 0;
}
float out = 0;
for(size_t j = 0; j < baseStore.getNumImages(); j+= genStore.getNumGen()){
for(size_t i = 0; i < genStore.getNumGen(); i++){
if((i+j) >= baseStore.getNumImages()){
break;
}
for(size_t k = 0; k < IMAGE_DIM; k++){
CudaSafeCall(cudaMemsetAsync(genStore.getGLP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
}
for(size_t k = 0; k < moveStore.getNumCh(scanIdx[j]); k++){
CudaSafeCall(cudaMemsetAsync(genStore.getGIP(i,k,moveStore.getNumPoints(scanIdx[j+i])),0,moveStore.getNumPoints(scanIdx[j+i]),genStore.getStream(i)));
}
tformStore.transform(&moveStore, &noCamera, &genStore, tformIdx[j+i], NULL, scanIdx[j+i],i);
}
for(size_t i = 0; i < genStore.getNumGen(); i++){
if((i+j) >= baseStore.getNumImages()){
break;
}
baseStore.interpolateImage(&moveStore, &genStore, j+i, scanIdx[j+i], i, true);
}
for(size_t i = 0; i < genStore.getNumGen(); i++){
if((i+j) >= baseStore.getNumImages()){
break;
}
out += metric->evalMetric(&moveStore, &genStore, scanIdx[j+i], i);
}
}
return out;
}
void ImageCalib::generateImage(thrust::device_vector<float>& image, size_t width, size_t height, size_t dilate, size_t idx, bool imageColour){
if(imageColour){
image.resize(baseStore.getDepth(idx)*width*height);
}
else{
image.resize(moveStore.getNumCh(scanIdx[idx])*width*height);
}
tformStore.transform(&moveStore, &noCamera, &genStore, tformIdx[idx], NULL, scanIdx[idx], 0);
cudaDeviceSynchronize();
if(imageColour){
baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
cudaDeviceSynchronize();
for(size_t i = 0; i < baseStore.getDepth(idx); i++){
generateOutputKernel<<<gridSize(moveStore.getNumPoints(scanIdx[idx])) ,BLOCK_SIZE>>>(
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGIP(0,i,moveStore.getNumPoints(scanIdx[idx])),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
else{
for(size_t i = 0; i < moveStore.getNumCh(scanIdx[idx]); i++){
generateOutputKernel<<<gridSize(moveStore.getNumPoints(scanIdx[idx])) ,BLOCK_SIZE>>>(
genStore.getGLP(0,0,moveStore.getNumPoints(scanIdx[idx])),
genStore.getGLP(0,1,moveStore.getNumPoints(scanIdx[idx])),
moveStore.getIP(scanIdx[idx],i),
thrust::raw_pointer_cast(&image[width*height*i]),
width,
height,
moveStore.getNumPoints(scanIdx[idx]),
dilate);
}
}
CudaCheckError();
}
// Projects scan scanIdx[idx] into base image idx and writes the scan's own
// channels followed by the interpolated image channels into `scan`
// (channel-major, numPoints floats per channel).
// Bug fix: the original mixed the raw image index `idx` with the mapped scan
// index `scanIdx[idx]` when addressing moveStore/genStore (visibly
// inconsistent within single expressions); CameraCalib::colourScan uses
// scanIdx[idx] throughout, which is followed here.
void ImageCalib::colourScan(float* scan, size_t idx){
    tformStore.transform(&moveStore, &noCamera, &genStore, tformIdx[idx], NULL, scanIdx[idx], 0);
    baseStore.interpolateImage(&moveStore, &genStore, idx, scanIdx[idx], 0, true);
    cudaDeviceSynchronize();
    const size_t scan0 = scanIdx[idx];
    const size_t numPoints = moveStore.getNumPoints(scan0);
    // Copy the scan's own intensity channels first...
    for(size_t j = 0; j < moveStore.getNumCh(scan0); j++){
        cudaMemcpy(&scan[j*numPoints],moveStore.getIP(scan0,j),numPoints*sizeof(float),cudaMemcpyDeviceToHost);
    }
    // ...then the interpolated image channels behind them.
    for(size_t j = 0; j < baseStore.getDepth(idx); j++){
        cudaMemcpy(&scan[(j+moveStore.getNumCh(scan0))*numPoints],genStore.getGIP(0,j,numPoints),numPoints*sizeof(float),cudaMemcpyDeviceToHost);
    }
    CudaCheckError();
}
|
763379564ec411d8fc260e56be4a4fb51881fc05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Geometry List.
// -------------------------------------------------------------------
// Copyright (C) 2010 OpenEngine.dk (See AUTHORS)
//
// This program is free software; It is covered by the GNU General
// Public License version 2 or any later version.
// See the GNU General Public License for more details (see LICENSE).
//--------------------------------------------------------------------
#include <Utils/CUDA/GeometryList.h>
#include <Geometry/Mesh.h>
#include <Geometry/GeometrySet.h>
#include <Math/CUDA/Matrix.h>
#include <Scene/ISceneNode.h>
#include <Scene/MeshNode.h>
#include <Scene/CUDAMeshNode.h>
#include <Scene/OscCUDAMeshNode.h>
#include <Scene/RenderStateNode.h>
#include <Scene/TransformationNode.h>
#include <Utils/CUDA/Utils.h>
#include <Utils/CUDA/IntersectionTests.h>
#include <Utils/CUDA/Convert.h>
#include <Utils/CUDA/LoggerExtensions.h>
#include <sstream>
using namespace OpenEngine::Geometry;
using namespace OpenEngine::Math;
using namespace OpenEngine::Math::CUDA;
using namespace OpenEngine::Scene;
using namespace OpenEngine::Resources::CUDA;
#define MAX_THREADS 128
namespace OpenEngine {
namespace Utils {
namespace CUDA {
// Default constructor: empty list, no device storage allocated.
GeometryList::GeometryList()
    : maxSize(0), size(0) {}
// Allocates device-side storage for `size` triangles: per-vertex positions
// (p0..p2), normals (n0..n2), colours (c0..c2) and the three rows of the
// per-triangle Woop transformation matrix (woop0..woop2). Also creates a
// CUT timer for profiling.
// NOTE(review): blocks are allocated with raw `new`; confirm the destructor
// (not visible here) releases them.
GeometryList::GeometryList(int size)
    : maxSize(size), size(0) {
    cutCreateTimer(&timerID);
    p0 = new CUDADataBlock<float4>(maxSize);
    p1 = new CUDADataBlock<float4>(maxSize);
    p2 = new CUDADataBlock<float4>(maxSize);
    n0 = new CUDADataBlock<float4>(maxSize);
    n1 = new CUDADataBlock<float4>(maxSize);
    n2 = new CUDADataBlock<float4>(maxSize);
    c0 = new CUDADataBlock<uchar4>(maxSize);
    c1 = new CUDADataBlock<uchar4>(maxSize);
    c2 = new CUDADataBlock<uchar4>(maxSize);
    woop0 = new CUDADataBlock<float4>(maxSize);
    woop1 = new CUDADataBlock<float4>(maxSize);
    woop2 = new CUDADataBlock<float4>(maxSize);
}
// Reallocates every per-vertex block to hold exactly i triangles, records the
// new capacity and clamps the current element count to it.
void GeometryList::Resize(int i){
    CUDADataBlock<float4>* f4Blocks[] =
        {p0, p1, p2, n0, n1, n2, woop0, woop1, woop2};
    for (int b = 0; b < 9; ++b)
        f4Blocks[b]->Resize(i);
    c0->Resize(i);
    c1->Resize(i);
    c2->Resize(i);
    maxSize = i;
    size = min(size, i);
}
// Grow-only capacity guarantee: ensures room for at least i triangles.
// Never shrinks; call Resize directly for that.
void GeometryList::Extend(int i){
    if (i > maxSize) Resize(i);
}
// One thread per triangle: loads the triangle's three vertex positions and
// computes the rows (m0, m1, m2) of its Woop ray/triangle intersection
// transformation matrix. Note the vertex order handed to
// WoopTransformationMatrix is (p1, p2, p0). Guarded for the grid tail.
__global__ void
__launch_bounds__(MAX_THREADS)
CreateWoopValues(float4* p0s, float4* p1s, float4* p2s,
                 float4* m0s, float4* m1s, float4* m2s,
                 int primitives){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < primitives){
        const float3 p0 = make_float3(p0s[id]);
        const float3 p1 = make_float3(p1s[id]);
        const float3 p2 = make_float3(p2s[id]);
        float4 m0, m1, m2;
        WoopTransformationMatrix(p1, p2, p0, m0, m1, m2);
        m0s[id] = m0;
        m1s[id] = m1;
        m2s[id] = m2;
    }
}
// Computes the Woop transformation rows for every stored triangle on the GPU
// and returns device pointers to them through m0/m1/m2. The woop* blocks are
// resized to match the current number of triangles; results stay resident on
// the device (the returned pointers are device memory).
void GeometryList::GetWoopValues(float4** m0, float4** m1, float4** m2){
    int i = p0->GetSize();
    woop0->Resize(i); woop1->Resize(i); woop2->Resize(i);
    // One thread per triangle, MAX_THREADS per block.
    KernelConf conf = KernelConf1D(i, MAX_THREADS);
    hipLaunchKernelGGL(( CreateWoopValues), dim3(conf.blocks), dim3(conf.threads), 0, 0,
                        p0->GetDeviceData(), p1->GetDeviceData(), p2->GetDeviceData(),
                        woop0->GetDeviceData(), woop1->GetDeviceData(), woop2->GetDeviceData(),
                        i);
    CHECK_FOR_CUDA_ERROR();
    *m0 = woop0->GetDeviceData();
    *m1 = woop1->GetDeviceData();
    *m2 = woop2->GetDeviceData();
    // Disabled debug code: compares a CPU-computed Woop matrix for one
    // triangle against the GPU result.
    /*
    int tri = 54;
    logger.info << "Points: " << FetchGlobalData(p0->GetDeviceData(), tri) << ", "
                << FetchGlobalData(p1->GetDeviceData(), tri) << " & "
                << FetchGlobalData(p2->GetDeviceData(), tri) << logger.end;
    float3 a = make_float3(FetchGlobalData(p0->GetDeviceData(), tri));
    float3 b = make_float3(FetchGlobalData(p1->GetDeviceData(), tri));
    float3 c = make_float3(FetchGlobalData(p2->GetDeviceData(), tri));
    float4 w0, w1, w2;
    WoopTransformationMatrix(b, c, a, w0, w1, w2);
    logger.info << "CPU Woop: " << w0 << ", "
                << w1 << " & "
                << w2 << logger.end;
    logger.info << "GPU Woop: " << FetchGlobalData(woop0->GetDeviceData(), tri) << ", "
                << FetchGlobalData(woop1->GetDeviceData(), tri) << " & "
                << FetchGlobalData(woop2->GetDeviceData(), tri) << logger.end;
    */
}
// Human-readable dump of triangle i — positions, normals and colours —
// fetched element-wise from device memory (debugging aid; slow).
std::string GeometryList::ToString(unsigned int i) const {
    std::ostringstream str;
    str << "Triangle #" << i << "\n"
        << "Points: " << FetchGlobalData(p0->GetDeviceData(), i) << ", "
        << FetchGlobalData(p1->GetDeviceData(), i) << " & "
        << FetchGlobalData(p2->GetDeviceData(), i) << "\n"
        << "Normals: " << FetchGlobalData(n0->GetDeviceData(), i) << ", "
        << FetchGlobalData(n1->GetDeviceData(), i) << " & "
        << FetchGlobalData(n2->GetDeviceData(), i) << "\n"
        << "Colors: " << FetchGlobalData(c0->GetDeviceData(), i) << ", "
        << FetchGlobalData(c1->GetDeviceData(), i) << " & "
        << FetchGlobalData(c2->GetDeviceData(), i) << "\n";
    return str.str();
}
// One thread per triangle: gathers the three indexed vertices of an indexed
// mesh (float3 positions/normals, float4 colours in [0,1]), applies the model
// matrix to positions and the normal matrix to normals, and appends them to
// the flat per-vertex output arrays. Colours are scaled by 255 and converted
// to uchar4 (the float->uchar conversion truncates). Guarded for the grid
// tail. NOTE(review): no __launch_bounds__ here, unlike the float4 overload.
__global__ void AddMeshKernel(unsigned int *indices,
                              float3 *verticesIn,
                              float3 *normalsIn,
                              float4 *colorsIn,
                              const Matrix44f modelMat, const Matrix33f normalMat,
                              float4 *p0, float4 *p1, float4 *p2,
                              float4 *n0, float4 *n1, float4 *n2,
                              uchar4 *c0, uchar4 *c1, uchar4 *c2,
                              int size){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < size){
        const int i = __mul24(id, 3);
        const unsigned int i0 = indices[i];
        const unsigned int i1 = indices[i+1];
        const unsigned int i2 = indices[i+2];
        const float3 v0 = verticesIn[i0];
        const float3 v1 = verticesIn[i1];
        const float3 v2 = verticesIn[i2];
        // Positions are promoted to homogeneous coords (w = 1) for the 4x4 model matrix.
        p0[id] = modelMat * make_float4(v0, 1.0f);
        p1[id] = modelMat * make_float4(v1, 1.0f);
        p2[id] = modelMat * make_float4(v2, 1.0f);
        // Normals use the 3x3 normal matrix and w = 0.
        n0[id] = make_float4(normalMat * normalsIn[i0], 0);
        n1[id] = make_float4(normalMat * normalsIn[i1], 0);
        n2[id] = make_float4(normalMat * normalsIn[i2], 0);
        c0[id] = make_uchar4(colorsIn[i0].x * 255.0f, colorsIn[i0].y * 255.0f, colorsIn[i0].z * 255.0f, colorsIn[i0].w * 255.0f);
        c1[id] = make_uchar4(colorsIn[i1].x * 255.0f, colorsIn[i1].y * 255.0f, colorsIn[i1].z * 255.0f, colorsIn[i1].w * 255.0f);
        c2[id] = make_uchar4(colorsIn[i2].x * 255.0f, colorsIn[i2].y * 255.0f, colorsIn[i2].z * 255.0f, colorsIn[i2].w * 255.0f);
    }
}
// Overload for meshes already stored as float4 positions/normals with uchar4
// colours: same gather/transform as the float3 version but colours are copied
// through unchanged. One thread per triangle, guarded for the grid tail.
__global__ void
__launch_bounds__(MAX_THREADS)
AddMeshKernel(unsigned int *indices,
              float4 *verticesIn,
              float4 *normalsIn,
              uchar4 *colorsIn,
              const Matrix44f modelMat, const Matrix33f normalMat,
              float4 *p0, float4 *p1, float4 *p2,
              float4 *n0, float4 *n1, float4 *n2,
              uchar4 *c0, uchar4 *c1, uchar4 *c2,
              int size){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < size){
        const int i = __mul24(id, 3);
        const unsigned int i0 = indices[i];
        const unsigned int i1 = indices[i+1];
        const unsigned int i2 = indices[i+2];
        const float4 v0 = verticesIn[i0];
        const float4 v1 = verticesIn[i1];
        const float4 v2 = verticesIn[i2];
        p0[id] = modelMat * v0;
        p1[id] = modelMat * v1;
        p2[id] = modelMat * v2;
        // Normals: 3x3 normal matrix on the xyz part, w forced to 0.
        n0[id] = make_float4(normalMat * make_float3(normalsIn[i0]), 0);
        n1[id] = make_float4(normalMat * make_float3(normalsIn[i1]), 0);
        n2[id] = make_float4(normalMat * make_float3(normalsIn[i2]), 0);
        c0[id] = colorsIn[i0];
        c1[id] = colorsIn[i1];
        c2[id] = colorsIn[i2];
    }
}
// Append a GL-resident mesh to the collection: map its OpenGL VBOs into the
// HIP address space, transform/expand them into the per-triangle streams via
// AddMeshKernel, then unmap and unregister the resources.
void GeometryList::AddMesh(MeshPtr mesh, Matrix<4,4,float> modelMat){
GeometrySetPtr geom = mesh->GetGeometrySet();
if (geom->GetDataBlock("vertex") && geom->GetDataBlock("vertex")->GetID() != 0){
// Geometry has been loaded to the graphics card
// and we can copy it from there.
IndicesPtr indices = mesh->GetIndices();
IDataBlockPtr vertices = geom->GetDataBlock("vertex");
IDataBlockPtr normals = geom->GetDataBlock("normal");
IDataBlockPtr colors = geom->GetDataBlock("color");
START_TIMER(timerID);
unsigned int triangles = indices->GetSize() / 3;
Extend(size + triangles);
// FIX: this declaration was left untranslated by hipify (it read
// `cudaGraphicsResource`); use the HIP type so the file compiles
// against the HIP headers.
hipGraphicsResource *iResource, *vResource, *nResource, *cResource;
hipGraphicsGLRegisterBuffer(&iResource, indices->GetID(), hipGraphicsMapFlagsReadOnly);
hipGraphicsMapResources(1, &iResource, 0);
CHECK_FOR_CUDA_ERROR();
hipGraphicsGLRegisterBuffer(&vResource, vertices->GetID(), hipGraphicsMapFlagsReadOnly);
hipGraphicsMapResources(1, &vResource, 0);
CHECK_FOR_CUDA_ERROR();
hipGraphicsGLRegisterBuffer(&nResource, normals->GetID(), hipGraphicsMapFlagsReadOnly);
hipGraphicsMapResources(1, &nResource, 0);
CHECK_FOR_CUDA_ERROR();
hipGraphicsGLRegisterBuffer(&cResource, colors->GetID(), hipGraphicsMapFlagsReadOnly);
hipGraphicsMapResources(1, &cResource, 0);
CHECK_FOR_CUDA_ERROR();
// Resolve the mapped resources to device pointers. `bytes` is reused and
// only the last query's value survives; it is not used afterwards.
size_t bytes;
unsigned int* in;
hipGraphicsResourceGetMappedPointer((void**)&in, &bytes,
iResource);
CHECK_FOR_CUDA_ERROR();
float3* pos;
hipGraphicsResourceGetMappedPointer((void**)&pos, &bytes,
vResource);
CHECK_FOR_CUDA_ERROR();
float3* norms;
hipGraphicsResourceGetMappedPointer((void**)&norms, &bytes,
nResource);
CHECK_FOR_CUDA_ERROR();
float4* cols;
hipGraphicsResourceGetMappedPointer((void**)&cols, &bytes,
cResource);
CHECK_FOR_CUDA_ERROR();
// NOTE(review): dimensions are computed from the index count, i.e. 3x the
// number of triangles actually processed; the kernel's bounds guard makes
// this safe but it over-launches — confirm whether intentional.
unsigned int blocks, threads;
Calc1DKernelDimensions(indices->GetSize(), blocks, threads);
Math::CUDA::Matrix44f mat;
mat.Init(modelMat.GetTranspose());
Math::CUDA::Matrix33f normMat; // should be transposed and inverted, jada jada bla bla just don't do weird scaling
normMat.Init(mat);
CHECK_FOR_CUDA_ERROR();
// Write the new triangles at offset `size` in every attribute stream.
hipLaunchKernelGGL(( AddMeshKernel), dim3(blocks), dim3(threads), 0, 0, in, pos, norms, cols,
mat, normMat,
p0->GetDeviceData() + size, p1->GetDeviceData() + size, p2->GetDeviceData() + size,
n0->GetDeviceData() + size, n1->GetDeviceData() + size, n2->GetDeviceData() + size,
c0->GetDeviceData() + size, c1->GetDeviceData() + size, c2->GetDeviceData() + size,
triangles);
CHECK_FOR_CUDA_ERROR();
size += triangles;
hipGraphicsUnmapResources(1, &iResource, 0);
hipGraphicsUnmapResources(1, &vResource, 0);
hipGraphicsUnmapResources(1, &nResource, 0);
hipGraphicsUnmapResources(1, &cResource, 0);
CHECK_FOR_CUDA_ERROR();
hipGraphicsUnregisterResource(iResource);
hipGraphicsUnregisterResource(vResource);
hipGraphicsUnregisterResource(nResource);
hipGraphicsUnregisterResource(cResource);
CHECK_FOR_CUDA_ERROR();
PRINT_TIMER(timerID, "Geometry collection ");
}else{
// Geometry is still on the CPU
throw Exception("Not implemented");
}
}
// Append a CUDA-resident mesh: data is already in device memory, so only the
// transform/expand kernel launch is needed (no GL interop).
void GeometryList::AddMesh(CUDAMeshNode* mesh,
Matrix<4, 4, float> modelMat){
unsigned int triangles = mesh->GetSize() / 3;
Extend(size + triangles);
Math::CUDA::Matrix44f mat;
mat.Init(modelMat.GetTranspose());
// NOTE(review): a correct normal matrix is the inverse-transpose of the
// model matrix; Init(mat) is only valid without non-uniform scaling — confirm.
Math::CUDA::Matrix33f normMat;
normMat.Init(mat);
CHECK_FOR_CUDA_ERROR();
unsigned int blocks, threads;
// Sized by index count (3x triangles); the kernel's bounds guard covers it.
Calc1DKernelDimensions(mesh->GetSize(), blocks, threads, MAX_THREADS)
;
hipLaunchKernelGGL(( AddMeshKernel), dim3(blocks), dim3(threads), 0, 0, mesh->GetIndexData(), mesh->GetVertexData(), mesh->GetNormalData(), mesh->GetColorData(),
mat, normMat,
p0->GetDeviceData() + size, p1->GetDeviceData() + size, p2->GetDeviceData() + size,
n0->GetDeviceData() + size, n1->GetDeviceData() + size, n2->GetDeviceData() + size,
c0->GetDeviceData() + size, c1->GetDeviceData() + size, c2->GetDeviceData() + size,
triangles);
CHECK_FOR_CUDA_ERROR();
size += triangles;
}
// Entry point: reset the accumulated geometry and traverse the scene graph,
// collecting every mesh into the per-triangle attribute streams.
void GeometryList::CollectGeometry(ISceneNode* node){
currentModelMat = Matrix<4,4, float>();
size = 0;
node->Accept(*this);
}
// Render state does not affect collection; just recurse.
void GeometryList::VisitRenderStateNode(RenderStateNode* node){
node->VisitSubNodes(*this);
}
// Accumulate the node's transform for the duration of its subtree, then
// restore the previous transform (manual push/pop).
void GeometryList::VisitTransformationNode(TransformationNode* node){
// push transformation matrix
Matrix<4,4,float> m = node->GetTransformationMatrix();
Matrix<4, 4, float> oldModelMat = currentModelMat;
currentModelMat = m * currentModelMat;
// traverse sub nodes
node->VisitSubNodes(*this);
// pop transformation matrix
currentModelMat = oldModelMat;
}
// GL-resident meshes are added directly; CPU-resident meshes are converted to
// a CUDAMeshNode which replaces the original node in the graph (children are
// re-parented onto the replacement before it is visited).
void GeometryList::VisitMeshNode(MeshNode* node){
if (node->GetMesh()->GetGeometrySet()->GetVertices()->GetID() != 0){
AddMesh(node->GetMesh(), currentModelMat);
node->VisitSubNodes(*this);
}else{
CUDAMeshNode* mesh = new CUDAMeshNode(node);
node->GetParent()->ReplaceNode(node, mesh);
// Copy the child list first: RemoveNode mutates node->subNodes.
std::list<ISceneNode*> subNodes = node->subNodes;
for (std::list<ISceneNode*>::iterator itr = subNodes.begin();
itr != subNodes.end(); ++itr){
node->RemoveNode(*itr);
mesh->AddNode(*itr);
}
mesh->Accept(*this);
}
}
void GeometryList::VisitCUDAMeshNode(CUDAMeshNode* node){
AddMesh(node, currentModelMat);
node->VisitSubNodes(*this);
}
// Oscillating meshes must be initialized before their device data is valid.
void GeometryList::VisitOscCUDAMeshNode(OscCUDAMeshNode* node){
//logger.info << "Visit OscCUDAMeshNode" << logger.end;
node->Init();
//logger.info << node->ToString() << logger.end;
AddMesh(node, currentModelMat);
node->VisitSubNodes(*this);
}
}
}
}
| 763379564ec411d8fc260e56be4a4fb51881fc05.cu | // Geometry List.
// -------------------------------------------------------------------
// Copyright (C) 2010 OpenEngine.dk (See AUTHORS)
//
// This program is free software; It is covered by the GNU General
// Public License version 2 or any later version.
// See the GNU General Public License for more details (see LICENSE).
//--------------------------------------------------------------------
#include <Utils/CUDA/GeometryList.h>
#include <Geometry/Mesh.h>
#include <Geometry/GeometrySet.h>
#include <Math/CUDA/Matrix.h>
#include <Scene/ISceneNode.h>
#include <Scene/MeshNode.h>
#include <Scene/CUDAMeshNode.h>
#include <Scene/OscCUDAMeshNode.h>
#include <Scene/RenderStateNode.h>
#include <Scene/TransformationNode.h>
#include <Utils/CUDA/Utils.h>
#include <Utils/CUDA/IntersectionTests.h>
#include <Utils/CUDA/Convert.h>
#include <Utils/CUDA/LoggerExtensions.h>
#include <sstream>
using namespace OpenEngine::Geometry;
using namespace OpenEngine::Math;
using namespace OpenEngine::Math::CUDA;
using namespace OpenEngine::Scene;
using namespace OpenEngine::Resources::CUDA;
#define MAX_THREADS 128
namespace OpenEngine {
namespace Utils {
namespace CUDA {
// Default ctor: empty list, no device buffers allocated.
// NOTE(review): attribute pointers are left unset here — presumably callers
// only use lists built with the sizing ctor below; confirm.
GeometryList::GeometryList()
: maxSize(0), size(0) {}
// Sizing ctor: pre-allocates device blocks for `size` triangles. The
// parameter intentionally shadows the member `size`, which is set to 0
// via the initializer list (list starts empty with capacity maxSize).
// NOTE(review): raw `new` with no destructor visible in this chunk —
// verify the blocks are released elsewhere.
GeometryList::GeometryList(int size)
: maxSize(size), size(0) {
cutCreateTimer(&timerID);
p0 = new CUDADataBlock<float4>(maxSize);
p1 = new CUDADataBlock<float4>(maxSize);
p2 = new CUDADataBlock<float4>(maxSize);
n0 = new CUDADataBlock<float4>(maxSize);
n1 = new CUDADataBlock<float4>(maxSize);
n2 = new CUDADataBlock<float4>(maxSize);
c0 = new CUDADataBlock<uchar4>(maxSize);
c1 = new CUDADataBlock<uchar4>(maxSize);
c2 = new CUDADataBlock<uchar4>(maxSize);
woop0 = new CUDADataBlock<float4>(maxSize);
woop1 = new CUDADataBlock<float4>(maxSize);
woop2 = new CUDADataBlock<float4>(maxSize);
}
// Resize every per-triangle attribute buffer to capacity i.
// Shrinking clips the element count; growing preserves it.
void GeometryList::Resize(int i){
    p0->Resize(i); n0->Resize(i); c0->Resize(i); woop0->Resize(i);
    p1->Resize(i); n1->Resize(i); c1->Resize(i); woop1->Resize(i);
    p2->Resize(i); n2->Resize(i); c2->Resize(i); woop2->Resize(i);
    maxSize = i;
    size = min(size, i);
}
// Grow-only resize: never shrinks existing capacity.
void GeometryList::Extend(int i){
    if (i > maxSize)
        Resize(i);
}
// Kernel: one thread per primitive; computes the Woop intersection-test
// transformation rows (m0,m1,m2) from each triangle's three vertices.
// Note the vertex order passed to WoopTransformationMatrix is (p1, p2, p0).
__global__ void
__launch_bounds__(MAX_THREADS)
CreateWoopValues(float4* p0s, float4* p1s, float4* p2s,
float4* m0s, float4* m1s, float4* m2s,
int primitives){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
// Bounds guard: grid size is rounded up to a whole number of blocks.
if (id < primitives){
const float3 p0 = make_float3(p0s[id]);
const float3 p1 = make_float3(p1s[id]);
const float3 p2 = make_float3(p2s[id]);
float4 m0, m1, m2;
WoopTransformationMatrix(p1, p2, p0, m0, m1, m2);
m0s[id] = m0;
m1s[id] = m1;
m2s[id] = m2;
}
}
// (Re)compute the Woop transformation buffers for all current triangles and
// hand back device pointers to them via the three out-parameters.
void GeometryList::GetWoopValues(float4** m0, float4** m1, float4** m2){
int i = p0->GetSize();
woop0->Resize(i); woop1->Resize(i); woop2->Resize(i);
KernelConf conf = KernelConf1D(i, MAX_THREADS);
CreateWoopValues<<<conf.blocks, conf.threads>>>
(p0->GetDeviceData(), p1->GetDeviceData(), p2->GetDeviceData(),
woop0->GetDeviceData(), woop1->GetDeviceData(), woop2->GetDeviceData(),
i);
CHECK_FOR_CUDA_ERROR();
*m0 = woop0->GetDeviceData();
*m1 = woop1->GetDeviceData();
*m2 = woop2->GetDeviceData();
// Debug code kept for reference: compares a CPU-side Woop matrix for one
// triangle against the GPU result.
/*
int tri = 54;
logger.info << "Points: " << FetchGlobalData(p0->GetDeviceData(), tri) << ", "
<< FetchGlobalData(p1->GetDeviceData(), tri) << " & "
<< FetchGlobalData(p2->GetDeviceData(), tri) << logger.end;
float3 a = make_float3(FetchGlobalData(p0->GetDeviceData(), tri));
float3 b = make_float3(FetchGlobalData(p1->GetDeviceData(), tri));
float3 c = make_float3(FetchGlobalData(p2->GetDeviceData(), tri));
float4 w0, w1, w2;
WoopTransformationMatrix(b, c, a, w0, w1, w2);
logger.info << "CPU Woop: " << w0 << ", "
<< w1 << " & "
<< w2 << logger.end;
logger.info << "GPU Woop: " << FetchGlobalData(woop0->GetDeviceData(), tri) << ", "
<< FetchGlobalData(woop1->GetDeviceData(), tri) << " & "
<< FetchGlobalData(woop2->GetDeviceData(), tri) << logger.end;
*/
}
std::string GeometryList::ToString(unsigned int i) const {
    // Human readable dump of triangle #i; every attribute is pulled back
    // from device memory element-by-element (debugging only).
    std::ostringstream out;
    out << "Triangle #" << i << "\n"
        << "Points: " << FetchGlobalData(p0->GetDeviceData(), i) << ", "
        << FetchGlobalData(p1->GetDeviceData(), i) << " & "
        << FetchGlobalData(p2->GetDeviceData(), i) << "\n"
        << "Normals: " << FetchGlobalData(n0->GetDeviceData(), i) << ", "
        << FetchGlobalData(n1->GetDeviceData(), i) << " & "
        << FetchGlobalData(n2->GetDeviceData(), i) << "\n"
        << "Colors: " << FetchGlobalData(c0->GetDeviceData(), i) << ", "
        << FetchGlobalData(c1->GetDeviceData(), i) << " & "
        << FetchGlobalData(c2->GetDeviceData(), i) << "\n";
    return out.str();
}
// Kernel: expands an indexed mesh (float3 vertices/normals, float4 colors)
// into per-triangle attribute streams. One thread per triangle; `size` is the
// triangle count. Vertices are transformed by modelMat, normals by normalMat,
// and float colors are packed into uchar4.
__global__ void AddMeshKernel(unsigned int *indices,
float3 *verticesIn,
float3 *normalsIn,
float4 *colorsIn,
const Matrix44f modelMat, const Matrix33f normalMat,
float4 *p0, float4 *p1, float4 *p2,
float4 *n0, float4 *n1, float4 *n2,
uchar4 *c0, uchar4 *c1, uchar4 *c2,
int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
// Bounds guard: the grid may be launched with more threads than triangles.
if (id < size){
// Each triangle consumes three consecutive indices starting at id*3.
const int i = __mul24(id, 3);
const unsigned int i0 = indices[i];
const unsigned int i1 = indices[i+1];
const unsigned int i2 = indices[i+2];
const float3 v0 = verticesIn[i0];
const float3 v1 = verticesIn[i1];
const float3 v2 = verticesIn[i2];
// Positions: homogeneous w = 1 so translation applies.
p0[id] = modelMat * make_float4(v0, 1.0f);
p1[id] = modelMat * make_float4(v1, 1.0f);
p2[id] = modelMat * make_float4(v2, 1.0f);
// Normals: direction vectors, stored with w = 0.
n0[id] = make_float4(normalMat * normalsIn[i0], 0);
n1[id] = make_float4(normalMat * normalsIn[i1], 0);
n2[id] = make_float4(normalMat * normalsIn[i2], 0);
// NOTE(review): float->uchar conversion truncates and does not clamp;
// assumes color components lie in [0,1] — confirm upstream invariant.
c0[id] = make_uchar4(colorsIn[i0].x * 255.0f, colorsIn[i0].y * 255.0f, colorsIn[i0].z * 255.0f, colorsIn[i0].w * 255.0f);
c1[id] = make_uchar4(colorsIn[i1].x * 255.0f, colorsIn[i1].y * 255.0f, colorsIn[i1].z * 255.0f, colorsIn[i1].w * 255.0f);
c2[id] = make_uchar4(colorsIn[i2].x * 255.0f, colorsIn[i2].y * 255.0f, colorsIn[i2].z * 255.0f, colorsIn[i2].w * 255.0f);
}
}
// Overload for meshes already in GPU-friendly layout (float4 vertices/normals,
// uchar4 colors) — colors are copied through unchanged.
__global__ void
__launch_bounds__(MAX_THREADS)
AddMeshKernel(unsigned int *indices,
float4 *verticesIn,
float4 *normalsIn,
uchar4 *colorsIn,
const Matrix44f modelMat, const Matrix33f normalMat,
float4 *p0, float4 *p1, float4 *p2,
float4 *n0, float4 *n1, float4 *n2,
uchar4 *c0, uchar4 *c1, uchar4 *c2,
int size){
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < size){
const int i = __mul24(id, 3);
const unsigned int i0 = indices[i];
const unsigned int i1 = indices[i+1];
const unsigned int i2 = indices[i+2];
const float4 v0 = verticesIn[i0];
const float4 v1 = verticesIn[i1];
const float4 v2 = verticesIn[i2];
p0[id] = modelMat * v0;
p1[id] = modelMat * v1;
p2[id] = modelMat * v2;
// Normal transform only needs the xyz part; w is reset to 0.
n0[id] = make_float4(normalMat * make_float3(normalsIn[i0]), 0);
n1[id] = make_float4(normalMat * make_float3(normalsIn[i1]), 0);
n2[id] = make_float4(normalMat * make_float3(normalsIn[i2]), 0);
c0[id] = colorsIn[i0];
c1[id] = colorsIn[i1];
c2[id] = colorsIn[i2];
}
}
// Append a GL-resident mesh to the collection: map its OpenGL VBOs into the
// CUDA address space, transform/expand them into the per-triangle streams via
// AddMeshKernel, then unmap and unregister the resources.
// NOTE(review): the cudaGraphics* calls' return codes are not checked
// directly; errors are only caught by the following CHECK_FOR_CUDA_ERROR().
void GeometryList::AddMesh(MeshPtr mesh, Matrix<4,4,float> modelMat){
GeometrySetPtr geom = mesh->GetGeometrySet();
if (geom->GetDataBlock("vertex") && geom->GetDataBlock("vertex")->GetID() != 0){
// Geometry has been loaded to the graphics card
// and we can copy it from there.
IndicesPtr indices = mesh->GetIndices();
IDataBlockPtr vertices = geom->GetDataBlock("vertex");
IDataBlockPtr normals = geom->GetDataBlock("normal");
IDataBlockPtr colors = geom->GetDataBlock("color");
START_TIMER(timerID);
unsigned int triangles = indices->GetSize() / 3;
Extend(size + triangles);
cudaGraphicsResource *iResource, *vResource, *nResource, *cResource;
cudaGraphicsGLRegisterBuffer(&iResource, indices->GetID(), cudaGraphicsMapFlagsReadOnly);
cudaGraphicsMapResources(1, &iResource, 0);
CHECK_FOR_CUDA_ERROR();
cudaGraphicsGLRegisterBuffer(&vResource, vertices->GetID(), cudaGraphicsMapFlagsReadOnly);
cudaGraphicsMapResources(1, &vResource, 0);
CHECK_FOR_CUDA_ERROR();
cudaGraphicsGLRegisterBuffer(&nResource, normals->GetID(), cudaGraphicsMapFlagsReadOnly);
cudaGraphicsMapResources(1, &nResource, 0);
CHECK_FOR_CUDA_ERROR();
cudaGraphicsGLRegisterBuffer(&cResource, colors->GetID(), cudaGraphicsMapFlagsReadOnly);
cudaGraphicsMapResources(1, &cResource, 0);
CHECK_FOR_CUDA_ERROR();
// Resolve the mapped resources to raw device pointers; `bytes` is reused
// and its final value is unused.
size_t bytes;
unsigned int* in;
cudaGraphicsResourceGetMappedPointer((void**)&in, &bytes,
iResource);
CHECK_FOR_CUDA_ERROR();
float3* pos;
cudaGraphicsResourceGetMappedPointer((void**)&pos, &bytes,
vResource);
CHECK_FOR_CUDA_ERROR();
float3* norms;
cudaGraphicsResourceGetMappedPointer((void**)&norms, &bytes,
nResource);
CHECK_FOR_CUDA_ERROR();
float4* cols;
cudaGraphicsResourceGetMappedPointer((void**)&cols, &bytes,
cResource);
CHECK_FOR_CUDA_ERROR();
// NOTE(review): sized by index count (3x triangles); the kernel's bounds
// guard makes the over-launch safe — confirm whether intentional.
unsigned int blocks, threads;
Calc1DKernelDimensions(indices->GetSize(), blocks, threads);
Math::CUDA::Matrix44f mat;
mat.Init(modelMat.GetTranspose());
Math::CUDA::Matrix33f normMat; // should be transposed and inverted, jada jada bla bla just don't do weird scaling
normMat.Init(mat);
CHECK_FOR_CUDA_ERROR();
// Write the new triangles at offset `size` in every attribute stream.
AddMeshKernel<<<blocks, threads>>>(in, pos, norms, cols,
mat, normMat,
p0->GetDeviceData() + size, p1->GetDeviceData() + size, p2->GetDeviceData() + size,
n0->GetDeviceData() + size, n1->GetDeviceData() + size, n2->GetDeviceData() + size,
c0->GetDeviceData() + size, c1->GetDeviceData() + size, c2->GetDeviceData() + size,
triangles);
CHECK_FOR_CUDA_ERROR();
size += triangles;
cudaGraphicsUnmapResources(1, &iResource, 0);
cudaGraphicsUnmapResources(1, &vResource, 0);
cudaGraphicsUnmapResources(1, &nResource, 0);
cudaGraphicsUnmapResources(1, &cResource, 0);
CHECK_FOR_CUDA_ERROR();
cudaGraphicsUnregisterResource(iResource);
cudaGraphicsUnregisterResource(vResource);
cudaGraphicsUnregisterResource(nResource);
cudaGraphicsUnregisterResource(cResource);
CHECK_FOR_CUDA_ERROR();
PRINT_TIMER(timerID, "Geometry collection ");
}else{
// Geometry is still on the CPU
throw Exception("Not implemented");
}
}
// Append a CUDA-resident mesh: data is already in device memory, so only the
// transform/expand kernel launch is needed (no GL interop).
void GeometryList::AddMesh(CUDAMeshNode* mesh,
Matrix<4, 4, float> modelMat){
unsigned int triangles = mesh->GetSize() / 3;
Extend(size + triangles);
Math::CUDA::Matrix44f mat;
mat.Init(modelMat.GetTranspose());
// NOTE(review): a correct normal matrix is the inverse-transpose of the
// model matrix; Init(mat) is only valid without non-uniform scaling — confirm.
Math::CUDA::Matrix33f normMat;
normMat.Init(mat);
CHECK_FOR_CUDA_ERROR();
unsigned int blocks, threads;
// Sized by index count (3x triangles); the kernel's bounds guard covers it.
Calc1DKernelDimensions(mesh->GetSize(), blocks, threads, MAX_THREADS);
AddMeshKernel<<<blocks, threads>>>(mesh->GetIndexData(), mesh->GetVertexData(), mesh->GetNormalData(), mesh->GetColorData(),
mat, normMat,
p0->GetDeviceData() + size, p1->GetDeviceData() + size, p2->GetDeviceData() + size,
n0->GetDeviceData() + size, n1->GetDeviceData() + size, n2->GetDeviceData() + size,
c0->GetDeviceData() + size, c1->GetDeviceData() + size, c2->GetDeviceData() + size,
triangles);
CHECK_FOR_CUDA_ERROR();
size += triangles;
}
// Entry point: reset the accumulated geometry and traverse the scene graph,
// collecting every mesh into the per-triangle attribute streams.
void GeometryList::CollectGeometry(ISceneNode* node){
currentModelMat = Matrix<4,4, float>();
size = 0;
node->Accept(*this);
}
// Render state does not affect collection; just recurse.
void GeometryList::VisitRenderStateNode(RenderStateNode* node){
node->VisitSubNodes(*this);
}
// Accumulate the node's transform for the duration of its subtree, then
// restore the previous transform (manual push/pop).
void GeometryList::VisitTransformationNode(TransformationNode* node){
// push transformation matrix
Matrix<4,4,float> m = node->GetTransformationMatrix();
Matrix<4, 4, float> oldModelMat = currentModelMat;
currentModelMat = m * currentModelMat;
// traverse sub nodes
node->VisitSubNodes(*this);
// pop transformation matrix
currentModelMat = oldModelMat;
}
// GL-resident meshes are added directly; CPU-resident meshes are converted to
// a CUDAMeshNode which replaces the original node in the graph (children are
// re-parented onto the replacement before it is visited).
void GeometryList::VisitMeshNode(MeshNode* node){
if (node->GetMesh()->GetGeometrySet()->GetVertices()->GetID() != 0){
AddMesh(node->GetMesh(), currentModelMat);
node->VisitSubNodes(*this);
}else{
CUDAMeshNode* mesh = new CUDAMeshNode(node);
node->GetParent()->ReplaceNode(node, mesh);
// Copy the child list first: RemoveNode mutates node->subNodes.
std::list<ISceneNode*> subNodes = node->subNodes;
for (std::list<ISceneNode*>::iterator itr = subNodes.begin();
itr != subNodes.end(); ++itr){
node->RemoveNode(*itr);
mesh->AddNode(*itr);
}
mesh->Accept(*this);
}
}
void GeometryList::VisitCUDAMeshNode(CUDAMeshNode* node){
AddMesh(node, currentModelMat);
node->VisitSubNodes(*this);
}
// Oscillating meshes must be initialized before their device data is valid.
void GeometryList::VisitOscCUDAMeshNode(OscCUDAMeshNode* node){
//logger.info << "Visit OscCUDAMeshNode" << logger.end;
node->Init();
//logger.info << node->ToString() << logger.end;
AddMesh(node, currentModelMat);
node->VisitSubNodes(*this);
}
}
}
}
|
8d165debcfe0f9c78bcc01eaa149b677fb763292.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops.h"
#include "cuda_helper.h"
// Create the cuDNN descriptors for this convolution, validate the inferred
// output shape against outputs[0], and allocate the device buffers for the
// filter, bias and output tensors.
void Conv2D::map(void)
{
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
// Input layout is NCHW; dim[0] is the fixed batch dimension.
int inputC = inputs[0].dim[1];
int inputH = inputs[0].dim[2];
int inputW = inputs[0].dim[3];
// set descriptors
checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, BATCH_SIZE, inputC, inputH, inputW));
checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, outputC, inputC, kernelH, kernelW));
//checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padH, padW,
strideH, strideW, 1/*dilationH*/, 1/*dilationW*/,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Let cuDNN derive the output shape and check it against the recorded op.
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
inputTensor, filterDesc, &n, &c, &h, &w));
assert(n == BATCH_SIZE);
assert(c == outputC);
assert(outputs[0].dim[2] == h);
assert(outputs[0].dim[3] == w);
checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, n, c, h, w));
// The activation descriptor only exists when the conv is fused with ReLU.
if (relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
}
// Allocate device tensors. c == outputC is asserted above, so sizes are
// computed from the member `outputC` and the cuDNN-derived h/w directly;
// the previous `int outputC = c;` local shadowed the member of the same
// name used earlier in this function and has been removed.
size_t filterSize = sizeof(DATATYPE) * inputC * outputC * kernelH * kernelW;
size_t outputSize = sizeof(DATATYPE) * BATCH_SIZE * outputC * h * w;
size_t biasSize = sizeof(DATATYPE) * outputC;
checkCUDA(hipMalloc(&filterPtr, filterSize));
checkCUDA(hipMalloc(&biasPtr, biasSize));
checkCUDA(hipMalloc(&outputs[0].ptr, outputSize));
}
// Tear down everything map() created: cuDNN descriptors plus the device
// buffers for the filter, bias and output tensors.
void Conv2D::unmap(void)
{
checkCUDNN(cudnnDestroyConvolutionDescriptor(convDesc));
checkCUDNN(cudnnDestroyFilterDescriptor(filterDesc));
checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor));
checkCUDNN(cudnnDestroyTensorDescriptor(biasTensor));
checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor));
// The activation descriptor is only created for fused-ReLU convolutions.
if (relu)
checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
// free tensors
checkCUDA(hipFree(biasPtr));
checkCUDA(hipFree(filterPtr));
checkCUDA(hipFree(outputs[0].ptr));
}
// Run the forward convolution with the previously selected fwdAlgo.
// With relu set, the fused conv+bias+activation path is used; otherwise a
// plain convolution followed by a bias add.
void Conv2D::forward(void)
{
const float alpha = 1.0f;
const float beta = 0.0f;
if (relu) {
checkCUDNN(cudnnConvolutionBiasActivationForward(
model->dnn, &alpha, inputTensor, inputs[0].ptr, filterDesc, filterPtr,
convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
&beta, outputTensor, outputs[0].ptr, biasTensor, biasPtr, actiDesc,
outputTensor, outputs[0].ptr));
} else {
checkCUDNN(cudnnConvolutionForward(
model->dnn, &alpha, inputTensor, inputs[0].ptr, filterDesc, filterPtr,
convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
&beta, outputTensor, outputs[0].ptr));
// Bias add uses alpha for both scale factors: out = 1*bias + 1*out.
checkCUDNN(cudnnAddTensor(model->dnn, &alpha, biasTensor, biasPtr,
&alpha, outputTensor, outputs[0].ptr));
}
}
// Benchmark every available cuDNN forward algorithm for `conv`, record
// (runtime, energy) per algorithm in conv->algo_cost_mp, and select the
// algorithm minimizing cost_func(runtime, power) into conv->fwdAlgo.
// Previously measured keys are served from the cache `mp`.
void Model::measure_conv2d_cost(Conv2D* conv)
{
assert(conv->algo_cost_mp.empty());
const float alpha = 1.0f;
const float beta = 0.0f;
// Shapes come from the recorded op, not from live tensors.
int inputC = conv->inputs[0].dim[1];
int inputH = conv->inputs[0].dim[2];
int inputW = conv->inputs[0].dim[3];
int outputC = conv->outputs[0].dim[1];
int outputH = conv->outputs[0].dim[2];
int outputW = conv->outputs[0].dim[3];
checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, BATCH_SIZE, inputC, inputH, inputW));
checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, outputC, inputC, conv->kernelH, conv->kernelW));
//checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, conv->padH, conv->padW,
conv->strideH, conv->strideW, 1/*dilationH*/, 1/*dilationW*/,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
// Sanity-check cuDNN's inferred output shape against the recorded one.
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
inputTensor, filterDesc, &n, &c, &h, &w));
assert(n == BATCH_SIZE);
assert(c == outputC);
assert(outputH == h);
assert(outputW == w);
checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, n, c, h, w));
// The shared scratch tensors must be able to hold these shapes.
size_t inputSize = sizeof(DATATYPE) * BATCH_SIZE * inputC * inputH * inputW;
size_t filterSize = sizeof(DATATYPE) * inputC * outputC
* conv->kernelH * conv->kernelW;
size_t outputSize = sizeof(DATATYPE) * BATCH_SIZE * outputC * outputH * outputW;
assert(inputSize < MAX_TENSOR_SIZE);
assert(filterSize < MAX_TENSOR_SIZE);
assert(outputSize < MAX_TENSOR_SIZE);
// Ask cuDNN to benchmark up to 8 forward algorithms for this problem.
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
dnn, inputTensor, inputPtr, filterDesc, filterPtr, convDesc,
outputTensor, outputPtr, reqAlgCnt, &cnt, perfResults,
workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
#ifdef VERBOSE
for (int i = 0; i < cnt; i++) {
printf("fwdAlgo(%d) time(%.2lfms) space(%zuMB)\n", perfResults[i].algo,
perfResults[i].time, perfResults[i].memory / 1024 / 1024);
}
#endif
double best_cost=999999999999999.0;
int first=1;
// perfResults is sorted best-first by cuDNN; with use_perf_order set the
// loop walks it in that order (and the tie-break below always keeps the
// last candidate), otherwise it walks worst-to-best.
int begin=cnt-1;
if(use_perf_order)
{
begin=0;
}
for(int idx=begin;idx>=0;idx--)
{
// Skip algorithms that failed during cudnnFind.
if(int(perfResults[idx].status)!=0) continue;
cudnnConvolutionFwdAlgo_t current_algo=perfResults[idx].algo;
//printf("<<<%d>>>\n",int(perfResults[idx].status));
//conv->fwdAlgo = (cudnnConvolutionFwdAlgo_t)2;
string key=export_op_key(*conv)+",<"+to_string(current_algo)+">";
double runtime;
double power;
double energy;
//printf("<pre_measure>, %s\n",key.c_str());
// Cache hit: reuse the stored measurement and skip the benchmark loops.
if(mp.find(key)!=mp.end())
{
runtime=mp[key].runtime;
power=mp[key].power;
energy=mp[key].power*mp[key].runtime;
if(!mute)
{
printf("<found from mp>, %s, ",key.c_str());
printf("runtime=%f power=%f energe=%f\n", mp[key].runtime, mp[key].power, mp[key].power*mp[key].runtime);
}
goto end;
}
// Warm-up / event-timed pass over REPEAT_TIMES iterations.
// NOTE(review): `milliseconds` below is measured but never used — the
// derived runtime line is commented out; the wall-clock pass that
// follows produces the values actually recorded.
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
if (conv->relu) {
checkCUDNN(cudnnConvolutionBiasActivationForward(
dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
convDesc, current_algo, workSpace, workSpaceSize,
&beta, outputTensor, outputPtr, biasTensor, biasPtr, actiDesc,
outputTensor, outputPtr));
} else {
checkCUDNN(cudnnConvolutionForward(
dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
convDesc, current_algo, workSpace, workSpaceSize,
&beta, outputTensor, outputPtr));
checkCUDNN(cudnnAddTensor(dnn, &alpha, biasTensor, biasPtr,
&alpha, outputTensor, outputPtr));
}
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
//double runtime=conv->runtime = milliseconds / REPEAT_TIMES;
// Timed pass: run for ~measure_time wall-clock seconds while sampling
// power; runtime = elapsed / iterations, energy = power * runtime.
{
long times=0;
double current_time=get_current_time();
double current_time2;
start_check_power();
for (int i = 0; ; i++,times++) {
// Only poll the clock every CHECK_TIME_PERIOD iterations.
if(i%CHECK_TIME_PERIOD==0&&(current_time2=get_current_time())-current_time>measure_time) break;
if (conv->relu) {
checkCUDNN(cudnnConvolutionBiasActivationForward(
dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
convDesc, current_algo, workSpace, workSpaceSize,
&beta, outputTensor, outputPtr, biasTensor, biasPtr, actiDesc,
outputTensor, outputPtr));
} else {
checkCUDNN(cudnnConvolutionForward(
dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
convDesc, current_algo, workSpace, workSpaceSize,
&beta, outputTensor, outputPtr));
checkCUDNN(cudnnAddTensor(dnn, &alpha, biasTensor, biasPtr,
&alpha, outputTensor, outputPtr));
}
}
power=finish_check_power();
runtime= (current_time2-current_time)/times;
energy=power*runtime;
printf("<measure>, %s, ",key.c_str());
printf("runtime=%f power=%f energy=%f\n",runtime,power,power*runtime);
// Persist the measurement in the in-memory cache and the on-disk db.
mp[key].runtime=runtime;
mp[key].power=power;
db_output<<key<<"|"<<runtime<<"|"<<power<<endl;
db_output.flush();
#ifdef VERBOSE
printf("measure[Conv2D]: i(%d %d %d %d) o(%d) k(%d %d) s(%d %d) p(%d %d) cost(%.4lf)\n",
BATCH_SIZE, inputC, inputH, inputW, outputC, conv->kernelH, conv->kernelW,
conv->strideH, conv->strideW, conv->padH, conv->padW, conv->runtime);
#endif
}
// Record this algorithm's cost and keep it if it beats the best so far
// (use_perf_order forces the final candidate — cuDNN's top pick — to win).
end:
cost_t tmp;
tmp.runtime=runtime;
tmp.energy=energy;
conv->algo_cost_mp[current_algo]=tmp;
double cost= cost_func(runtime,power);
if(first||cost<best_cost||use_perf_order)
{
first=0;
best_cost=cost;
conv->runtime=runtime;
conv->power=power;
conv->energy=energy;
conv->fwdAlgo=current_algo;
}
}
}
| 8d165debcfe0f9c78bcc01eaa149b677fb763292.cu | /* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops.h"
#include "cuda_helper.h"
// Create the cuDNN descriptors for this convolution, validate the inferred
// output shape against outputs[0], and allocate the device buffers for the
// filter, bias and output tensors.
void Conv2D::map(void)
{
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
// Input layout is NCHW; dim[0] is the fixed batch dimension.
int inputC = inputs[0].dim[1];
int inputH = inputs[0].dim[2];
int inputW = inputs[0].dim[3];
// set descriptors
checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, BATCH_SIZE, inputC, inputH, inputW));
checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, outputC, inputC, kernelH, kernelW));
//checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padH, padW,
strideH, strideW, 1/*dilationH*/, 1/*dilationW*/,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// Let cuDNN derive the output shape and check it against the recorded op.
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
inputTensor, filterDesc, &n, &c, &h, &w));
assert(n == BATCH_SIZE);
assert(c == outputC);
assert(outputs[0].dim[2] == h);
assert(outputs[0].dim[3] == w);
checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, n, c, h, w));
// The activation descriptor only exists when the conv is fused with ReLU.
if (relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
}
// Allocate device tensors. c == outputC is asserted above, so sizes are
// computed from the member `outputC` and the cuDNN-derived h/w directly;
// the previous `int outputC = c;` local shadowed the member of the same
// name used earlier in this function and has been removed.
size_t filterSize = sizeof(DATATYPE) * inputC * outputC * kernelH * kernelW;
size_t outputSize = sizeof(DATATYPE) * BATCH_SIZE * outputC * h * w;
size_t biasSize = sizeof(DATATYPE) * outputC;
checkCUDA(cudaMalloc(&filterPtr, filterSize));
checkCUDA(cudaMalloc(&biasPtr, biasSize));
checkCUDA(cudaMalloc(&outputs[0].ptr, outputSize));
}
// Tear down everything map() created: cuDNN descriptors plus the device
// buffers for the filter, bias and output tensors.
void Conv2D::unmap(void)
{
checkCUDNN(cudnnDestroyConvolutionDescriptor(convDesc));
checkCUDNN(cudnnDestroyFilterDescriptor(filterDesc));
checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor));
checkCUDNN(cudnnDestroyTensorDescriptor(biasTensor));
checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor));
// The activation descriptor is only created for fused-ReLU convolutions.
if (relu)
checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
// free tensors
checkCUDA(cudaFree(biasPtr));
checkCUDA(cudaFree(filterPtr));
checkCUDA(cudaFree(outputs[0].ptr));
}
// Run the forward convolution with the previously selected fwdAlgo.
// With relu set, the fused conv+bias+activation path is used; otherwise a
// plain convolution followed by a bias add.
void Conv2D::forward(void)
{
const float alpha = 1.0f;
const float beta = 0.0f;
if (relu) {
checkCUDNN(cudnnConvolutionBiasActivationForward(
model->dnn, &alpha, inputTensor, inputs[0].ptr, filterDesc, filterPtr,
convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
&beta, outputTensor, outputs[0].ptr, biasTensor, biasPtr, actiDesc,
outputTensor, outputs[0].ptr));
} else {
checkCUDNN(cudnnConvolutionForward(
model->dnn, &alpha, inputTensor, inputs[0].ptr, filterDesc, filterPtr,
convDesc, fwdAlgo, model->workSpace, model->workSpaceSize,
&beta, outputTensor, outputs[0].ptr));
// Bias add uses alpha for both scale factors: out = 1*bias + 1*out.
checkCUDNN(cudnnAddTensor(model->dnn, &alpha, biasTensor, biasPtr,
&alpha, outputTensor, outputs[0].ptr));
}
}
// Benchmark every usable cuDNN forward algorithm for 'conv' and record the
// best one (by cost_func over runtime/power) into the Conv2D object.
// Results are cached in 'mp' keyed by op signature + algorithm id, and new
// measurements are appended to db_output. Control flow uses goto to share
// the bookkeeping at 'end:' between the cache-hit and freshly-measured paths.
void Model::measure_conv2d_cost(Conv2D* conv)
{
  assert(conv->algo_cost_mp.empty());
  const float alpha = 1.0f;
  const float beta = 0.0f;
  int inputC = conv->inputs[0].dim[1];
  int inputH = conv->inputs[0].dim[2];
  int inputW = conv->inputs[0].dim[3];
  int outputC = conv->outputs[0].dim[1];
  int outputH = conv->outputs[0].dim[2];
  int outputW = conv->outputs[0].dim[3];
  // Configure the shared measurement descriptors for this conv's shape.
  checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, BATCH_SIZE, inputC, inputH, inputW));
  checkCUDNN(cudnnSetTensor4dDescriptor(biasTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, 1, outputC, 1, 1));
  checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT,
      CUDNN_TENSOR_NCHW, outputC, inputC, conv->kernelH, conv->kernelW));
  //checkCUDNN(cudnnSetConvolutionMathType(convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, conv->padH, conv->padW,
      conv->strideH, conv->strideW, 1/*dilationH*/, 1/*dilationW*/,
      CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  checkCUDNN(cudnnSetActivationDescriptor(actiDesc, CUDNN_ACTIVATION_RELU,
      CUDNN_NOT_PROPAGATE_NAN, 0.0));
  int n, c, h, w;
  // Sanity-check that cuDNN's computed output shape matches the stored one.
  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
      inputTensor, filterDesc, &n, &c, &h, &w));
  assert(n == BATCH_SIZE);
  assert(c == outputC);
  assert(outputH == h);
  assert(outputW == w);
  checkCUDNN(cudnnSetTensor4dDescriptor(outputTensor, CUDNN_TENSOR_NCHW,
      CUDNN_DATA_FLOAT, n, c, h, w));
  size_t inputSize = sizeof(DATATYPE) * BATCH_SIZE * inputC * inputH * inputW;
  size_t filterSize = sizeof(DATATYPE) * inputC * outputC
      * conv->kernelH * conv->kernelW;
  size_t outputSize = sizeof(DATATYPE) * BATCH_SIZE * outputC * outputH * outputW;
  // The shared scratch tensors must be large enough for this op.
  assert(inputSize < MAX_TENSOR_SIZE);
  assert(filterSize < MAX_TENSOR_SIZE);
  assert(outputSize < MAX_TENSOR_SIZE);
  const int reqAlgCnt = 8;
  int cnt = 0;
  // Let cuDNN enumerate and pre-time the candidate forward algorithms.
  cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
  checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
      dnn, inputTensor, inputPtr, filterDesc, filterPtr, convDesc,
      outputTensor, outputPtr, reqAlgCnt, &cnt, perfResults,
      workSpace, workSpaceSize));
  assert(cnt > 0);
  checkCUDNN(perfResults[0].status);
#ifdef VERBOSE
  for (int i = 0; i < cnt; i++) {
    printf("fwdAlgo(%d) time(%.2lfms) space(%zuMB)\n", perfResults[i].algo,
           perfResults[i].time, perfResults[i].memory / 1024 / 1024);
  }
#endif
  double best_cost=999999999999999.0;
  int first=1;
  // use_perf_order: iterate in cuDNN's preference order (and keep the last
  // surviving candidate); otherwise iterate in reverse and keep the cheapest.
  int begin=cnt-1;
  if(use_perf_order)
  {
    begin=0;
  }
  for(int idx=begin;idx>=0;idx--)
  {
    // Skip algorithms cuDNN reported as unusable for this configuration.
    if(int(perfResults[idx].status)!=0) continue;
    cudnnConvolutionFwdAlgo_t current_algo=perfResults[idx].algo;
    //printf("<<<%d>>>\n",int(perfResults[idx].status));
    //conv->fwdAlgo = (cudnnConvolutionFwdAlgo_t)2;
    string key=export_op_key(*conv)+",<"+to_string(current_algo)+">";
    double runtime;
    double power;
    double energy;
    //printf("<pre_measure>, %s\n",key.c_str());
    // Cache hit: reuse the previously measured runtime/power for this key.
    if(mp.find(key)!=mp.end())
    {
      runtime=mp[key].runtime;
      power=mp[key].power;
      energy=mp[key].power*mp[key].runtime;
      if(!mute)
      {
        printf("<found from mp>, %s, ",key.c_str());
        printf("runtime=%f power=%f energe=%f\n", mp[key].runtime, mp[key].power, mp[key].power*mp[key].runtime);
      }
      goto end;
    }
    // Warm-up / timing pass with CUDA events over REPEAT_TIMES launches.
    checkCUDA(cudaDeviceSynchronize());
    checkCUDA(cudaEventRecord(startEvent));
    for (int i = 0; i < REPEAT_TIMES; i++) {
      if (conv->relu) {
        checkCUDNN(cudnnConvolutionBiasActivationForward(
            dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
            convDesc, current_algo, workSpace, workSpaceSize,
            &beta, outputTensor, outputPtr, biasTensor, biasPtr, actiDesc,
            outputTensor, outputPtr));
      } else {
        checkCUDNN(cudnnConvolutionForward(
            dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
            convDesc, current_algo, workSpace, workSpaceSize,
            &beta, outputTensor, outputPtr));
        checkCUDNN(cudnnAddTensor(dnn, &alpha, biasTensor, biasPtr,
            &alpha, outputTensor, outputPtr));
      }
    }
    checkCUDA(cudaEventRecord(endEvent));
    checkCUDA(cudaEventSynchronize(endEvent));
    float milliseconds;
    cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
    //double runtime=conv->runtime = milliseconds / REPEAT_TIMES;
    // Power-measurement pass: run for ~measure_time wall-clock seconds while
    // sampling power, then average runtime over the completed iterations.
    {
      long times=0;
      double current_time=get_current_time();
      double current_time2;
      start_check_power();
      for (int i = 0; ; i++,times++) {
        if(i%CHECK_TIME_PERIOD==0&&(current_time2=get_current_time())-current_time>measure_time) break;
        if (conv->relu) {
          checkCUDNN(cudnnConvolutionBiasActivationForward(
              dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
              convDesc, current_algo, workSpace, workSpaceSize,
              &beta, outputTensor, outputPtr, biasTensor, biasPtr, actiDesc,
              outputTensor, outputPtr));
        } else {
          checkCUDNN(cudnnConvolutionForward(
              dnn, &alpha, inputTensor, inputPtr, filterDesc, filterPtr,
              convDesc, current_algo, workSpace, workSpaceSize,
              &beta, outputTensor, outputPtr));
          checkCUDNN(cudnnAddTensor(dnn, &alpha, biasTensor, biasPtr,
              &alpha, outputTensor, outputPtr));
        }
      }
      power=finish_check_power();
      runtime= (current_time2-current_time)/times;
      energy=power*runtime;
      printf("<measure>, %s, ",key.c_str());
      printf("runtime=%f power=%f energy=%f\n",runtime,power,power*runtime);
      // Persist the new measurement in the in-memory cache and on disk.
      mp[key].runtime=runtime;
      mp[key].power=power;
      db_output<<key<<"|"<<runtime<<"|"<<power<<endl;
      db_output.flush();
#ifdef VERBOSE
      printf("measure[Conv2D]: i(%d %d %d %d) o(%d) k(%d %d) s(%d %d) p(%d %d) cost(%.4lf)\n",
             BATCH_SIZE, inputC, inputH, inputW, outputC, conv->kernelH, conv->kernelW,
             conv->strideH, conv->strideW, conv->padH, conv->padW, conv->runtime);
#endif
    }
end:
    // Record this algorithm's cost and keep the best candidate seen so far
    // (use_perf_order overrides the comparison so the last valid entry wins).
    cost_t tmp;
    tmp.runtime=runtime;
    tmp.energy=energy;
    conv->algo_cost_mp[current_algo]=tmp;
    double cost= cost_func(runtime,power);
    if(first||cost<best_cost||use_perf_order)
    {
      first=0;
      best_cost=cost;
      conv->runtime=runtime;
      conv->power=power;
      conv->energy=energy;
      conv->fwdAlgo=current_algo;
    }
  }
}
|
b4d18875d859a93f2ab0747286967baa90b210de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cstdio>
#include <vector>
#include "include.cuh"
#include "ANCFSystem.cuh"
#include <cusp/io/matrix_market.h>
//// linear operator y = A*x (for CUSP)
//class stencil: public cusp::linear_operator<double, cusp::device_memory> {
//public:
// typedef cusp::linear_operator<double, cusp::device_memory> super;
//
// int N;
// DeviceView massMatrix;
// DeviceView phiqMatrix;
// DeviceValueArrayView temp;
//
//// constructor
// stencil(int N, DeviceView lhs_mass, DeviceView lhs_phiq,
// DeviceValueArrayView tempVector) :
// super(N, N), N(N) {
// massMatrix = lhs_mass;
// phiqMatrix = lhs_phiq;
// temp = tempVector;
// }
//
//// linear operator y = A*x
// template<typename VectorType1, typename VectorType2>
// void operator()(const VectorType1& x, VectorType2& y) const {
//// obtain a raw pointer to device memory
// cusp::multiply(massMatrix, x, temp);
// cusp::multiply(phiqMatrix, x, y);
// cusp::blas::axpy(temp, y, 1);
// }
//};
// Construct the ANCF system with default integrator, solver, quadrature,
// and contact parameters, and open the output files for positions, energy,
// and reaction forces.
ANCFSystem::ANCFSystem()
{
	// Set default solver parameters
	setAlpha_HHT(-0.1);
	setTimeStep(1e-3);
	maxNewtonIterations = 20;
	// spike stuff
	partitions = 1;
	solverOptions.safeFactorization = true;
	solverOptions.trackReordering = true;
	solverOptions.maxNumIterations = 5000;
	//mySpmv = new SpmvFunctor(lhs);
	// m_spmv = new MySpmv(lhs_mass, lhs_phiq, lhsVec);
	preconditionerUpdateModulus = -1; // the preconditioner updates every ___ time steps
	preconditionerMaxKrylovIterations = -1; // the preconditioner updates if Krylov iterations are greater than ____ iterations
	// end spike stuff
	this->timeIndex = 0;
	this->time = 0;
	timeToSimulate = 0;
	simTime = 0;
	fullJacobian = 1;
	// 3-point Gauss-Legendre quadrature weights and abscissae on [-1,1].
	wt3.push_back(5.0 / 9.0);
	wt3.push_back(8.0 / 9.0);
	wt3.push_back(5.0 / 9.0);
	pt3.push_back(-sqrt(3.0 / 5.0));
	pt3.push_back(0.0);
	pt3.push_back(sqrt(3.0 / 5.0));
	// 5-point Gauss-Legendre quadrature weights and abscissae on [-1,1].
	wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
	wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
	wt5.push_back(128. / 225.);
	wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
	wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
	pt5.push_back(-(sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
	pt5.push_back(-(sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
	pt5.push_back(0.);
	pt5.push_back((sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
	pt5.push_back((sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
	// Contact/collision defaults.
	numCollisions = 0;
	numCollisionsSphere = 0;
	numContactPoints = 5;
	coefRestitution = .3;
	frictionCoef = .3;
	fileIndex = 0;
	// set up position files
	char filename1[100];
	char filename2[100];
	char filename3[100];
	sprintf(filename1, "position.dat");
	resultsFile1.open(filename1);
	sprintf(filename2, "energy.dat");
	resultsFile2.open(filename2);
	sprintf(filename3, "reactions.dat");
	resultsFile3.open(filename3);
}
// Select the SPIKE Krylov solver from an integer code:
// 0 = BiCGStab, 1 = BiCGStab1, 2 = BiCGStab2, 3 = MINRES.
// An unrecognized code leaves the current setting unchanged (and now warns,
// instead of being silently ignored).
void ANCFSystem::setSolverType(int solverType)
{
	switch(solverType) {
	case 0:
		solverOptions.solverType = spike::BiCGStab;
		break;
	case 1:
		solverOptions.solverType = spike::BiCGStab1;
		break;
	case 2:
		solverOptions.solverType = spike::BiCGStab2;
		break;
	case 3:
		solverOptions.solverType = spike::MINRES;
		break;
	default:
		// Robustness fix: previously an unknown code fell through silently.
		fprintf(stderr, "setSolverType: unknown solver type %d; keeping current setting.\n", solverType);
		break;
	}
}
// Enable (non-zero) or disable (zero) the SPIKE preconditioner.
void ANCFSystem::setPrecondType(int useSpike)
{
	if (useSpike)
		solverOptions.precondType = spike::Spike;
	else
		solverOptions.precondType = spike::None;
}
// Configure the HHT-alpha integrator. The comment in the original notes the
// usual range: alpha should be greater than -0.3, typically -0.1.
// beta and gamma follow the standard HHT relations:
//   beta  = (1 - alpha)^2 / 4,   gamma = 1/2 - alpha.
void ANCFSystem::setAlpha_HHT(double alpha) {
	alphaHHT = alpha;
	double oneMinusAlpha = 1 - alphaHHT;
	betaHHT = oneMinusAlpha * oneMinusAlpha * .25;
	gammaHHT = 0.5 - alphaHHT;
}
// Set the integration step size and derive the Newton/Krylov tolerances.
// 'precision' is the desired accuracy in positions; dividing by h^2 converts
// it to a tolerance on the acceleration-level Newton correction.
void ANCFSystem::setTimeStep(double step_size,
		double precision)
{
	h = step_size;
	const double safety = 1;////0.5;
	tol = safety * precision / (h * h);
	// Krylov tolerances: relative tolerance tracks the Newton tolerance but
	// is capped at 1e-6; absolute tolerance is fixed.
	solverOptions.relTol = ::min(0.01 * tol, 1e-6);
	solverOptions.absTol = 1e-10;
}
// Print the integrator and Krylov solver settings to stdout.
void ANCFSystem::printSolverParams()
{
	printf("Step size: %e\n", h);
	printf("Newton tolerance: %e\n", tol);
	printf("Max. Newton iterations: %d\n", maxNewtonIterations);
	// Fixed output typo: the label previously read "abdTol".
	printf("Krylov relTol: %e absTol: %e\n", solverOptions.relTol, solverOptions.absTol);
	printf("Max. Krylov iterations: %d\n", solverOptions.maxNumIterations);
	printf("----------------------------\n");
}
// Register a spherical particle with the system: record its material data and
// append its position/velocity/acceleration/force entries (3 doubles each) to
// the host-side state vectors. Returns the new particle count.
int ANCFSystem::addParticle(Particle* particle) {
	//add the element
	particle->setParticleIndex(particles.size());
	this->particles.push_back(*particle);
	MaterialParticle material;
	material.E = particle->getElasticModulus();
	material.nu = particle->getNu();
	material.mass = particle->getMass();
	// Cache 1/mass so the dynamics kernel avoids a per-step division.
	material.massInverse = 1.0 / particle->getMass();
	material.r = particle->getRadius();
	material.numContactPoints = 1;
	this->pMaterials_h.push_back(material);
	// update p
	float3 pos0 = particle->getInitialPosition();
	pParticle_h.push_back(pos0.x);
	pParticle_h.push_back(pos0.y);
	pParticle_h.push_back(pos0.z);
	// update v
	float3 vel0 = particle->getInitialVelocity();
	vParticle_h.push_back(vel0.x);
	vParticle_h.push_back(vel0.y);
	vParticle_h.push_back(vel0.z);
	// Accelerations and forces start at zero.
	for (int i = 0; i < 3; i++) {
		aParticle_h.push_back(0.0);
		fParticle_h.push_back(0.0);
	}
	return particles.size();
}
// Register a 12-DOF ANCF beam element: record its material data, append its
// two nodes' coordinates to the host position vector, zero-initialize all
// per-element state vectors, compute the consistent gravity load, and reserve
// the element's dense 12x12 block in the COO left-hand-side matrix.
// Returns the new element count.
int ANCFSystem::addElement(Element* element) {
	//add the element
	element->setElementIndex(elements.size());
	this->elements.push_back(*element);
	Material material;
	material.E = element->getElasticModulus();
	material.l = element->getLength_l();
	material.nu = element->getNu();
	material.rho = element->getDensity();
	material.r = element->getRadius();
	material.numContactPoints = numContactPoints;
	this->materials.push_back(material);
	// update p
	// Each node contributes 6 coordinates: position (x,y,z) and slope (dx,dy,dz).
	Node node = element->getNode0();
	p_h.push_back(node.x);
	p_h.push_back(node.y);
	p_h.push_back(node.z);
	p_h.push_back(node.dx1);
	p_h.push_back(node.dy1);
	p_h.push_back(node.dz1);
	node = element->getNode1();
	p_h.push_back(node.x);
	p_h.push_back(node.y);
	p_h.push_back(node.z);
	p_h.push_back(node.dx1);
	p_h.push_back(node.dy1);
	p_h.push_back(node.dz1);
	// Zero-initialize the 12 per-DOF entries of every state/work vector.
	for (int i = 0; i < 12; i++) {
		e_h.push_back(0.0);
		v_h.push_back(0.0);
		a_h.push_back(0.0);
		anew_h.push_back(0.0);
		fint_h.push_back(0.0);
		fcon_h.push_back(0.0);
		fapp_h.push_back(0.0);
		phiqlam_h.push_back(0.0);
		delta_h.push_back(0.0);
		strainDerivative_h.push_back(0.0);
	}
	strain_h.push_back(0.0);
	for (int i = 0; i < 4; i++) {
		Sx_h.push_back(0.0);
		Sxx_h.push_back(0.0);
	}
	//update other vectors (no initial velocity or acceleration)
	double r = element->getRadius();
	double a = element->getLength_l();
	double rho = element->getDensity();
	double A = PI * r * r;
	// update external force vector (gravity)
	// Consistent nodal gravity load for the ANCF shape functions:
	// translational terms get rho*A*a/2, slope terms get +/- rho*A*a^2/12.
	fext_h.push_back(rho * A * a * GRAVITYx / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYy / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYz / 0.2e1);
	fext_h.push_back(rho * A * a * a * GRAVITYx / 0.12e2);
	fext_h.push_back(rho * A * a * a * GRAVITYy / 0.12e2);
	fext_h.push_back(rho * A * a * a * GRAVITYz / 0.12e2);
	fext_h.push_back(rho * A * a * GRAVITYx / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYy / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYz / 0.2e1);
	fext_h.push_back(-rho * A * a * a * GRAVITYx / 0.12e2);
	fext_h.push_back(-rho * A * a * a * GRAVITYy / 0.12e2);
	fext_h.push_back(-rho * A * a * a * GRAVITYz / 0.12e2);
	// Reserve the element's dense 12x12 block in the COO LHS (values filled later).
	for (int i = 0; i < 12; i++) {
		for (int j = 0; j < 12; j++) {
			lhsI_h.push_back(i + 12 * (elements.size() - 1));
			lhsJ_h.push_back(j + 12 * (elements.size() - 1));
			lhs_h.push_back(0.0);
		}
	}
	return elements.size();
}
// Apply a point force at normalized coordinate xi (in [0,1]) along an element,
// distributing it to the element's 12 nodal coordinates via the ANCF cubic
// shape functions, then push the updated applied-force vector to the device.
// Returns 0.
int ANCFSystem::addForce(Element* element, double xi, float3 force) {
	int index = element->getElementIndex();
	// BUG FIX: the length was previously stored in an int, truncating
	// non-integer element lengths and corrupting the slope-DOF (moment)
	// terms below. Everywhere else in this file the length is a double.
	double l = element->getLength_l();
	//fapp_h = fapp_d;
	fapp_h[0 + 12 * index] += (1 - 3 * xi * xi + 2 * pow(xi, 3)) * force.x;
	fapp_h[1 + 12 * index] += (1 - 3 * xi * xi + 2 * pow(xi, 3)) * force.y;
	fapp_h[2 + 12 * index] += (1 - 3 * xi * xi + 2 * pow(xi, 3)) * force.z;
	fapp_h[3 + 12 * index] += l * (xi - 2 * xi * xi + pow(xi, 3)) * force.x;
	fapp_h[4 + 12 * index] += l * (xi - 2 * xi * xi + pow(xi, 3)) * force.y;
	fapp_h[5 + 12 * index] += l * (xi - 2 * xi * xi + pow(xi, 3)) * force.z;
	fapp_h[6 + 12 * index] += (3 * xi * xi - 2 * pow(xi, 3)) * force.x;
	fapp_h[7 + 12 * index] += (3 * xi * xi - 2 * pow(xi, 3)) * force.y;
	fapp_h[8 + 12 * index] += (3 * xi * xi - 2 * pow(xi, 3)) * force.z;
	fapp_h[9 + 12 * index] += l * (-xi * xi + pow(xi, 3)) * force.x;
	fapp_h[10 + 12 * index] += l * (-xi * xi + pow(xi, 3)) * force.y;
	fapp_h[11 + 12 * index] += l * (-xi * xi + pow(xi, 3)) * force.z;
	// Mirror the full host vector to the device.
	fapp_d = fapp_h;
	return 0;
}
// Zero the device-side applied-force vector. Returns 0.
int ANCFSystem::clearAppliedForces() {
	thrust::fill_n(fapp_d.begin(), fapp_d.size(), 0.0);
	return 0;
}
// Assemble the constraint Jacobian phi_q as a sparse COO matrix on the device.
// Constraints here are linear (each row has a +1 entry, and a -1 entry when
// the constraint couples two DOFs), so this only needs to run once.
int ANCFSystem::updatePhiq() // used in Newton iteration, nice to keep it separate (but not memory efficient) - only needs to be done once (linear constraints)
{
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		phiqJ_h.push_back(i);
		phiqI_h.push_back(constraint.dofLoc.x);
		phiq_h.push_back(1.0);
		// nodeNum2 == -1 marks a ground constraint (single DOF); otherwise the
		// constraint is a difference between two DOFs and also gets a -1 entry.
		if (constraint.nodeNum2 != -1) {
			phiqJ_h.push_back(i);
			phiqI_h.push_back(constraint.dofLoc.y);
			phiq_h.push_back(-1.0);
		}
	}
	// Copy the COO triplets to the device and wrap them in cusp views.
	phiqI_d = phiqI_h;
	phiqJ_d = phiqJ_h;
	phiq_d = phiq_h;
	thrust::device_ptr<int> wrapped_device_I(CASTI1(phiqI_d));
	DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I,
			wrapped_device_I + phiqI_d.size());
	thrust::device_ptr<int> wrapped_device_J(CASTI1(phiqJ_d));
	DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J,
			wrapped_device_J + phiqJ_d.size());
	thrust::device_ptr<double> wrapped_device_V(CASTD1(phiq_d));
	DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V,
			wrapped_device_V + phiq_d.size());
	phiq = DeviceView(12 * elements.size(), constraints.size(), phiq_d.size(),
			row_indices, column_indices, values);
	// COO must be row-sorted for cusp::multiply to work correctly.
	phiq.sort_by_row();
	return 0;
}
// One thread per constraint: evaluate the scaled constraint residual
//   phi[i] = factor * (C_i(p) - phi0[i])
// where C_i is the linear constraint expression for pair i. A pair with
// y == -1 pins a single DOF; otherwise the constraint is the difference of
// two DOFs. Launched with a 1D grid covering numConstraints threads.
__global__ void calculateRHSlower(double* phi, double* p, double* phi0,
		double factor, int2* constraintPairs, int numConstraints) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i < numConstraints) {
		int2 constraintPair = constraintPairs[i];
		if (constraintPair.y == -1) {
			phi[i] = factor * (p[constraintPair.x] - phi0[i]);
		} else {
			phi[i] = factor
					* (p[constraintPair.x] - p[constraintPair.y] - phi0[i]);
		}
		// BUG FIX: removed a __syncthreads() that sat inside this divergent
		// branch. The kernel uses no shared memory, so the barrier served no
		// purpose, and a barrier not reached by all threads of a block is
		// undefined behavior.
	}
}
// Evaluate the constraint residual on the device, scaled by 1/(beta*h^2) to
// match the acceleration-level HHT Newton system. Returns 0.
int ANCFSystem::updatePhi() {
	hipLaunchKernelGGL(( calculateRHSlower), dim3(dimGridConstraint),dim3(dimBlockConstraint), 0, 0, CASTD1(phi_d), CASTD1(pnew_d), CASTD1(phi0_d), 1.0/(betaHHT*h*h), CASTI2(constraintPairs_d), constraints.size());
	return 0;
}
// Semi-implicit Euler step for free particles, one thread per particle:
//   a = f/m + g;  v += h*a;  p += h*v.
// Arrays are packed as (x,y,z) triples per particle.
__global__ void updateParticleDynamics_GPU(double h, double* a, double* v,
		double* p, double* f, MaterialParticle* materials, int numParticles) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= numParticles)
		return;
	// Per-particle base pointers into the packed arrays.
	double* ai = &a[3 * i];
	double* vi = &v[3 * i];
	double* pi = &p[3 * i];
	double* fi = &f[3 * i];
	MaterialParticle mat = materials[i];
	ai[0] = mat.massInverse * fi[0] + GRAVITYx;
	ai[1] = mat.massInverse * fi[1] + GRAVITYy;
	ai[2] = mat.massInverse * fi[2] + GRAVITYz;
	for (int k = 0; k < 3; k++) {
		vi[k] += h * ai[k];
		pi[k] += h * vi[k];
	}
}
// Launch the particle integration kernel over all particles. Returns 0.
int ANCFSystem::updateParticleDynamics() {
	hipLaunchKernelGGL(( updateParticleDynamics_GPU), dim3(dimGridParticles),dim3(dimBlockParticles), 0, 0, h,CASTD1(aParticle_d), CASTD1(vParticle_d), CASTD1(pParticle_d), CASTD1(fParticle_d), CASTMP(pMaterials_d), particles.size());
	return 0;
}
// Record the initial value of each (linear) constraint expression so the
// constraints are satisfied exactly at t = 0. Returns 0.
int ANCFSystem::calculateInitialPhi() {
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		double value;
		if (constraint.nodeNum2 == -1) {
			// Ground constraint: a single DOF is pinned at its initial value.
			value = p_h[constraint.dofLoc.x];
		} else {
			// Relative constraint: the difference of two DOFs is pinned.
			value = p_h[constraint.dofLoc.x] - p_h[constraint.dofLoc.y];
		}
		phi0_h.push_back(value);
	}
	return 0;
}
// Copy all host-side state to the device and build the cusp array/matrix
// views over the raw device storage. Also computes the kernel launch
// configurations. Must be called after all elements/particles/constraints
// are added, and the view construction must follow the copies (the views
// wrap the device vectors' current storage). Returns 0.
int ANCFSystem::initializeDevice() {
	// Host -> device copies of all state vectors.
	pMaterials_d = pMaterials_h;
	pParticle_d = pParticle_h;
	vParticle_d = vParticle_h;
	aParticle_d = aParticle_h;
	fParticle_d = fParticle_h;
	materials_d = materials;
	strainDerivative_d = strainDerivative_h;
	curvatureDerivative_d = strainDerivative_h;
	strain_d = strain_h;
	Sx_d = Sx_h;
	Sxx_d = Sxx_h;
	e_d = e_h;
	p_d = p_h;
	v_d = v_h;
	a_d = a_h;
	// pnew/vnew start as copies of the current state.
	pnew_d = p_h;
	vnew_d = v_h;
	anew_d = anew_h;
	fext_d = fext_h;
	fint_d = fint_h;
	fapp_d = fapp_h;
	fcon_d = fcon_h;
	phi_d = phi_h;
	phi0_d = phi0_h;
	phiqlam_d = phiqlam_h;
	delta_d = delta_h;
	constraintPairs_d = constraintPairs_h;
	lhsVec_d = anew_h;
	lhsI_d = lhsI_h;
	lhsJ_d = lhsJ_h;
	lhs_d = lhs_h;
	constraintsI_d = constraintsI_h;
	constraintsJ_d = constraintsJ_h;
	constraints_d = constraints_h;
	// Wrap raw device pointers so cusp views can alias the vectors.
	thrust::device_ptr<double> wrapped_device_e(CASTD1(e_d));
	thrust::device_ptr<double> wrapped_device_p(CASTD1(p_d));
	thrust::device_ptr<double> wrapped_device_v(CASTD1(v_d));
	thrust::device_ptr<double> wrapped_device_a(CASTD1(a_d));
	thrust::device_ptr<double> wrapped_device_pnew(CASTD1(pnew_d));
	thrust::device_ptr<double> wrapped_device_vnew(CASTD1(vnew_d));
	thrust::device_ptr<double> wrapped_device_anew(CASTD1(anew_d));
	thrust::device_ptr<double> wrapped_device_fext(CASTD1(fext_d));
	thrust::device_ptr<double> wrapped_device_fint(CASTD1(fint_d));
	thrust::device_ptr<double> wrapped_device_fapp(CASTD1(fapp_d));
	thrust::device_ptr<double> wrapped_device_fcon(CASTD1(fcon_d));
	thrust::device_ptr<double> wrapped_device_phi(CASTD1(phi_d));
	thrust::device_ptr<double> wrapped_device_phi0(CASTD1(phi0_d));
	thrust::device_ptr<double> wrapped_device_phiqlam(CASTD1(phiqlam_d));
	thrust::device_ptr<double> wrapped_device_delta(CASTD1(delta_d));
	thrust::device_ptr<double> wrapped_device_lhsVec(CASTD1(lhsVec_d));
	// e/anew are partitioned: the first 12*numElements entries are the
	// acceleration-level DOFs ("Top"), the remainder are the Lagrange
	// multipliers ("Bottom"/lambda).
	eAll = DeviceValueArrayView(wrapped_device_e,
			wrapped_device_e + e_d.size());
	eTop = DeviceValueArrayView(wrapped_device_e,
			wrapped_device_e + 12 * elements.size());
	eBottom = DeviceValueArrayView(wrapped_device_e + 12 * elements.size(),
			wrapped_device_e + e_d.size());
	p = DeviceValueArrayView(wrapped_device_p, wrapped_device_p + p_d.size());
	v = DeviceValueArrayView(wrapped_device_v, wrapped_device_v + v_d.size());
	a = DeviceValueArrayView(wrapped_device_a, wrapped_device_a + a_d.size());
	pnew = DeviceValueArrayView(wrapped_device_pnew,
			wrapped_device_pnew + pnew_d.size());
	vnew = DeviceValueArrayView(wrapped_device_vnew,
			wrapped_device_vnew + vnew_d.size());
	anewAll = DeviceValueArrayView(wrapped_device_anew,
			wrapped_device_anew + anew_d.size());
	anew = DeviceValueArrayView(wrapped_device_anew,
			wrapped_device_anew + 12 * elements.size());
	lambda = DeviceValueArrayView(wrapped_device_anew + 12 * elements.size(),
			wrapped_device_anew + anew_d.size());
	fext = DeviceValueArrayView(wrapped_device_fext,
			wrapped_device_fext + fext_d.size())
	;
	fint = DeviceValueArrayView(wrapped_device_fint,
			wrapped_device_fint + fint_d.size());
	fapp = DeviceValueArrayView(wrapped_device_fapp,
			wrapped_device_fapp + fapp_d.size());
	fcon = DeviceValueArrayView(wrapped_device_fcon,
			wrapped_device_fcon + fcon_d.size());
	phi = DeviceValueArrayView(wrapped_device_phi,
			wrapped_device_phi + phi_d.size());
	phi0 = DeviceValueArrayView(wrapped_device_phi0,
			wrapped_device_phi0 + phi0_d.size());
	phiqlam = DeviceValueArrayView(wrapped_device_phiqlam,
			wrapped_device_phiqlam + phiqlam_d.size());
	delta = DeviceValueArrayView(wrapped_device_delta,
			wrapped_device_delta + delta_d.size());
	lhsVec = DeviceValueArrayView(wrapped_device_lhsVec,
			wrapped_device_lhsVec + lhsVec_d.size());
	// create lhs matrix using cusp library (shouldn't change)
	thrust::device_ptr<int> wrapped_device_I(CASTI1(lhsI_d));
	DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I,
			wrapped_device_I + lhsI_d.size());
	thrust::device_ptr<int> wrapped_device_J(CASTI1(lhsJ_d));
	DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J,
			wrapped_device_J + lhsJ_d.size());
	thrust::device_ptr<double> wrapped_device_V(CASTD1(lhs_d));
	DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V,
			wrapped_device_V + lhs_d.size());
	lhs = DeviceView(anew_d.size(), anew_d.size(), lhs_d.size(), row_indices,
			column_indices, values);
	// end create lhs matrix
	// create the view to the mass block of the lhs matrix
	// (the first 12*12*numElements COO entries are the element mass blocks).
	DeviceIndexArrayView row_indices_mass = DeviceIndexArrayView(
			wrapped_device_I, wrapped_device_I + 12 * 12 * elements.size());
	DeviceIndexArrayView column_indices_mass = DeviceIndexArrayView(
			wrapped_device_J, wrapped_device_J + 12 * 12 * elements.size());
	DeviceValueArrayView values_mass = DeviceValueArrayView(wrapped_device_V,
			wrapped_device_V + 12 * 12 * elements.size());
	lhs_mass = DeviceView(anew_d.size(), anew_d.size(),
			12 * 12 * elements.size(), row_indices_mass, column_indices_mass,
			values_mass);
	// end create the view to the mass block of the lhs matrix
	// create the view to the mass block of the lhs matrix
	// (the remaining COO entries are the constraint Jacobian blocks).
	DeviceIndexArrayView row_indices_phiq = DeviceIndexArrayView(
			wrapped_device_I + 12 * 12 * elements.size(),
			wrapped_device_I + lhsI_d.size());
	DeviceIndexArrayView column_indices_phiq = DeviceIndexArrayView(
			wrapped_device_J + 12 * 12 * elements.size(),
			wrapped_device_J + lhsJ_d.size());
	DeviceValueArrayView values_phiq = DeviceValueArrayView(
			wrapped_device_V + 12 * 12 * elements.size(),
			wrapped_device_V + lhs_d.size());
	lhs_phiq = DeviceView(anew_d.size(), anew_d.size(),
			lhs_d.size() - 12 * 12 * elements.size(), row_indices_phiq,
			column_indices_phiq, values_phiq);
	lhs_phiq.sort_by_row(); // MUST BE SORTED FOR SPMV TO WORK CORRECTLY
	// end create the view to the mass block of the lhs matrix
	// Launch configurations: one thread per constraint/element/particle.
	dimBlockConstraint.x = BLOCKDIMCONSTRAINT;
	dimGridConstraint.x = static_cast<int>(ceil(
			(static_cast<double>(constraints.size()))
					/ (static_cast<double>(BLOCKDIMCONSTRAINT))));
	dimBlockElement.x = BLOCKDIMELEMENT;
	dimGridElement.x = (int) ceil(
			((double) (elements.size())) / ((double) BLOCKDIMELEMENT));
	dimBlockParticles.x = BLOCKDIMELEMENT;
	dimGridParticles.x = (int) ceil(
			((double) (particles.size())) / ((double) BLOCKDIMELEMENT));
	dimBlockCollision.x = BLOCKDIMCOLLISION;
	dimGridCollision.x = (int) ceil(
			((double) (particles.size())) / ((double) BLOCKDIMCOLLISION));
	return 0;
}
// Finish system assembly and compute consistent initial accelerations:
// build the constraint Jacobian, append phi_q and phi_q' blocks to the COO
// LHS, push everything to the device, then solve the initial KKT system with
// the SPIKE solver. Must be called once, after all elements and constraints
// are added and before DoTimeStep(). Returns 0.
int ANCFSystem::initializeSystem() {
	ANCFSystem::updatePhiq();
	ANCFSystem::calculateInitialPhi();
	// Grow the host vectors by one entry per constraint (Lagrange multipliers).
	for (int i = 0; i < constraints.size(); i++) {
		delta_h.push_back(0);
		e_h.push_back(0);
		anew_h.push_back(0);
		phi_h.push_back(0);
		constraintPairs_h.push_back(constraints[i].dofLoc);
	}
	// join phi_q to lhs
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		lhsI_h.push_back(i + 12 * elements.size());
		lhsJ_h.push_back(constraint.dofLoc.x);
		lhs_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			lhsI_h.push_back(i + 12 * elements.size());
			lhsJ_h.push_back(constraint.dofLoc.y);
			lhs_h.push_back(-1.0);
		}
	}
	// join phi_q' to lhs
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		lhsJ_h.push_back(i + 12 * elements.size());
		lhsI_h.push_back(constraint.dofLoc.x);
		lhs_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			lhsJ_h.push_back(i + 12 * elements.size());
			lhsI_h.push_back(constraint.dofLoc.y);
			lhs_h.push_back(-1.0);
		}
	}
	// Get constraints
	// Same phi_q / phi_q' triplets, kept in a separate COO set.
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		constraintsI_h.push_back(i + 12 * elements.size());
		constraintsJ_h.push_back(constraint.dofLoc.x);
		constraints_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			constraintsI_h.push_back(i + 12 * elements.size());
			constraintsJ_h.push_back(constraint.dofLoc.y);
			constraints_h.push_back(-1.0);
		}
	}
	// join phi_q' to lhs
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		constraintsJ_h.push_back(i + 12 * elements.size());
		constraintsI_h.push_back(constraint.dofLoc.x);
		constraints_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			constraintsJ_h.push_back(i + 12 * elements.size());
			constraintsI_h.push_back(constraint.dofLoc.y);
			constraints_h.push_back(-1.0);
		}
	}
	initializeDevice();
	//ANCFSystem::initializeBoundingBoxes_CPU();
	//detector.updateBoundingBoxes(aabb_data_d);
	//detector.setBoundingBoxPointer(&aabb_data_d);
	//detector.detectPossibleCollisions();
	ANCFSystem::resetLeftHandSideMatrix();
	ANCFSystem::updateInternalForces();
	// RHS for the initial acceleration solve: fext - fint.
	//cusp::blas::axpy(fint,eTop,-1);
	cusp::blas::axpby(fext, fint, eTop, 1, -1);
	// create and setup the Spike::GPU solver
	m_spmv = new MySpmv(lhs_mass, lhs_phiq, lhsVec);
	mySolver = new SpikeSolver(partitions, solverOptions);
	mySolver->setup(lhs);
	// char filename[100];
	// sprintf(filename, "./lhs.txt");
	// cusp::io::write_matrix_market_file(lhs, filename);
	// Solve for the initial accelerations and Lagrange multipliers.
	cusp::blas::fill(anewAll, 0);
	bool success = mySolver->solve(*m_spmv, eAll, anewAll);
	spike::Stats stats = mySolver->getStats();
	// cout << "Success: " << success << "  Iterations: "
	// << stats.numIterations << "  relResidualNorm: "
	// << stats.relResidualNorm << endl;
	cout << endl
	     << "Linear problem size:    " << eAll.size() << endl
	     << "Number partitions:      " << stats.numPartitions << endl
	     << "Bandwidth after MC64:   " << stats.bandwidthMC64 << endl
	     << "Bandwidth after RCM:    " << stats.bandwidthReorder << endl
	     << "Bandwidth final:        " << stats.bandwidth << endl
	     << "nuKf factor:            " << stats.nuKf << endl << endl;
	cusp::copy(anew, a);
	cusp::copy(v, vnew);
	cusp::copy(p, pnew);
	// Vectors for Spike solver stats
	spikeSolveTime.resize(maxNewtonIterations);
	spikeNumIter.resize(maxNewtonIterations);
	//ANCFSystem::updateParticleDynamics();
	return 0;
}
// Advance the system by one HHT time step: predict (p, v) explicitly, run
// Newton iterations on the acceleration-level index-3 DAE (each iteration
// solves the KKT system with the SPIKE solver), then accept the converged
// state. Updates timing/iteration statistics and prints a per-step summary.
// Returns 0.
int ANCFSystem::DoTimeStep() {
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	//ANCFSystem::updateParticleDynamics();
	stepKrylovIterations = 0;
	precUpdated = false;
	// update q and q_dot for initial guess
	cusp::blas::axpbypcz(p, v, a, pnew, 1, h, .5 * h * h);
	cusp::blas::axpby(v, a, vnew, 1, h);
	// Force a preconditioner update if needed
	if ((preconditionerUpdateModulus > 0) && (timeIndex % preconditionerUpdateModulus == 0)) {
		mySolver->update(lhs.values);
		precUpdated = true;
		printf("Preconditioner updated (step condition)!\n");
	}
	// Perform Newton iterations
	int it;
	for (it = 0; it < maxNewtonIterations; it++) {
		// Assemble the residual: eTop = M*anew + fint - fapp - fext + phi_q'*lambda,
		// eBottom = phi (scaled constraint violation).
		ANCFSystem::updatePhi();
		cusp::multiply(phiq, lambda, phiqlam);
		ANCFSystem::resetLeftHandSideMatrix();
		cusp::multiply(lhs_mass, anew, eTop); //cusp::multiply(mass,anew,eTop);
		ANCFSystem::updateInternalForces();
		cusp::blas::axpbypcz(eTop, fapp, fint, eTop, 1, -1, 1);
		cusp::blas::axpby(eTop, fext, eTop, 1, -1);
		cusp::blas::axpy(phiqlam, eTop, 1);
		cusp::blas::copy(phi, eBottom);
		// SOLVE THE LINEAR SYSTEM USING SPIKE
		cusp::blas::fill(delta, 0); // very important
		//stencil lhsStencil(anewAll.size(), lhs_mass, lhs_phiq, lhsVec);
		bool success = mySolver->solve(*m_spmv, eAll, delta);
		spike::Stats stats = mySolver->getStats();
		if(!success) {
			// Dump the failing system for offline analysis before deciding
			// whether the failure is fatal.
			printf("**********  DUMP DATA **************\n");
			char filename[100];
			sprintf(filename, "./data/lhs%d.mtx", timeIndex);
			cusp::io::write_matrix_market_file(lhs, filename);
			sprintf(filename, "./data/rhs%d.mtx", timeIndex);
			cusp::io::write_matrix_market_file(eAll, filename);
			sprintf(filename, "./data/stats%d.txt", timeIndex);
			ofstream file(filename);
			file << "Code: " << mySolver->getMonitorCode();
			file << "  " << mySolver->getMonitorMessage() << std::endl;
			file << "Number of iterations = " << stats.numIterations << std::endl;
			file << "RHS norm             = " << stats.rhsNorm << std::endl;
			file << "Residual norm        = " << stats.residualNorm << std::endl;
			file << "Rel. residual norm   = " << stats.relResidualNorm << std::endl;
			file.close();
			int code = mySolver->getMonitorCode();
			if (code == -1 || code == -2) {
				//// TODO: clean this up...
				std::cout << "STOP" << std::endl;
				exit(0);
			}
		}
		spikeSolveTime[it] = stats.timeSolve;
		spikeNumIter[it] = stats.numIterations;
		stepKrylovIterations += stats.numIterations;
		// END SOLVE THE LINEAR SYSTEM
		// update anew
		cusp::blas::axpy(delta, anewAll, -1);
		// update vnew
		cusp::blas::axpbypcz(v, a, anew, vnew, 1, h * (1 - gammaHHT), h * gammaHHT);
		// update pnew
		cusp::blas::axpbypcz(v, a, anew, pnew, h, h * h * .5 * (1 - 2 * betaHHT), h * h * .5 * 2 * betaHHT);
		cusp::blas::axpy(p, pnew, 1);
		// Calculate infinity norm of the correction and check for convergence
		double delta_nrm = cusp::blas::nrmmax(delta);
		printf("         Krylov solver: %8.2f ms    %.2f iterations     ||delta||_inf = %e\n",
			stats.timeSolve, stats.numIterations, delta_nrm);
		if (delta_nrm <= tol)
			break;
	}
	// Number of Newton iterations and average number of Krylov iterations
	stepNewtonIterations = it + 1;
	// BUG FIX: cast before dividing; the previous integer division truncated
	// the average Krylov iteration count (making the krylov-condition check
	// below and the printed average inaccurate).
	float avgKrylov = static_cast<float>(stepKrylovIterations)
			/ static_cast<float>(stepNewtonIterations);
	// If the average number of Krylov iterations per Newton iteration exceeds the specified limit,
	// force a preconditioner update.
	if ((preconditionerMaxKrylovIterations > 0) && (avgKrylov > preconditionerMaxKrylovIterations)) {
		ANCFSystem::updateInternalForces();
		mySolver->update(lhs.values);
		precUpdated = true;
		printf("Preconditioner updated! (krylov condition)\n");
	}
	// Accept the converged step.
	cusp::copy(anew, a);
	cusp::copy(vnew, v);
	cusp::copy(pnew, p);
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	float elapsedTime;
	hipEventElapsedTime(&elapsedTime, start, stop);
	// BUG FIX: destroy the timing events; they were created on every call and
	// never released, leaking device resources over the simulation.
	hipEventDestroy(start);
	hipEventDestroy(stop);
	stepTime = elapsedTime;
	timeToSimulate += elapsedTime / 1000.0;
	p_h = p_d;
	// char filename[100];
	// sprintf(filename, "./data/lhs%d.txt",timeIndex);
	// cusp::io::write_matrix_market_file(lhs, filename);
	//
	// char filename1[100];
	// sprintf(filename1, "./data/rhs%d.txt",timeIndex);
	// cusp::io::write_matrix_market_file(eAll, filename1);
	time += h;
	timeIndex++;
	printf("%f, Elapsed time = %8.2f ms, Newton = %2d, Ave. Krylov Per Newton = %.2f\n",
			time, elapsedTime, stepNewtonIterations, avgKrylov);
	return 0;
}
// Evaluate the beam centerline position at normalized coordinate xi in [0,1]
// using the four ANCF cubic (Hermite-type) shape functions.
float3 ANCFSystem::getXYZPosition(int elementIndex, double xi) {
	double a = elements[elementIndex].getLength_l();
	double* e = CASTD1(p_h);
	e = &e[12 * elementIndex];
	// Shape functions for [pos0, slope0, pos1, slope1]; slope terms scale by a.
	double xi3 = pow(xi, 3);
	double s0 = 1 - 3 * xi * xi + 2 * xi3;
	double s1 = a * (xi - 2 * xi * xi + xi3);
	double s2 = 3 * xi * xi - 2 * xi3;
	double s3 = a * (-xi * xi + xi3);
	float3 pos;
	pos.x = s0 * e[0] + s1 * e[3] + s2 * e[6] + s3 * e[9];
	pos.y = s0 * e[1] + s1 * e[4] + s2 * e[7] + s3 * e[10];
	pos.z = s0 * e[2] + s1 * e[5] + s2 * e[8] + s3 * e[11];
	return pos;
}
// Evaluate the beam centerline velocity at normalized coordinate xi in [0,1],
// interpolating the nodal velocities with the same ANCF shape functions used
// for positions.
float3 ANCFSystem::getXYZVelocity(int elementIndex, double xi) {
	double a = elements[elementIndex].getLength_l();
	double* e = CASTD1(v_h);
	e = &e[12 * elementIndex];
	// Shape functions for [vel0, slope-rate0, vel1, slope-rate1].
	double xi3 = pow(xi, 3);
	double s0 = 1 - 3 * xi * xi + 2 * xi3;
	double s1 = a * (xi - 2 * xi * xi + xi3);
	double s2 = 3 * xi * xi - 2 * xi3;
	double s3 = a * (-xi * xi + xi3);
	float3 vel;
	vel.x = s0 * e[0] + s1 * e[3] + s2 * e[6] + s3 * e[9];
	vel.y = s0 * e[1] + s1 * e[4] + s2 * e[7] + s3 * e[10];
	vel.z = s0 * e[2] + s1 * e[5] + s2 * e[8] + s3 * e[11];
	return vel;
}
// Position of particle 'index' from the host array of packed (x,y,z) triples.
float3 ANCFSystem::getXYZPositionParticle(int index) {
	int base = 3 * index;
	return make_float3(pParticle_h[base], pParticle_h[base + 1],
			pParticle_h[base + 2]);
}
// Velocity of particle 'index' from the host array of packed (x,y,z) triples.
float3 ANCFSystem::getXYZVelocityParticle(int index) {
	int base = 3 * index;
	return make_float3(vParticle_h[base], vParticle_h[base + 1],
			vParticle_h[base + 2]);
}
// Dumps the assembled LHS matrix to ../lhs.dat in COO triplet form:
// a literal "symmetric" tag, then "rows cols nnz", then one
// "row col value" triplet per line.
// (Removed an unused `char filename[100]` local.)
int ANCFSystem::saveLHS() {
	posFile.open("../lhs.dat");
	posFile << "symmetric" << endl;
	posFile << anew_h.size() << " " << anew_h.size() << " " << lhsI_h.size()
			<< endl;
	for (int i = 0; i < lhsI_h.size(); i++) {
		posFile << lhsI_h[i] << " " << lhsJ_h[i] << " " << lhs_h[i] << endl;
	}
	posFile.close();

	return 0;
}
// Writes one CSV snapshot of the system to `fileName`: a header line with the
// element count, then one line per element with radius, length, and its 12
// nodal coordinates.  Device positions are copied to the host first so the
// dump reflects the current state.
// Fixes: inner loop counter shadowed the element loop's `i` (renamed to `j`);
// removed the unused `posPart` local and dead commented-out code.
int ANCFSystem::writeToFile(string fileName) {
	posFile.open(fileName.c_str());
	p_h = p_d; // sync positions: device -> host

	posFile << elements.size() << "," << endl;
	double* posAll = CASTD1(p_h);
	for (int i = 0; i < elements.size(); i++) {
		double l = elements[i].getLength_l();
		double r = elements[i].getRadius();
		double* pos = &posAll[12 * i];
		posFile << r << "," << l;
		for (int j = 0; j < 12; j++)
			posFile << "," << pos[j];
		posFile << "," << endl;
	}
	posFile.close();

	return 0;
}
| b4d18875d859a93f2ab0747286967baa90b210de.cu | #include <algorithm>
#include <vector>
#include "include.cuh"
#include "ANCFSystem.cuh"
#include <cusp/io/matrix_market.h>
//// linear operator y = A*x (for CUSP)
//class stencil: public cusp::linear_operator<double, cusp::device_memory> {
//public:
// typedef cusp::linear_operator<double, cusp::device_memory> super;
//
// int N;
// DeviceView massMatrix;
// DeviceView phiqMatrix;
// DeviceValueArrayView temp;
//
//// constructor
// stencil(int N, DeviceView lhs_mass, DeviceView lhs_phiq,
// DeviceValueArrayView tempVector) :
// super(N, N), N(N) {
// massMatrix = lhs_mass;
// phiqMatrix = lhs_phiq;
// temp = tempVector;
// }
//
//// linear operator y = A*x
// template<typename VectorType1, typename VectorType2>
// void operator()(const VectorType1& x, VectorType2& y) const {
//// obtain a raw pointer to device memory
// cusp::multiply(massMatrix, x, temp);
// cusp::multiply(phiqMatrix, x, y);
// cusp::blas::axpy(temp, y, 1);
// }
//};
// Default constructor: sets integrator defaults (HHT-alpha, step size, Newton
// iteration cap), configures the Spike linear solver, zeroes the simulation
// clocks, precomputes 3- and 5-point Gauss-Legendre quadrature data used by
// the internal-force integration, and opens the three output data files.
ANCFSystem::ANCFSystem()
{
	// Set default solver parameters
	setAlpha_HHT(-0.1);
	setTimeStep(1e-3);
	maxNewtonIterations = 20;
	// spike stuff
	partitions = 1;
	solverOptions.safeFactorization = true;
	solverOptions.trackReordering = true;
	solverOptions.maxNumIterations = 5000;
	//mySpmv = new SpmvFunctor(lhs);
	//	m_spmv = new MySpmv(lhs_mass, lhs_phiq, lhsVec);
	// -1 disables both periodic and iteration-triggered preconditioner refresh.
	preconditionerUpdateModulus = -1; // the preconditioner updates every ___ time steps
	preconditionerMaxKrylovIterations = -1; // the preconditioner updates if Krylov iterations are greater than ____ iterations
	// end spike stuff
	this->timeIndex = 0;
	this->time = 0;
	timeToSimulate = 0;
	simTime = 0;
	fullJacobian = 1;
	// 3-point Gauss-Legendre quadrature: weights wt3, abscissae pt3 on [-1, 1].
	wt3.push_back(5.0 / 9.0);
	wt3.push_back(8.0 / 9.0);
	wt3.push_back(5.0 / 9.0);
	pt3.push_back(-sqrt(3.0 / 5.0));
	pt3.push_back(0.0);
	pt3.push_back(sqrt(3.0 / 5.0));
	// 5-point Gauss-Legendre quadrature: weights wt5, abscissae pt5 on [-1, 1].
	wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
	wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
	wt5.push_back(128. / 225.);
	wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
	wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
	pt5.push_back(-(sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
	pt5.push_back(-(sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
	pt5.push_back(0.);
	pt5.push_back((sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
	pt5.push_back((sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
	// Contact/collision defaults.
	numCollisions = 0;
	numCollisionsSphere = 0;
	numContactPoints = 5;
	coefRestitution = .3;
	frictionCoef = .3;
	fileIndex = 0;
	// set up position files
	char filename1[100];
	char filename2[100];
	char filename3[100];
	sprintf(filename1, "position.dat");
	resultsFile1.open(filename1);
	sprintf(filename2, "energy.dat");
	resultsFile2.open(filename2);
	sprintf(filename3, "reactions.dat");
	resultsFile3.open(filename3);
}
// Maps an integer id to the Spike Krylov method used for the linear solves:
//   0 -> BiCGStab, 1 -> BiCGStab1, 2 -> BiCGStab2, 3 -> MINRES.
// Unknown ids previously fell through silently; they now emit a warning and
// leave the current setting unchanged (backward compatible).
void ANCFSystem::setSolverType(int solverType)
{
	switch(solverType) {
	case 0:
		solverOptions.solverType = spike::BiCGStab;
		break;
	case 1:
		solverOptions.solverType = spike::BiCGStab1;
		break;
	case 2:
		solverOptions.solverType = spike::BiCGStab2;
		break;
	case 3:
		solverOptions.solverType = spike::MINRES;
		break;
	default:
		printf("Warning: unknown solver type %d; keeping current setting.\n",
				solverType);
		break;
	}
}
// Enables (nonzero) or disables (zero) the Spike preconditioner.
void ANCFSystem::setPrecondType(int useSpike)
{
	if (useSpike)
		solverOptions.precondType = spike::Spike;
	else
		solverOptions.precondType = spike::None;
}
// Configures the HHT-alpha time integrator.  alpha should be > -1/3
// (typically -0.1); beta and gamma follow the standard second-order
// accurate HHT relations.
void ANCFSystem::setAlpha_HHT(double alpha) {
	alphaHHT = alpha;
	double oneMinusAlpha = 1 - alphaHHT;
	betaHHT = oneMinusAlpha * oneMinusAlpha * .25;
	gammaHHT = 0.5 - alphaHHT;
}
// Sets the integration step size and derives the Newton and Krylov
// tolerances from the requested positional precision.
void ANCFSystem::setTimeStep(double step_size, double precision)
{
	h = step_size;

	// Newton convergence is measured on accelerations, so the positional
	// precision is scaled by 1/h^2.
	double safety = 1;
	tol = safety * precision / (h * h);

	// Krylov stopping criteria track the Newton tolerance but are capped.
	solverOptions.relTol = std::min(0.01 * tol, 1e-6);
	solverOptions.absTol = 1e-10;
}
// Prints the integrator and linear-solver settings to stdout.
// Fixed typo in the Krylov line: "abdTol" -> "absTol".
void ANCFSystem::printSolverParams()
{
	printf("Step size: %e\n", h);
	printf("Newton tolerance: %e\n", tol);
	printf("Max. Newton iterations: %d\n", maxNewtonIterations);
	printf("Krylov relTol: %e absTol: %e\n", solverOptions.relTol, solverOptions.absTol);
	printf("Max. Krylov iterations: %d\n", solverOptions.maxNumIterations);
	printf("----------------------------\n");
}
// Registers `particle` with the system: stores its material record and
// appends its initial state to the host-side position/velocity arrays;
// accelerations and forces start at zero.  Returns the new particle count.
int ANCFSystem::addParticle(Particle* particle) {
	particle->setParticleIndex(particles.size());
	this->particles.push_back(*particle);

	// Per-particle material constants consumed by the GPU kernels.
	MaterialParticle material;
	material.E = particle->getElasticModulus();
	material.nu = particle->getNu();
	material.mass = particle->getMass();
	material.massInverse = 1.0 / particle->getMass();
	material.r = particle->getRadius();
	material.numContactPoints = 1;
	this->pMaterials_h.push_back(material);

	// Initial position and velocity, flattened as x, y, z.
	float3 pos0 = particle->getInitialPosition();
	float3 vel0 = particle->getInitialVelocity();
	const float state0[6] = { pos0.x, pos0.y, pos0.z, vel0.x, vel0.y, vel0.z };
	for (int k = 0; k < 3; k++)
		pParticle_h.push_back(state0[k]);
	for (int k = 0; k < 3; k++)
		vParticle_h.push_back(state0[3 + k]);

	// Acceleration and force accumulators start at zero.
	for (int k = 0; k < 3; k++) {
		aParticle_h.push_back(0.0);
		fParticle_h.push_back(0.0);
	}

	return particles.size();
}
// Registers `element` with the system: records its material properties,
// appends the 12 generalized coordinates of its two nodes to the host state
// vectors, sizes all per-element work arrays, precomputes the constant
// consistent gravity load vector, and reserves the dense 12x12 COO block of
// the LHS for this element.  Returns the new element count.
int ANCFSystem::addElement(Element* element) {
	//add the element
	element->setElementIndex(elements.size());
	this->elements.push_back(*element);
	// Per-element material constants consumed by the GPU kernels.
	Material material;
	material.E = element->getElasticModulus();
	material.l = element->getLength_l();
	material.nu = element->getNu();
	material.rho = element->getDensity();
	material.r = element->getRadius();
	material.numContactPoints = numContactPoints;
	this->materials.push_back(material);
	// update p: node 0 position + gradient, then node 1 position + gradient.
	Node node = element->getNode0();
	p_h.push_back(node.x);
	p_h.push_back(node.y);
	p_h.push_back(node.z);
	p_h.push_back(node.dx1);
	p_h.push_back(node.dy1);
	p_h.push_back(node.dz1);
	node = element->getNode1();
	p_h.push_back(node.x);
	p_h.push_back(node.y);
	p_h.push_back(node.z);
	p_h.push_back(node.dx1);
	p_h.push_back(node.dy1);
	p_h.push_back(node.dz1);
	// Size all per-DOF work arrays (12 DOF per element), zero-initialized.
	for (int i = 0; i < 12; i++) {
		e_h.push_back(0.0);
		v_h.push_back(0.0);
		a_h.push_back(0.0);
		anew_h.push_back(0.0);
		fint_h.push_back(0.0);
		fcon_h.push_back(0.0);
		fapp_h.push_back(0.0);
		phiqlam_h.push_back(0.0);
		delta_h.push_back(0.0);
		strainDerivative_h.push_back(0.0);
	}
	strain_h.push_back(0.0);
	// Shape-function scratch space (4 entries per element).
	for (int i = 0; i < 4; i++) {
		Sx_h.push_back(0.0);
		Sxx_h.push_back(0.0);
	}
	//update other vectors (no initial velocity or acceleration)
	double r = element->getRadius();
	double a = element->getLength_l();
	double rho = element->getDensity();
	double A = PI * r * r;
	// update external force vector (gravity)
	// Consistent nodal gravity load for the ANCF beam: translational DOFs get
	// rho*A*a*g/2, gradient DOFs get +/- rho*A*a^2*g/12.
	fext_h.push_back(rho * A * a * GRAVITYx / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYy / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYz / 0.2e1);
	fext_h.push_back(rho * A * a * a * GRAVITYx / 0.12e2);
	fext_h.push_back(rho * A * a * a * GRAVITYy / 0.12e2);
	fext_h.push_back(rho * A * a * a * GRAVITYz / 0.12e2);
	fext_h.push_back(rho * A * a * GRAVITYx / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYy / 0.2e1);
	fext_h.push_back(rho * A * a * GRAVITYz / 0.2e1);
	fext_h.push_back(-rho * A * a * a * GRAVITYx / 0.12e2);
	fext_h.push_back(-rho * A * a * a * GRAVITYy / 0.12e2);
	fext_h.push_back(-rho * A * a * a * GRAVITYz / 0.12e2);
	// Reserve the dense 12x12 diagonal block of the LHS (COO layout) for this
	// element; values are filled in later by the mass-matrix assembly.
	for (int i = 0; i < 12; i++) {
		for (int j = 0; j < 12; j++) {
			lhsI_h.push_back(i + 12 * (elements.size() - 1));
			lhsJ_h.push_back(j + 12 * (elements.size() - 1));
			lhs_h.push_back(0.0);
		}
	}
	return elements.size();
}
// Applies a concentrated force at local coordinate xi (0..1) of `element`,
// distributing it onto the element's 12 generalized nodal forces via the ANCF
// shape functions, then pushes the accumulated applied forces to the device.
// BUG FIX: the element length was previously stored in an `int`, truncating
// `getLength_l()` and corrupting every gradient-DOF force term for
// non-integer element lengths.
int ANCFSystem::addForce(Element* element, double xi, float3 force) {
	int index = element->getElementIndex();
	double l = element->getLength_l();

	fapp_h[0 + 12 * index] += (1 - 3 * xi * xi + 2 * pow(xi, 3)) * force.x;
	fapp_h[1 + 12 * index] += (1 - 3 * xi * xi + 2 * pow(xi, 3)) * force.y;
	fapp_h[2 + 12 * index] += (1 - 3 * xi * xi + 2 * pow(xi, 3)) * force.z;
	fapp_h[3 + 12 * index] += l * (xi - 2 * xi * xi + pow(xi, 3)) * force.x;
	fapp_h[4 + 12 * index] += l * (xi - 2 * xi * xi + pow(xi, 3)) * force.y;
	fapp_h[5 + 12 * index] += l * (xi - 2 * xi * xi + pow(xi, 3)) * force.z;
	fapp_h[6 + 12 * index] += (3 * xi * xi - 2 * pow(xi, 3)) * force.x;
	fapp_h[7 + 12 * index] += (3 * xi * xi - 2 * pow(xi, 3)) * force.y;
	fapp_h[8 + 12 * index] += (3 * xi * xi - 2 * pow(xi, 3)) * force.z;
	fapp_h[9 + 12 * index] += l * (-xi * xi + pow(xi, 3)) * force.x;
	fapp_h[10 + 12 * index] += l * (-xi * xi + pow(xi, 3)) * force.y;
	fapp_h[11 + 12 * index] += l * (-xi * xi + pow(xi, 3)) * force.z;

	// Sync the accumulated applied forces to the device.
	fapp_d = fapp_h;

	return 0;
}
// Zeroes the device-side applied (user) force accumulator fapp_d.
int ANCFSystem::clearAppliedForces() {
	thrust::fill(fapp_d.begin(), fapp_d.end(), 0.0); // clear applied forces
	return 0;
}
int ANCFSystem::updatePhiq() // used in Newton iteration, nice to keep it separate (but not memory efficient) - only needs to be done once (linear constraints)
{
	// Build the constraint Jacobian phi_q in COO form: each constraint row i
	// has +1 at its first DOF and, for node-node constraints (nodeNum2 != -1),
	// -1 at the second DOF.
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		phiqJ_h.push_back(i);
		phiqI_h.push_back(constraint.dofLoc.x);
		phiq_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			phiqJ_h.push_back(i);
			phiqI_h.push_back(constraint.dofLoc.y);
			phiq_h.push_back(-1.0);
		}
	}
	// Copy the COO triplets to the device.
	phiqI_d = phiqI_h;
	phiqJ_d = phiqJ_h;
	phiq_d = phiq_h;
	// Wrap the raw device arrays in cusp array views so they can back a
	// cusp COO matrix view without extra copies.
	thrust::device_ptr<int> wrapped_device_I(CASTI1(phiqI_d));
	DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I,
			wrapped_device_I + phiqI_d.size());
	thrust::device_ptr<int> wrapped_device_J(CASTI1(phiqJ_d));
	DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J,
			wrapped_device_J + phiqJ_d.size());
	thrust::device_ptr<double> wrapped_device_V(CASTD1(phiq_d));
	DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V,
			wrapped_device_V + phiq_d.size());
	// phiq is (num DOFs) x (num constraints); sorted row-major as required
	// for cusp spmv.
	phiq = DeviceView(12 * elements.size(), constraints.size(), phiq_d.size(),
			row_indices, column_indices, values);
	phiq.sort_by_row();
	return 0;
}
// Computes the scaled constraint violation vector (one thread per constraint):
//   phi[i] = factor * (p[x] - phi0[i])          for ground constraints (y == -1)
//   phi[i] = factor * (p[x] - p[y] - phi0[i])   for node-node constraints
// No shared memory is used, so no barrier is needed.
// FIX: removed a __syncthreads() that sat inside the divergent
// `if (i < numConstraints)` branch -- a barrier not reached by all threads of
// the block is undefined behavior, and it served no purpose here.
__global__ void calculateRHSlower(double* phi, double* p, double* phi0,
		double factor, int2* constraintPairs, int numConstraints) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;

	if (i < numConstraints) {
		int2 constraintPair = constraintPairs[i];

		if (constraintPair.y == -1) {
			phi[i] = factor * (p[constraintPair.x] - phi0[i]);
		} else {
			phi[i] = factor
					* (p[constraintPair.x] - p[constraintPair.y] - phi0[i]);
		}
	}
}
// Launches the kernel that evaluates the constraint violations at the current
// Newton iterate pnew, scaled by 1/(beta*h^2) to match the acceleration-level
// unknowns of the HHT Newton system.
int ANCFSystem::updatePhi() {
	calculateRHSlower<<<dimGridConstraint,dimBlockConstraint>>>(CASTD1(phi_d), CASTD1(pnew_d), CASTD1(phi0_d), 1.0/(betaHHT*h*h), CASTI2(constraintPairs_d), constraints.size());
	return 0;
}
// Advances the free particles one step of symplectic Euler, one thread per
// particle:  a = f/m + g;  v += h*a;  p += h*v.
// Arrays a, v, p, f are flattened (x, y, z) triples.
__global__ void updateParticleDynamics_GPU(double h, double* a, double* v,
		double* p, double* f, MaterialParticle* materials, int numParticles) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;

	if (i < numParticles) {
		int base = 3 * i;
		double mInv = materials[i].massInverse;

		a[base]     = mInv * f[base]     + GRAVITYx;
		a[base + 1] = mInv * f[base + 1] + GRAVITYy;
		a[base + 2] = mInv * f[base + 2] + GRAVITYz;

		for (int j = 0; j < 3; j++) {
			v[base + j] += h * a[base + j];
			p[base + j] += h * v[base + j];
		}
	}
}
// Host wrapper: launches the particle symplectic-Euler update kernel over all
// particles with the precomputed particle grid/block dimensions.
int ANCFSystem::updateParticleDynamics() {
	updateParticleDynamics_GPU<<<dimGridParticles,dimBlockParticles>>>(h,CASTD1(aParticle_d), CASTD1(vParticle_d), CASTD1(pParticle_d), CASTD1(fParticle_d), CASTMP(pMaterials_d), particles.size());
	return 0;
}
// Records the initial constraint offsets phi0 so that later constraint
// violations are measured relative to the starting configuration.
// Ground constraints (nodeNum2 == -1) store the constrained DOF's value;
// node-node constraints store the difference of the two DOFs.
// (Assumes phi0_h is empty on entry, as at initialization time.)
int ANCFSystem::calculateInitialPhi() {
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		double offset;
		if (constraint.nodeNum2 == -1) {
			offset = p_h[constraint.dofLoc.x];
		} else {
			offset = p_h[constraint.dofLoc.x] - p_h[constraint.dofLoc.y];
		}
		phi0_h.push_back(offset);
	}
	return 0;
}
// Copies every host-side state array to the device, then wraps the raw device
// storage in cusp array/matrix views (so the solver can operate on the same
// memory without copies), and finally computes the CUDA launch dimensions for
// the element, constraint, particle, and collision kernels.
int ANCFSystem::initializeDevice() {
	// Host -> device copies of all state arrays.
	pMaterials_d = pMaterials_h;
	pParticle_d = pParticle_h;
	vParticle_d = vParticle_h;
	aParticle_d = aParticle_h;
	fParticle_d = fParticle_h;
	materials_d = materials;
	strainDerivative_d = strainDerivative_h;
	// NOTE(review): seeded from strainDerivative_h (not a curvature array) --
	// both are zero-filled and same-sized, but verify this is intentional.
	curvatureDerivative_d = strainDerivative_h;
	strain_d = strain_h;
	Sx_d = Sx_h;
	Sxx_d = Sxx_h;
	e_d = e_h;
	p_d = p_h;
	v_d = v_h;
	a_d = a_h;
	pnew_d = p_h;
	vnew_d = v_h;
	anew_d = anew_h;
	fext_d = fext_h;
	fint_d = fint_h;
	fapp_d = fapp_h;
	fcon_d = fcon_h;
	phi_d = phi_h;
	phi0_d = phi0_h;
	phiqlam_d = phiqlam_h;
	delta_d = delta_h;
	constraintPairs_d = constraintPairs_h;
	lhsVec_d = anew_h;
	lhsI_d = lhsI_h;
	lhsJ_d = lhsJ_h;
	lhs_d = lhs_h;
	constraintsI_d = constraintsI_h;
	constraintsJ_d = constraintsJ_h;
	constraints_d = constraints_h;
	// Wrap raw device pointers so cusp views can alias the thrust storage.
	thrust::device_ptr<double> wrapped_device_e(CASTD1(e_d));
	thrust::device_ptr<double> wrapped_device_p(CASTD1(p_d));
	thrust::device_ptr<double> wrapped_device_v(CASTD1(v_d));
	thrust::device_ptr<double> wrapped_device_a(CASTD1(a_d));
	thrust::device_ptr<double> wrapped_device_pnew(CASTD1(pnew_d));
	thrust::device_ptr<double> wrapped_device_vnew(CASTD1(vnew_d));
	thrust::device_ptr<double> wrapped_device_anew(CASTD1(anew_d));
	thrust::device_ptr<double> wrapped_device_fext(CASTD1(fext_d));
	thrust::device_ptr<double> wrapped_device_fint(CASTD1(fint_d));
	thrust::device_ptr<double> wrapped_device_fapp(CASTD1(fapp_d));
	thrust::device_ptr<double> wrapped_device_fcon(CASTD1(fcon_d));
	thrust::device_ptr<double> wrapped_device_phi(CASTD1(phi_d));
	thrust::device_ptr<double> wrapped_device_phi0(CASTD1(phi0_d));
	thrust::device_ptr<double> wrapped_device_phiqlam(CASTD1(phiqlam_d));
	thrust::device_ptr<double> wrapped_device_delta(CASTD1(delta_d));
	thrust::device_ptr<double> wrapped_device_lhsVec(CASTD1(lhsVec_d));
	// Array views.  The "All" views span accelerations + Lagrange multipliers;
	// the Top/Bottom (and anew/lambda) pairs split that storage at
	// 12*numElements, i.e. at the DOF/constraint boundary.
	eAll = DeviceValueArrayView(wrapped_device_e,
			wrapped_device_e + e_d.size());
	eTop = DeviceValueArrayView(wrapped_device_e,
			wrapped_device_e + 12 * elements.size());
	eBottom = DeviceValueArrayView(wrapped_device_e + 12 * elements.size(),
			wrapped_device_e + e_d.size());
	p = DeviceValueArrayView(wrapped_device_p, wrapped_device_p + p_d.size());
	v = DeviceValueArrayView(wrapped_device_v, wrapped_device_v + v_d.size());
	a = DeviceValueArrayView(wrapped_device_a, wrapped_device_a + a_d.size());
	pnew = DeviceValueArrayView(wrapped_device_pnew,
			wrapped_device_pnew + pnew_d.size());
	vnew = DeviceValueArrayView(wrapped_device_vnew,
			wrapped_device_vnew + vnew_d.size());
	anewAll = DeviceValueArrayView(wrapped_device_anew,
			wrapped_device_anew + anew_d.size());
	anew = DeviceValueArrayView(wrapped_device_anew,
			wrapped_device_anew + 12 * elements.size());
	lambda = DeviceValueArrayView(wrapped_device_anew + 12 * elements.size(),
			wrapped_device_anew + anew_d.size());
	fext = DeviceValueArrayView(wrapped_device_fext,
			wrapped_device_fext + fext_d.size());
	fint = DeviceValueArrayView(wrapped_device_fint,
			wrapped_device_fint + fint_d.size());
	fapp = DeviceValueArrayView(wrapped_device_fapp,
			wrapped_device_fapp + fapp_d.size());
	fcon = DeviceValueArrayView(wrapped_device_fcon,
			wrapped_device_fcon + fcon_d.size());
	phi = DeviceValueArrayView(wrapped_device_phi,
			wrapped_device_phi + phi_d.size());
	phi0 = DeviceValueArrayView(wrapped_device_phi0,
			wrapped_device_phi0 + phi0_d.size());
	phiqlam = DeviceValueArrayView(wrapped_device_phiqlam,
			wrapped_device_phiqlam + phiqlam_d.size());
	delta = DeviceValueArrayView(wrapped_device_delta,
			wrapped_device_delta + delta_d.size());
	lhsVec = DeviceValueArrayView(wrapped_device_lhsVec,
			wrapped_device_lhsVec + lhsVec_d.size());
	// create lhs matrix using cusp library (shouldn't change)
	thrust::device_ptr<int> wrapped_device_I(CASTI1(lhsI_d));
	DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I,
			wrapped_device_I + lhsI_d.size());
	thrust::device_ptr<int> wrapped_device_J(CASTI1(lhsJ_d));
	DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J,
			wrapped_device_J + lhsJ_d.size());
	thrust::device_ptr<double> wrapped_device_V(CASTD1(lhs_d));
	DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V,
			wrapped_device_V + lhs_d.size());
	lhs = DeviceView(anew_d.size(), anew_d.size(), lhs_d.size(), row_indices,
			column_indices, values);
	// end create lhs matrix
	// create the view to the mass block of the lhs matrix
	// (the first 12*12*numElements COO entries are the element mass blocks)
	DeviceIndexArrayView row_indices_mass = DeviceIndexArrayView(
			wrapped_device_I, wrapped_device_I + 12 * 12 * elements.size());
	DeviceIndexArrayView column_indices_mass = DeviceIndexArrayView(
			wrapped_device_J, wrapped_device_J + 12 * 12 * elements.size());
	DeviceValueArrayView values_mass = DeviceValueArrayView(wrapped_device_V,
			wrapped_device_V + 12 * 12 * elements.size());
	lhs_mass = DeviceView(anew_d.size(), anew_d.size(),
			12 * 12 * elements.size(), row_indices_mass, column_indices_mass,
			values_mass);
	// end create the view to the mass block of the lhs matrix
	// create the view to the constraint (phi_q) block of the lhs matrix
	// (all remaining COO entries after the mass blocks)
	DeviceIndexArrayView row_indices_phiq = DeviceIndexArrayView(
			wrapped_device_I + 12 * 12 * elements.size(),
			wrapped_device_I + lhsI_d.size());
	DeviceIndexArrayView column_indices_phiq = DeviceIndexArrayView(
			wrapped_device_J + 12 * 12 * elements.size(),
			wrapped_device_J + lhsJ_d.size());
	DeviceValueArrayView values_phiq = DeviceValueArrayView(
			wrapped_device_V + 12 * 12 * elements.size(),
			wrapped_device_V + lhs_d.size());
	lhs_phiq = DeviceView(anew_d.size(), anew_d.size(),
			lhs_d.size() - 12 * 12 * elements.size(), row_indices_phiq,
			column_indices_phiq, values_phiq);
	lhs_phiq.sort_by_row(); // MUST BE SORTED FOR SPMV TO WORK CORRECTLY
	// end create the view to the constraint block of the lhs matrix
	// Launch configurations: one thread per constraint / element / particle.
	dimBlockConstraint.x = BLOCKDIMCONSTRAINT;
	dimGridConstraint.x = static_cast<int>(ceil(
			(static_cast<double>(constraints.size()))
					/ (static_cast<double>(BLOCKDIMCONSTRAINT))));
	dimBlockElement.x = BLOCKDIMELEMENT;
	dimGridElement.x = (int) ceil(
			((double) (elements.size())) / ((double) BLOCKDIMELEMENT));
	dimBlockParticles.x = BLOCKDIMELEMENT;
	dimGridParticles.x = (int) ceil(
			((double) (particles.size())) / ((double) BLOCKDIMELEMENT));
	dimBlockCollision.x = BLOCKDIMCOLLISION;
	dimGridCollision.x = (int) ceil(
			((double) (particles.size())) / ((double) BLOCKDIMCOLLISION));
	return 0;
}
// One-time system setup: builds the constraint Jacobian and initial constraint
// offsets, appends the constraint rows/columns to the saddle-point LHS in COO
// form, transfers everything to the device (initializeDevice), assembles the
// mass matrix and internal forces, and solves once for consistent initial
// accelerations/multipliers with the Spike solver.
int ANCFSystem::initializeSystem() {
	ANCFSystem::updatePhiq();
	ANCFSystem::calculateInitialPhi();
	// Grow the per-constraint portions of the solution/RHS vectors.
	for (int i = 0; i < constraints.size(); i++) {
		delta_h.push_back(0);
		e_h.push_back(0);
		anew_h.push_back(0);
		phi_h.push_back(0);
		constraintPairs_h.push_back(constraints[i].dofLoc);
	}
	// join phi_q to lhs
	// Constraint rows appended below the 12*numElements DOF rows.
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		lhsI_h.push_back(i + 12 * elements.size());
		lhsJ_h.push_back(constraint.dofLoc.x);
		lhs_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			lhsI_h.push_back(i + 12 * elements.size());
			lhsJ_h.push_back(constraint.dofLoc.y);
			lhs_h.push_back(-1.0);
		}
	}
	// join phi_q' to lhs
	// Transposed constraint block (columns), making the LHS symmetric.
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		lhsJ_h.push_back(i + 12 * elements.size());
		lhsI_h.push_back(constraint.dofLoc.x);
		lhs_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			lhsJ_h.push_back(i + 12 * elements.size());
			lhsI_h.push_back(constraint.dofLoc.y);
			lhs_h.push_back(-1.0);
		}
	}
	// Get constraints
	// Standalone copy of the constraint COO entries (rows then columns).
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		constraintsI_h.push_back(i + 12 * elements.size());
		constraintsJ_h.push_back(constraint.dofLoc.x);
		constraints_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			constraintsI_h.push_back(i + 12 * elements.size());
			constraintsJ_h.push_back(constraint.dofLoc.y);
			constraints_h.push_back(-1.0);
		}
	}
	// join phi_q' to lhs
	for (int i = 0; i < constraints.size(); i++) {
		Constraint constraint = constraints[i];
		constraintsJ_h.push_back(i + 12 * elements.size());
		constraintsI_h.push_back(constraint.dofLoc.x);
		constraints_h.push_back(1.0);
		if (constraint.nodeNum2 != -1) {
			constraintsJ_h.push_back(i + 12 * elements.size());
			constraintsI_h.push_back(constraint.dofLoc.y);
			constraints_h.push_back(-1.0);
		}
	}
	initializeDevice();
	//ANCFSystem::initializeBoundingBoxes_CPU();
	//detector.updateBoundingBoxes(aabb_data_d);
	//detector.setBoundingBoxPointer(&aabb_data_d);
	//detector.detectPossibleCollisions();
	ANCFSystem::resetLeftHandSideMatrix();
	ANCFSystem::updateInternalForces();
	//cusp::blas::axpy(fint,eTop,-1);
	// RHS for the initial acceleration solve: eTop = fext - fint.
	cusp::blas::axpby(fext, fint, eTop, 1, -1);
	// create and setup the Spike::GPU solver
	m_spmv = new MySpmv(lhs_mass, lhs_phiq, lhsVec);
	mySolver = new SpikeSolver(partitions, solverOptions);
	mySolver->setup(lhs);
	//	char filename[100];
	//	sprintf(filename, "./lhs.txt");
	//	cusp::io::write_matrix_market_file(lhs, filename);
	// Solve for initial accelerations and Lagrange multipliers.
	cusp::blas::fill(anewAll, 0);
	bool success = mySolver->solve(*m_spmv, eAll, anewAll);
	spike::Stats stats = mySolver->getStats();
	//	cout << "Success: " << success << " Iterations: "
	//			<< stats.numIterations << " relResidualNorm: "
	//			<< stats.relResidualNorm << endl;
	cout << endl
			<< "Linear problem size: " << eAll.size() << endl
			<< "Number partitions: " << stats.numPartitions << endl
			<< "Bandwidth after MC64: " << stats.bandwidthMC64 << endl
			<< "Bandwidth after RCM: " << stats.bandwidthReorder << endl
			<< "Bandwidth final: " << stats.bandwidth << endl
			<< "nuKf factor: " << stats.nuKf << endl << endl;
	// Seed the time-stepping state: a = anew, vnew = v, pnew = p.
	cusp::copy(anew, a);
	cusp::copy(v, vnew);
	cusp::copy(p, pnew);
	// Vectors for Spike solver stats
	spikeSolveTime.resize(maxNewtonIterations);
	spikeNumIter.resize(maxNewtonIterations);
	//ANCFSystem::updateParticleDynamics();
	return 0;
}
// Advances the system by one HHT-alpha step: predicts (p, v) from the previous
// accelerations, then performs Newton iterations on the acceleration-level
// saddle-point system, each solved with the Spike Krylov solver.  On Krylov
// failure the system is dumped to ./data for post-mortem analysis.  Also
// handles optional preconditioner refresh policies and timing.
int ANCFSystem::DoTimeStep() {
	// GPU timing of the whole step.
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	//ANCFSystem::updateParticleDynamics();
	stepKrylovIterations = 0;
	precUpdated = false;
	// update q and q_dot for initial guess
	// HHT predictor: pnew = p + h*v + .5*h^2*a, vnew = v + h*a.
	cusp::blas::axpbypcz(p, v, a, pnew, 1, h, .5 * h * h);
	cusp::blas::axpby(v, a, vnew, 1, h);
	// Force a preconditioner update if needed
	if ((preconditionerUpdateModulus > 0) && (timeIndex % preconditionerUpdateModulus == 0)) {
		mySolver->update(lhs.values);
		precUpdated = true;
		printf("Preconditioner updated (step condition)!\n");
	}
	// Perform Newton iterations
	int it;
	for (it = 0; it < maxNewtonIterations; it++) {
		// Assemble the residual: eTop = M*anew - fapp + fint - fext + phi_q'*lambda,
		// eBottom = phi (scaled constraint violations).
		ANCFSystem::updatePhi();
		cusp::multiply(phiq, lambda, phiqlam);
		ANCFSystem::resetLeftHandSideMatrix();
		cusp::multiply(lhs_mass, anew, eTop); //cusp::multiply(mass,anew,eTop);
		ANCFSystem::updateInternalForces();
		cusp::blas::axpbypcz(eTop, fapp, fint, eTop, 1, -1, 1);
		cusp::blas::axpby(eTop, fext, eTop, 1, -1);
		cusp::blas::axpy(phiqlam, eTop, 1);
		cusp::blas::copy(phi, eBottom);
		// SOLVE THE LINEAR SYSTEM USING SPIKE
		cusp::blas::fill(delta, 0); // very important
		//stencil lhsStencil(anewAll.size(), lhs_mass, lhs_phiq, lhsVec);
		bool success = mySolver->solve(*m_spmv, eAll, delta);
		spike::Stats stats = mySolver->getStats();
		if(!success) {
			// Dump the failing system and solver diagnostics, then decide
			// whether the failure is fatal.
			printf("**********  DUMP DATA **************\n");
			char filename[100];
			sprintf(filename, "./data/lhs%d.mtx", timeIndex);
			cusp::io::write_matrix_market_file(lhs, filename);
			sprintf(filename, "./data/rhs%d.mtx", timeIndex);
			cusp::io::write_matrix_market_file(eAll, filename);
			sprintf(filename, "./data/stats%d.txt", timeIndex);
			ofstream file(filename);
			file << "Code: " << mySolver->getMonitorCode();
			file << "  " << mySolver->getMonitorMessage() << std::endl;
			file << "Number of iterations = " << stats.numIterations << std::endl;
			file << "RHS norm             = " << stats.rhsNorm << std::endl;
			file << "Residual norm        = " << stats.residualNorm << std::endl;
			file << "Rel. residual norm   = " << stats.relResidualNorm << std::endl;
			file.close();
			int code = mySolver->getMonitorCode();
			if (code == -1 || code == -2) {
				//// TODO:  clean this up...
				std::cout << "STOP" << std::endl;
				exit(0);
			}
		}
		spikeSolveTime[it] = stats.timeSolve;
		spikeNumIter[it] = stats.numIterations;
		stepKrylovIterations += stats.numIterations;
		// END SOLVE THE LINEAR SYSTEM
		// update anew
		cusp::blas::axpy(delta, anewAll, -1);
		// update vnew
		cusp::blas::axpbypcz(v, a, anew, vnew, 1, h * (1 - gammaHHT), h * gammaHHT);
		// update pnew
		cusp::blas::axpbypcz(v, a, anew, pnew, h, h * h * .5 * (1 - 2 * betaHHT), h * h * .5 * 2 * betaHHT);
		cusp::blas::axpy(p, pnew, 1);
		// Calculate infinity norm of the correction and check for convergence
		double delta_nrm = cusp::blas::nrmmax(delta);
		// NOTE(review): %.2f applied to stats.numIterations -- presumably a
		// float in spike::Stats; confirm against the spike headers.
		printf("         Krylov solver: %8.2f ms    %.2f iterations     ||delta||_inf = %e\n",
				stats.timeSolve, stats.numIterations, delta_nrm);
		if (delta_nrm <= tol)
			break;
	}
	// Number of Newton iterations and average number of Krylov iterations
	stepNewtonIterations = it + 1;
	// NOTE(review): if both counters are integers this division truncates
	// before the float conversion -- verify their declared types.
	float avgKrylov = stepKrylovIterations / stepNewtonIterations;
	// If the average number of Krylov iterations per Newton iteration exceeds the specified limit,
	// force a preconditioner update.
	if ((preconditionerMaxKrylovIterations > 0) && (avgKrylov > preconditionerMaxKrylovIterations)) {
		ANCFSystem::updateInternalForces();
		mySolver->update(lhs.values);
		precUpdated = true;
		printf("Preconditioner updated! (krylov condition)\n");
	}
	// Accept the step: a = anew, v = vnew, p = pnew.
	cusp::copy(anew, a);
	cusp::copy(vnew, v);
	cusp::copy(pnew, p);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	stepTime = elapsedTime;
	timeToSimulate += elapsedTime / 1000.0;
	p_h = p_d;
	//	char filename[100];
	//	sprintf(filename, "./data/lhs%d.txt",timeIndex);
	//	cusp::io::write_matrix_market_file(lhs, filename);
	//
	//	char filename1[100];
	//	sprintf(filename1, "./data/rhs%d.txt",timeIndex);
	//	cusp::io::write_matrix_market_file(eAll, filename1);
	time += h;
	timeIndex++;
	printf("%f, Elapsed time = %8.2f ms, Newton = %2d, Ave. Krylov Per Newton = %.2f\n",
			time, elapsedTime, stepNewtonIterations, avgKrylov);
	return 0;
}
// Evaluates the ANCF cubic shape functions at local coordinate xi (0..1)
// of element `elementIndex` and returns the interpolated XYZ position,
// read from the host-side coordinate array p_h.
float3 ANCFSystem::getXYZPosition(int elementIndex, double xi) {
	double a = elements[elementIndex].getLength_l();
	double* e = CASTD1(p_h);
	e = &e[12 * elementIndex];

	// Hermite shape functions for the gradient-deficient ANCF beam.
	double s0 = 1 - 3 * xi * xi + 2 * pow(xi, 3);
	double s1 = a * (xi - 2 * xi * xi + pow(xi, 3));
	double s2 = 3 * xi * xi - 2 * pow(xi, 3);
	double s3 = a * (-xi * xi + pow(xi, 3));

	float3 pos;
	pos.x = s0 * e[0] + s1 * e[3] + s2 * e[6] + s3 * e[9];
	pos.y = s0 * e[1] + s1 * e[4] + s2 * e[7] + s3 * e[10];
	pos.z = s0 * e[2] + s1 * e[5] + s2 * e[8] + s3 * e[11];

	return pos;
}
// Evaluates the ANCF cubic shape functions at local coordinate xi (0..1)
// of element `elementIndex` and returns the interpolated XYZ velocity,
// read from the host-side velocity array v_h.
float3 ANCFSystem::getXYZVelocity(int elementIndex, double xi) {
	double a = elements[elementIndex].getLength_l();
	double* e = CASTD1(v_h);
	e = &e[12 * elementIndex];

	// Same Hermite shape functions as getXYZPosition.
	double s0 = 1 - 3 * xi * xi + 2 * pow(xi, 3);
	double s1 = a * (xi - 2 * xi * xi + pow(xi, 3));
	double s2 = 3 * xi * xi - 2 * pow(xi, 3);
	double s3 = a * (-xi * xi + pow(xi, 3));

	float3 vel;
	vel.x = s0 * e[0] + s1 * e[3] + s2 * e[6] + s3 * e[9];
	vel.y = s0 * e[1] + s1 * e[4] + s2 * e[7] + s3 * e[10];
	vel.z = s0 * e[2] + s1 * e[5] + s2 * e[8] + s3 * e[11];

	return vel;
}
// Returns the current position of particle `index` from the host array.
float3 ANCFSystem::getXYZPositionParticle(int index) {
	int base = 3 * index;
	return make_float3(pParticle_h[base], pParticle_h[base + 1], pParticle_h[base + 2]);
}
// Returns the current velocity of particle `index` from the host array.
float3 ANCFSystem::getXYZVelocityParticle(int index) {
	int base = 3 * index;
	return make_float3(vParticle_h[base], vParticle_h[base + 1], vParticle_h[base + 2]);
}
// Dumps the assembled LHS matrix to ../lhs.dat in COO triplet form:
// a literal "symmetric" tag, then "rows cols nnz", then one
// "row col value" triplet per line.
// (Removed an unused `char filename[100]` local.)
int ANCFSystem::saveLHS() {
	posFile.open("../lhs.dat");
	posFile << "symmetric" << endl;
	posFile << anew_h.size() << " " << anew_h.size() << " " << lhsI_h.size()
			<< endl;
	for (int i = 0; i < lhsI_h.size(); i++) {
		posFile << lhsI_h[i] << " " << lhsJ_h[i] << " " << lhs_h[i] << endl;
	}
	posFile.close();

	return 0;
}
// Writes one CSV snapshot of the system to `fileName`: a header line with the
// element count, then one line per element with radius, length, and its 12
// nodal coordinates.  Device positions are copied to the host first so the
// dump reflects the current state.
// Fixes: inner loop counter shadowed the element loop's `i` (renamed to `j`);
// removed the unused `posPart` local and dead commented-out code.
int ANCFSystem::writeToFile(string fileName) {
	posFile.open(fileName.c_str());
	p_h = p_d; // sync positions: device -> host

	posFile << elements.size() << "," << endl;
	double* posAll = CASTD1(p_h);
	for (int i = 0; i < elements.size(); i++) {
		double l = elements[i].getLength_l();
		double r = elements[i].getRadius();
		double* pos = &posAll[12 * i];
		posFile << r << "," << l;
		for (int j = 0; j < 12; j++)
			posFile << "," << pos[j];
		posFile << "," << endl;
	}
	posFile.close();

	return 0;
}
|
d4f4cfb14686836be864c3266751840685394565.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <omp.h>
#include <climits>
#include "hip/hip_runtime.h"
#include "Device.h"
#include "IndiceTools.h"
#include "HeatTransfertAdvanced.h"
using cpu::IntervalI;
// Builds the heat-transfer simulation: seeds the initial temperature image
// (all zeros) and the heater image (a hot central square plus warm corner
// spots), allocates the four device images, and uploads the seeds.
// BUG FIX: the original seeding loop was `while (s++ < totalPixels)`, which
// used the ALREADY-INCREMENTED index in the body -- element 0 was never
// initialized and the last iteration wrote one element past the end of both
// stack arrays.  Replaced with a standard for loop.
HeatTransfertAdvanced::HeatTransfertAdvanced(unsigned int width,
                                             unsigned int height,
                                             float propagationSpeed,
                                             string title,
                                             ComputeMode computeMode,
                                             bool isMultiGPU) : variateurN(IntervalI(0, INT_MAX), 1)
{
  // Inputs
  this->width = width;
  this->height = height;
  this->totalPixels = width * height;
  this->title = title;
  this->computeMode = computeMode;

  // Tools
  this->iteration = 0;
  this->propagationSpeed = propagationSpeed;
  this->NB_ITERATION_AVEUGLE = 20;  // render only every N steps
  this->isBufferA = true;

  // Cuda grid dimensions
  // this->db = dim3(32, 32, 1);
  // this->dg = dim3(16, 16, 1);

  // Check
  Device::assertDim(this->dg, this->db);
  Device::printAll();

  // Host-side seed images.  NOTE(review): runtime-sized stack arrays are a
  // compiler extension (VLA); large images could overflow the stack.
  float imageInit[this->totalPixels];
  float imageHeater[this->totalPixels];

  for (unsigned int s = 0; s < this->totalPixels; s++)
  {
    imageInit[s] = 0.0;

    int i, j;
    IndiceTools::toIJ(s, width, &i, &j);

    if (i >= 191 && i < 319 && j >= 191 && j < 319)
    {
      imageHeater[s] = 1.0;  // central heater block
    }
    else if ((i >= 113 && i < 123 && j >= 113 && j < 123) || (i >= 113 && i < 123 && j >= 387 && j < 397) || (i >= 387 && i < 397 && j >= 113 && j < 123)
          || (i >= 387 && i < 397 && j >= 387 && j < 397) || (i >= 387 && i < 397 && j >= 387 && j < 397) || (i >= 387 && i < 397 && j >= 387 && j < 397))
    {
      imageHeater[s] = 0.2;  // warm corner spots
    }
    else
    {
      imageHeater[s] = 0.0;
    }
  }

  // Size of all pixels of an image
  size_t arraySize = sizeof(float) * this->totalPixels;

  // Allocating memory to GPU
  HANDLE_ERROR(hipMalloc(&this->ptrDevImageHeater, arraySize));
  HANDLE_ERROR(hipMalloc(&this->ptrDevImageInit, arraySize));
  HANDLE_ERROR(hipMalloc(&this->ptrDevImageA, arraySize));
  HANDLE_ERROR(hipMalloc(&this->ptrDevImageB, arraySize));

  // Set a known value to any array representing an image
  HANDLE_ERROR(hipMemset(this->ptrDevImageHeater, 0, arraySize));
  HANDLE_ERROR(hipMemset(this->ptrDevImageInit, 0, arraySize));
  HANDLE_ERROR(hipMemset(this->ptrDevImageA, 0, arraySize));
  HANDLE_ERROR(hipMemset(this->ptrDevImageB, 0, arraySize));

  // Copy images from CPU to GPU
  HANDLE_ERROR(hipMemcpy(this->ptrDevImageHeater, imageHeater, arraySize, hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy(this->ptrDevImageInit, imageInit, arraySize, hipMemcpyHostToDevice));

  // Select the single- or multi-GPU step implementation once, up front.
  this->ptrProcessFunction = isMultiGPU ? &HeatTransfertAdvanced::processMultiGPU : &HeatTransfertAdvanced::processSingleGPU;
}
// Destructor: frees the four device images allocated in the constructor.
HeatTransfertAdvanced::~HeatTransfertAdvanced()
{
  // Release resources GPU side
  HANDLE_ERROR(hipFree(this->ptrDevImageHeater));
  HANDLE_ERROR(hipFree(this->ptrDevImageInit));
  HANDLE_ERROR(hipFree(this->ptrDevImageA));
  HANDLE_ERROR(hipFree(this->ptrDevImageB));
}
/**
 * Override
 * Dispatches one simulation step to the single- or multi-GPU implementation
 * selected at construction time (member-function pointer set from isMultiGPU).
 */
void HeatTransfertAdvanced::process(uchar4* ptrDevPixels, int width, int height)
{
  (this->*ptrProcessFunction)(ptrDevPixels, width, height);
}
void HeatTransfertAdvanced::processSingleGPU(uchar4* ptrDevPixels, int width, int height)
{
float* ptrImageOutput;
float* ptrImageInputANew;
if (this->isBufferA)
{
ptrImageInputANew = this->ptrDevImageA;
ptrImageOutput = this->ptrDevImageB;
}
else
{
ptrImageInputANew = this->ptrDevImageB;
ptrImageOutput = this->ptrDevImageA;
}
hipLaunchKernelGGL(( diffuseAdvanced), dim3(this->dg), dim3(this->db), 0, 0, ptrImageInputANew, ptrImageOutput, this->width, this->height, this->propagationSpeed, this->computeMode);
hipLaunchKernelGGL(( crushAdvanced), dim3(this->dg), dim3(this->db), 0, 0, this->ptrDevImageHeater, ptrImageOutput, this->totalPixels);
// diffusePerPixel<<<this->dg, this->db>>>(ptrImageInputANew, ptrImageOutput, this->width, this->height, this->propagationSpeed, this->computeMode);
// crushPerPixel<<<this->dg, this->db>>>(this->ptrDevImageHeater, ptrImageOutput, this->totalPixels);
if(this->iteration % this->NB_ITERATION_AVEUGLE == 0)
{
hipLaunchKernelGGL(( displayAdvanced), dim3(this->dg), dim3(this->db), 0, 0, ptrImageOutput, ptrDevPixels, this->totalPixels);
// displayPerPixel<<<this->dg, this->db>>>(ptrImageOutput, ptrDevPixels, this->totalPixels);
}
this->isBufferA = !this->isBufferA;
}
void HeatTransfertAdvanced::processMultiGPU(uchar4* ptrDevPixels, int width, int height)
{
}
/**
* Override
*/
void HeatTransfertAdvanced::animationStep()
{
this->iteration = this->variateurN.varierAndGet();
}
/**
* Override
*/
float HeatTransfertAdvanced::getAnimationPara()
{
return this->iteration;
}
/**
* Override
*/
int HeatTransfertAdvanced::getW()
{
return this->width;
}
/**
* Override
*/
int HeatTransfertAdvanced::getH()
{
return this->height;
}
/**
* Override
*/
string HeatTransfertAdvanced::getTitle()
{
return this->title;
}
| d4f4cfb14686836be864c3266751840685394565.cu | #include <iostream>
#include <omp.h>
#include <climits>
#include "cuda_runtime.h"
#include "Device.h"
#include "IndiceTools.h"
#include "HeatTransfertAdvanced.h"
using cpu::IntervalI;
HeatTransfertAdvanced::HeatTransfertAdvanced(unsigned int width,
unsigned int height,
float propagationSpeed,
string title,
ComputeMode computeMode,
bool isMultiGPU) : variateurN(IntervalI(0, INT_MAX), 1)
{
// Inputs
this->width = width;
this->height = height;
this->totalPixels = width * height;
this->title = title;
this->computeMode = computeMode;
// Tools
this->iteration = 0;
this->propagationSpeed = propagationSpeed;
this->NB_ITERATION_AVEUGLE = 20;
this->isBufferA = true;
// Cuda grid dimensions
// this->db = dim3(32, 32, 1);
// this->dg = dim3(16, 16, 1);
// Check
Device::assertDim(this->dg, this->db);
Device::printAll();
float imageInit[this->totalPixels];
float imageHeater[this->totalPixels];
unsigned int s = 0;
while(s++ < this->totalPixels)
{
imageInit[s] = 0.0;
int i, j;
IndiceTools::toIJ(s, width, &i, &j);
if (i >= 191 && i < 319 && j >= 191 && j < 319)
{
imageHeater[s] = 1.0;
}
else if ((i >= 113 && i < 123 && j >= 113 && j < 123) || (i >= 113 && i < 123 && j >= 387 && j < 397) || (i >= 387 && i < 397 && j >= 113 && j < 123)
|| (i >= 387 && i < 397 && j >= 387 && j < 397) || (i >= 387 && i < 397 && j >= 387 && j < 397) || (i >= 387 && i < 397 && j >= 387 && j < 397))
{
imageHeater[s] = 0.2;
}
else
{
imageHeater[s] = 0.0;
}
}
// Size of all pixels of an image
size_t arraySize = sizeof(float) * this->totalPixels;
// Allocating memory to GPU
HANDLE_ERROR(cudaMalloc(&this->ptrDevImageHeater, arraySize));
HANDLE_ERROR(cudaMalloc(&this->ptrDevImageInit, arraySize));
HANDLE_ERROR(cudaMalloc(&this->ptrDevImageA, arraySize));
HANDLE_ERROR(cudaMalloc(&this->ptrDevImageB, arraySize));
// Set a known value to any array representing an image
HANDLE_ERROR(cudaMemset(this->ptrDevImageHeater, 0, arraySize));
HANDLE_ERROR(cudaMemset(this->ptrDevImageInit, 0, arraySize));
HANDLE_ERROR(cudaMemset(this->ptrDevImageA, 0, arraySize));
HANDLE_ERROR(cudaMemset(this->ptrDevImageB, 0, arraySize));
// Copy images from CPU to GPU
HANDLE_ERROR(cudaMemcpy(this->ptrDevImageHeater, imageHeater, arraySize, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(this->ptrDevImageInit, imageInit, arraySize, cudaMemcpyHostToDevice));
this->ptrProcessFunction = isMultiGPU ? &HeatTransfertAdvanced::processMultiGPU : &HeatTransfertAdvanced::processSingleGPU;
}
HeatTransfertAdvanced::~HeatTransfertAdvanced()
{
// Release resources GPU side
HANDLE_ERROR(cudaFree(this->ptrDevImageHeater));
HANDLE_ERROR(cudaFree(this->ptrDevImageInit));
HANDLE_ERROR(cudaFree(this->ptrDevImageA));
HANDLE_ERROR(cudaFree(this->ptrDevImageB));
}
/**
* Override
*/
void HeatTransfertAdvanced::process(uchar4* ptrDevPixels, int width, int height)
{
(this->*ptrProcessFunction)(ptrDevPixels, width, height);
}
void HeatTransfertAdvanced::processSingleGPU(uchar4* ptrDevPixels, int width, int height)
{
float* ptrImageOutput;
float* ptrImageInputANew;
if (this->isBufferA)
{
ptrImageInputANew = this->ptrDevImageA;
ptrImageOutput = this->ptrDevImageB;
}
else
{
ptrImageInputANew = this->ptrDevImageB;
ptrImageOutput = this->ptrDevImageA;
}
diffuseAdvanced<<<this->dg, this->db>>>(ptrImageInputANew, ptrImageOutput, this->width, this->height, this->propagationSpeed, this->computeMode);
crushAdvanced<<<this->dg, this->db>>>(this->ptrDevImageHeater, ptrImageOutput, this->totalPixels);
// diffusePerPixel<<<this->dg, this->db>>>(ptrImageInputANew, ptrImageOutput, this->width, this->height, this->propagationSpeed, this->computeMode);
// crushPerPixel<<<this->dg, this->db>>>(this->ptrDevImageHeater, ptrImageOutput, this->totalPixels);
if(this->iteration % this->NB_ITERATION_AVEUGLE == 0)
{
displayAdvanced<<<this->dg, this->db>>>(ptrImageOutput, ptrDevPixels, this->totalPixels);
// displayPerPixel<<<this->dg, this->db>>>(ptrImageOutput, ptrDevPixels, this->totalPixels);
}
this->isBufferA = !this->isBufferA;
}
void HeatTransfertAdvanced::processMultiGPU(uchar4* ptrDevPixels, int width, int height)
{
}
/**
* Override
*/
void HeatTransfertAdvanced::animationStep()
{
this->iteration = this->variateurN.varierAndGet();
}
/**
* Override
*/
float HeatTransfertAdvanced::getAnimationPara()
{
return this->iteration;
}
/**
* Override
*/
int HeatTransfertAdvanced::getW()
{
return this->width;
}
/**
* Override
*/
int HeatTransfertAdvanced::getH()
{
return this->height;
}
/**
* Override
*/
string HeatTransfertAdvanced::getTitle()
{
return this->title;
}
|
5166165a5077593151c2d60520d753807a313c3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Programa: watts_strogatz.cu
* Compilar
nvcc watts_strogatz.cu -o watts_strogatz
* Ejecutar.
./watts_strogatz n m p threads
Donde:
* n: es la cantidad de vrtices de la red
* m: es la cantidad de vrtices vecinos con los que se conecta cada nodo
* p: es la probabilidad de reconexin
* threads: es la cantidad de hilos por bloque de la dimensin x
Descripcin de problema:
Watts y Strogatz (1998) presentan un modelo para construir redes de mundo pequeo.
El algoritmo comienza definiendo una grfica regular en forma de un anillo con n vrtices y m aristas por vrtices.
Posteriormente, se reconectan las aristas de forma aleatoria con una probabilidad p. Cuando p=0, no hay reconexin,
as que la red queda como anillo, es decir, un red regular. En el caso que $p=1$, la reconexin de las aristas
genera una red aleatoria. Para valores intermedios 0<p<1, se genera una red de mundo pequeo.
Refencia:
Latora, V., Nicosia, V., and Russo, G. (2017). Complex networks: principles, methods
and applications. Cambridge University Press.
*/
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <omp.h>
using namespace std;
/* Definimos los parmetros del modelo
* N: es la cantidad de vrtices de la red
* M: es la cantidad de vrtices vecinos con los que se conecta cada nodo
* P: es la probabilidad de reconexin
* HILOS: es la cantidad de hilos por bloque de la dimensin x
*/
int N;
int M;
int P_int;
float P;
int HILOS;
// Definimos las variables para el clculo de tiempo de las ejecuciones
double serialTimer = 0.0;
float parallelTimer = 0.0;
// Definimos el arreglo en 1D que contendr los valores de la matriz de adyacencia
int *h_value;
int *d_value;
// Definimos el arreglo en 1D que contendr los indices de las columnas
int *h_colidx;
int *d_colidx;
// Definimos el arreglo en 1D que contendr los indices de las filas
int *h_rowidx;
int *d_rowidx;
// Definimos un arreglo que contendr nmeros aleatorios tipo float
float *h_aleatorios_float;
float *d_aleatorios_float;
// Definimos un arreglo que contendr nmeros aleatorios tipo int
int *h_aleatorios_int;
int *d_aleatorios_int;
// Definimos los arreglos en 1D de la ejecucin secuencial
int *secuencial_value;
int *secuencial_colidx;
int *secuencial_rowidx;
// Definimos los mtodos
void crea_anillo_cpu();
double get_random();
bool in_edges_cpu(int node, int edge);
void watts_strogatz_cpu();
void reconecta_cpu(int node,int edge_original,int edge);
void gpu_watts_strogatz();
// Funcin para buscar vecino del device
__device__ bool in_edges_gpu(int node,int edge,int *d_colidx,int M){
bool flag=false;
for(int i=0; i<(M*2); i++){
if(d_colidx[(node*M*2)+i]==edge){
flag = true;
}
}
return flag;
}
// Kernel gpu_crea_anillo
__global__ void gpu_crea_anillo(int *value,int *rowidx, int *colidx, int N,int M){
int rownum = blockIdx.x * blockDim.x + threadIdx.x;
int colnum = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int offset ;
offset = rownum*M*2 + M;
// Conexin con su l vecino hacia adelante
value[offset + colnum] = 1 ;
rowidx[offset + colnum] = rownum;
colidx[offset + colnum] = ( rownum + (colnum+1)) % N;
// Conexin con su l vecino hacia atrs
value[offset - (colnum+1)] = 1 ;
rowidx[offset - (colnum+1)] = rownum;
int vecino = rownum - (colnum+1);
if (vecino<0) {
colidx[offset - (colnum+1)] = N + vecino ;
}else{
colidx[offset - (colnum+1)] = vecino ;
}
}
// Kernel gpu_compute_watts_strogatz que realiza la reconexin de los vrtices
__global__ void gpu_compute_watts_strogatz(int *value,int *rowidx,int *colidx, float *aleatorios_float, int *aleatorios_int,int N,int M,float P){
int rownum = blockIdx.x * blockDim.x + threadIdx.x;
int colnum = blockIdx.y * blockDim.y + threadIdx.y;
if (rownum <N && colnum <M) {
int l = (rownum + (colnum+1)) % N;
int offset = rownum*M*2 + M;
if(aleatorios_float[rownum+colnum]<P){
bool flag = true;
int aumenta = 0;
while (flag) {
int edge_vecino = aleatorios_int[(rownum + colnum +aumenta)%N];
bool esvecino = in_edges_gpu(rownum,edge_vecino,colidx,M);
if (esvecino || (rownum ==edge_vecino) || (l ==edge_vecino)) {
flag = true;
aumenta +=1;
}else{
//printf("Reconectamos la edge (%d,%d) con el nodo %d\n",rownum,l,edge_vecino);
colidx[offset+colnum]=edge_vecino;
flag = false;
}
}
}
}
}
int main(int argc, char const *argv[]) {
//Recibimos los parmetros
N =atoi(argv[1]);
M = atoi(argv[2]);
P_int = atoi(argv[3]);
P = (float)P_int/10;
HILOS = atoi(argv[4]);
//iter = atoi(argv[5]);
// Reservamos memoria para los arreglos de la ejecucin secuencial
secuencial_value = (int*)malloc( N*M*2* sizeof(int));
secuencial_colidx = (int*)malloc( N*M*2* sizeof(int));
secuencial_rowidx = (int*)malloc( N*M*2* sizeof(int));
// Reservamos memoria para los arreglos del host
h_value = (int*)malloc( N*M*2* sizeof(int));
h_colidx = (int*)malloc( N*M*2* sizeof(int));
h_rowidx = (int*)malloc( N*M*2* sizeof(int));
h_aleatorios_float = (float*)malloc( N*M* sizeof(float));
h_aleatorios_int = (int*)malloc( N*M* sizeof(int));
// Inicializamos h_aleatorios_float y h_aleatorios_int con OpenMP
{
#pragma omp parallel for schedule(static)
for (int i = 0; i < (N*M); i++) {
h_aleatorios_float[i] = get_random() ;
h_aleatorios_int[i] = get_random() * N ;
}
}
// Ejecucin Serial
clock_t start = clock();
// Realizamos la ejecucin secuencial
crea_anillo_cpu();
watts_strogatz_cpu();
clock_t end = clock();
// Imprimimos el tiempo de ejecucin serial en segundos
serialTimer = double (end-start) / double(CLOCKS_PER_SEC);
cout << "Tiempo serial: " << serialTimer << endl;
// Realizamos la ejecucin paralela
gpu_watts_strogatz();
// Imprimimos el tiempo de ejecucin paralela en segundos
cout << "Paralela: " <<(parallelTimer/1000) <<endl;
// Imprimimos el speedup
cout << "Speed-up: " << serialTimer / (parallelTimer /1000)<< "X"<<endl;
//printf("%d,%d,%d,%d,%f,%f,%f,%f\n",iter,N,M,HILOS,P,serialTimer,(parallelTimer /1000), (serialTimer / (parallelTimer /1000)));
// Liberamos memoria
free(h_value); free(h_colidx); free(h_rowidx); free(secuencial_value); free(secuencial_colidx); free(secuencial_rowidx);free(h_aleatorios_float); free(h_aleatorios_int);
hipFree(d_value); hipFree(d_colidx); hipFree(d_rowidx); hipFree(d_aleatorios_int); hipFree(d_aleatorios_float);
return 0;
}
/*
Mtodos de la ejecucin secuencial:
* void crea_anillo_cpu()
* void watts_strogatz_cpu()
* bool in_edges_cpu(int node,int edge)
* void reconecta_cpu(int node,int edge_original,int edge)
*/
// Genera el anillo
void crea_anillo_cpu() {
for (int i = 0; i < N; i++) {
int offset = i*M*2 + M;
for (int j = 1; j <= M; j++) {
// Conexin con su l vecino hacia adelante
secuencial_value[offset + (j-1)] = 1 ;
secuencial_rowidx[offset + (j-1)] = i;
secuencial_colidx[offset +(j-1)] = ( i + j) % N;
// Conexin con su l vecino hacia atrs
secuencial_value[offset - j] = 1 ;
secuencial_rowidx[offset - j] = i;
int vecino = i - j;
if (vecino<0) {
secuencial_colidx[offset - j] = N + vecino ;
}else{
secuencial_colidx[offset - j] = vecino ;
}
}
}
}
// Realiza la reconexin de aristas
void watts_strogatz_cpu(){
for (int i = 0; i < N; i++) {
for (int j = 1; j <= M; j++) {
int l = (i+j) % N;
if(h_aleatorios_float[i+(j-1)]<P){
bool flag = true;
int aumenta = 0;
while (flag) {
int edge_vecino = h_aleatorios_int[(i+(j-1)+aumenta)% N];
if((in_edges_cpu(i,edge_vecino)) || (i ==edge_vecino) || (l==edge_vecino)){
flag = true;
aumenta +=1;
}else{
reconecta_cpu(i,l,edge_vecino);
flag = false;
}
}
}
}
}
}
// Genera nmeros aleatorios de una distribucin uniforme
double get_random() { return ((double)rand() / (double)RAND_MAX); }
// Regresa true si ya existe la arista
bool in_edges_cpu(int node,int edge){
int *edges;
edges = (int*)malloc(M*2* sizeof(int));
for(int i=0; i <(M*2); i++){
edges[i]=secuencial_colidx[(node*M*2)+i];
}
bool flag=false;
for(int i=0; i<(M*2); i++){
if(edges[i]==edge){
flag = true;
}
}
return flag;
}
// Realiza la reconexin de la arista
void reconecta_cpu(int node,int edge_original,int edge){
for(int i=0; i <(M*2); i++){
if(secuencial_colidx[(node*M*2)+i]==edge_original){
secuencial_colidx[(node*M*2)+i]=edge;
}
}
}
/*
Ejecucin paralela
*/
void gpu_watts_strogatz() {
// Reservar memoria en device
hipMalloc((void **)&d_value, N*M*2* sizeof(int));
hipMalloc((void **)&d_colidx, N*M*2* sizeof(int));
hipMalloc((void **)&d_rowidx, N*M*2*sizeof(int));
hipMalloc((void **)&d_aleatorios_float, N*M*sizeof(float));
hipMalloc((void **)&d_aleatorios_int, N*M*sizeof(int));
// Transferir datos de host h_a device
hipMemcpy(d_aleatorios_float, h_aleatorios_float, N*M*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_aleatorios_int, h_aleatorios_int, N*M*sizeof(int), hipMemcpyHostToDevice);
// Definimos los bloques de la dimensin x
int blocks = ceil(N / HILOS) + 1;
int threads = HILOS;
// Definimos los timers para el tiempo de ejecucin
hipEvent_t start, stop;
// events to take time
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// Definimos las estructuras que contienen los bloques y los hilos por bloque
dim3 dimGrid(blocks,1, 1);
dim3 dimBlock(threads, M);
// Llamamos al kernel para crear el anillo en el device
hipLaunchKernelGGL(( gpu_crea_anillo), dim3(dimGrid),dim3(dimBlock), 0, 0, d_value,d_rowidx,d_colidx,N,M);
// Llamamos al kernel para realizar la desconexin de las aristas
hipLaunchKernelGGL(( gpu_compute_watts_strogatz), dim3(dimGrid), dim3(dimBlock), 0, 0, d_value,d_rowidx,d_colidx,d_aleatorios_float,d_aleatorios_int,N,M,P);
// Transferimo los arreglos que representan la matriz de adyacencia en COO del device al host
hipMemcpy(h_value, d_value, N*M*2* sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_rowidx, d_rowidx, N*M*2* sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_colidx, d_colidx, N*M*2* sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(¶llelTimer, start, stop);
}
| 5166165a5077593151c2d60520d753807a313c3c.cu | /*
Programa: watts_strogatz.cu
* Compilar
nvcc watts_strogatz.cu -o watts_strogatz
* Ejecutar.
./watts_strogatz n m p threads
Donde:
* n: es la cantidad de vértices de la red
* m: es la cantidad de vértices vecinos con los que se conecta cada nodo
* p: es la probabilidad de reconexión
* threads: es la cantidad de hilos por bloque de la dimensión x
Descripción de problema:
Watts y Strogatz (1998) presentan un modelo para construir redes de mundo pequeño.
El algoritmo comienza definiendo una gráfica regular en forma de un anillo con n vértices y m aristas por vértices.
Posteriormente, se reconectan las aristas de forma aleatoria con una probabilidad p. Cuando p=0, no hay reconexión,
así que la red queda como anillo, es decir, un red regular. En el caso que $p=1$, la reconexión de las aristas
genera una red aleatoria. Para valores intermedios 0<p<1, se genera una red de mundo pequeño.
Refencia:
Latora, V., Nicosia, V., and Russo, G. (2017). Complex networks: principles, methods
and applications. Cambridge University Press.
*/
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
#include <omp.h>
using namespace std;
/* Definimos los parámetros del modelo
* N: es la cantidad de vértices de la red
* M: es la cantidad de vértices vecinos con los que se conecta cada nodo
* P: es la probabilidad de reconexión
* HILOS: es la cantidad de hilos por bloque de la dimensión x
*/
int N;
int M;
int P_int;
float P;
int HILOS;
// Definimos las variables para el cálculo de tiempo de las ejecuciones
double serialTimer = 0.0;
float parallelTimer = 0.0;
// Definimos el arreglo en 1D que contendrá los valores de la matriz de adyacencia
int *h_value;
int *d_value;
// Definimos el arreglo en 1D que contendrá los indices de las columnas
int *h_colidx;
int *d_colidx;
// Definimos el arreglo en 1D que contendrá los indices de las filas
int *h_rowidx;
int *d_rowidx;
// Definimos un arreglo que contendrá números aleatorios tipo float
float *h_aleatorios_float;
float *d_aleatorios_float;
// Definimos un arreglo que contendrá números aleatorios tipo int
int *h_aleatorios_int;
int *d_aleatorios_int;
// Definimos los arreglos en 1D de la ejecución secuencial
int *secuencial_value;
int *secuencial_colidx;
int *secuencial_rowidx;
// Definimos los métodos
void crea_anillo_cpu();
double get_random();
bool in_edges_cpu(int node, int edge);
void watts_strogatz_cpu();
void reconecta_cpu(int node,int edge_original,int edge);
void gpu_watts_strogatz();
// Función para buscar vecino del device
__device__ bool in_edges_gpu(int node,int edge,int *d_colidx,int M){
bool flag=false;
for(int i=0; i<(M*2); i++){
if(d_colidx[(node*M*2)+i]==edge){
flag = true;
}
}
return flag;
}
// Kernel gpu_crea_anillo
__global__ void gpu_crea_anillo(int *value,int *rowidx, int *colidx, int N,int M){
int rownum = blockIdx.x * blockDim.x + threadIdx.x;
int colnum = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int offset ;
offset = rownum*M*2 + M;
// Conexión con su l vecino hacia adelante
value[offset + colnum] = 1 ;
rowidx[offset + colnum] = rownum;
colidx[offset + colnum] = ( rownum + (colnum+1)) % N;
// Conexión con su l vecino hacia atrás
value[offset - (colnum+1)] = 1 ;
rowidx[offset - (colnum+1)] = rownum;
int vecino = rownum - (colnum+1);
if (vecino<0) {
colidx[offset - (colnum+1)] = N + vecino ;
}else{
colidx[offset - (colnum+1)] = vecino ;
}
}
// Kernel gpu_compute_watts_strogatz que realiza la reconexión de los vértices
__global__ void gpu_compute_watts_strogatz(int *value,int *rowidx,int *colidx, float *aleatorios_float, int *aleatorios_int,int N,int M,float P){
int rownum = blockIdx.x * blockDim.x + threadIdx.x;
int colnum = blockIdx.y * blockDim.y + threadIdx.y;
if (rownum <N && colnum <M) {
int l = (rownum + (colnum+1)) % N;
int offset = rownum*M*2 + M;
if(aleatorios_float[rownum+colnum]<P){
bool flag = true;
int aumenta = 0;
while (flag) {
int edge_vecino = aleatorios_int[(rownum + colnum +aumenta)%N];
bool esvecino = in_edges_gpu(rownum,edge_vecino,colidx,M);
if (esvecino || (rownum ==edge_vecino) || (l ==edge_vecino)) {
flag = true;
aumenta +=1;
}else{
//printf("Reconectamos la edge (%d,%d) con el nodo %d\n",rownum,l,edge_vecino);
colidx[offset+colnum]=edge_vecino;
flag = false;
}
}
}
}
}
int main(int argc, char const *argv[]) {
//Recibimos los parámetros
N =atoi(argv[1]);
M = atoi(argv[2]);
P_int = atoi(argv[3]);
P = (float)P_int/10;
HILOS = atoi(argv[4]);
//iter = atoi(argv[5]);
// Reservamos memoria para los arreglos de la ejecución secuencial
secuencial_value = (int*)malloc( N*M*2* sizeof(int));
secuencial_colidx = (int*)malloc( N*M*2* sizeof(int));
secuencial_rowidx = (int*)malloc( N*M*2* sizeof(int));
// Reservamos memoria para los arreglos del host
h_value = (int*)malloc( N*M*2* sizeof(int));
h_colidx = (int*)malloc( N*M*2* sizeof(int));
h_rowidx = (int*)malloc( N*M*2* sizeof(int));
h_aleatorios_float = (float*)malloc( N*M* sizeof(float));
h_aleatorios_int = (int*)malloc( N*M* sizeof(int));
// Inicializamos h_aleatorios_float y h_aleatorios_int con OpenMP
{
#pragma omp parallel for schedule(static)
for (int i = 0; i < (N*M); i++) {
h_aleatorios_float[i] = get_random() ;
h_aleatorios_int[i] = get_random() * N ;
}
}
// Ejecución Serial
clock_t start = clock();
// Realizamos la ejecución secuencial
crea_anillo_cpu();
watts_strogatz_cpu();
clock_t end = clock();
// Imprimimos el tiempo de ejecución serial en segundos
serialTimer = double (end-start) / double(CLOCKS_PER_SEC);
cout << "Tiempo serial: " << serialTimer << endl;
// Realizamos la ejecución paralela
gpu_watts_strogatz();
// Imprimimos el tiempo de ejecución paralela en segundos
cout << "Paralela: " <<(parallelTimer/1000) <<endl;
// Imprimimos el speedup
cout << "Speed-up: " << serialTimer / (parallelTimer /1000)<< "X"<<endl;
//printf("%d,%d,%d,%d,%f,%f,%f,%f\n",iter,N,M,HILOS,P,serialTimer,(parallelTimer /1000), (serialTimer / (parallelTimer /1000)));
// Liberamos memoria
free(h_value); free(h_colidx); free(h_rowidx); free(secuencial_value); free(secuencial_colidx); free(secuencial_rowidx);free(h_aleatorios_float); free(h_aleatorios_int);
cudaFree(d_value); cudaFree(d_colidx); cudaFree(d_rowidx); cudaFree(d_aleatorios_int); cudaFree(d_aleatorios_float);
return 0;
}
/*
Métodos de la ejecución secuencial:
* void crea_anillo_cpu()
* void watts_strogatz_cpu()
* bool in_edges_cpu(int node,int edge)
* void reconecta_cpu(int node,int edge_original,int edge)
*/
// Genera el anillo
void crea_anillo_cpu() {
for (int i = 0; i < N; i++) {
int offset = i*M*2 + M;
for (int j = 1; j <= M; j++) {
// Conexión con su l vecino hacia adelante
secuencial_value[offset + (j-1)] = 1 ;
secuencial_rowidx[offset + (j-1)] = i;
secuencial_colidx[offset +(j-1)] = ( i + j) % N;
// Conexión con su l vecino hacia atrás
secuencial_value[offset - j] = 1 ;
secuencial_rowidx[offset - j] = i;
int vecino = i - j;
if (vecino<0) {
secuencial_colidx[offset - j] = N + vecino ;
}else{
secuencial_colidx[offset - j] = vecino ;
}
}
}
}
// Realiza la reconexión de aristas
void watts_strogatz_cpu(){
for (int i = 0; i < N; i++) {
for (int j = 1; j <= M; j++) {
int l = (i+j) % N;
if(h_aleatorios_float[i+(j-1)]<P){
bool flag = true;
int aumenta = 0;
while (flag) {
int edge_vecino = h_aleatorios_int[(i+(j-1)+aumenta)% N];
if((in_edges_cpu(i,edge_vecino)) || (i ==edge_vecino) || (l==edge_vecino)){
flag = true;
aumenta +=1;
}else{
reconecta_cpu(i,l,edge_vecino);
flag = false;
}
}
}
}
}
}
// Genera números aleatorios de una distribución uniforme
double get_random() { return ((double)rand() / (double)RAND_MAX); }
// Regresa true si ya existe la arista
bool in_edges_cpu(int node,int edge){
int *edges;
edges = (int*)malloc(M*2* sizeof(int));
for(int i=0; i <(M*2); i++){
edges[i]=secuencial_colidx[(node*M*2)+i];
}
bool flag=false;
for(int i=0; i<(M*2); i++){
if(edges[i]==edge){
flag = true;
}
}
return flag;
}
// Realiza la reconexión de la arista
void reconecta_cpu(int node,int edge_original,int edge){
for(int i=0; i <(M*2); i++){
if(secuencial_colidx[(node*M*2)+i]==edge_original){
secuencial_colidx[(node*M*2)+i]=edge;
}
}
}
/*
Ejecución paralela
*/
void gpu_watts_strogatz() {
// Reservar memoria en device
cudaMalloc((void **)&d_value, N*M*2* sizeof(int));
cudaMalloc((void **)&d_colidx, N*M*2* sizeof(int));
cudaMalloc((void **)&d_rowidx, N*M*2*sizeof(int));
cudaMalloc((void **)&d_aleatorios_float, N*M*sizeof(float));
cudaMalloc((void **)&d_aleatorios_int, N*M*sizeof(int));
// Transferir datos de host h_a device
cudaMemcpy(d_aleatorios_float, h_aleatorios_float, N*M*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_aleatorios_int, h_aleatorios_int, N*M*sizeof(int), cudaMemcpyHostToDevice);
// Definimos los bloques de la dimensión x
int blocks = ceil(N / HILOS) + 1;
int threads = HILOS;
// Definimos los timers para el tiempo de ejecución
cudaEvent_t start, stop;
// events to take time
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// Definimos las estructuras que contienen los bloques y los hilos por bloque
dim3 dimGrid(blocks,1, 1);
dim3 dimBlock(threads, M);
// Llamamos al kernel para crear el anillo en el device
gpu_crea_anillo<<<dimGrid,dimBlock>>>(d_value,d_rowidx,d_colidx,N,M);
// Llamamos al kernel para realizar la desconexión de las aristas
gpu_compute_watts_strogatz<<<dimGrid, dimBlock>>>(d_value,d_rowidx,d_colidx,d_aleatorios_float,d_aleatorios_int,N,M,P);
// Transferimo los arreglos que representan la matriz de adyacencia en COO del device al host
cudaMemcpy(h_value, d_value, N*M*2* sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_rowidx, d_rowidx, N*M*2* sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_colidx, d_colidx, N*M*2* sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(¶llelTimer, start, stop);
}
|
e08a30ed4d497fa1de8d7f3bccb87077c0230cc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include "mdp_structs.h"
#define MDP_INFINITY 999999999
struct timeval StartingTime;
void setTime(){
gettimeofday( &StartingTime, NULL );
}
double getTime(){
struct timeval PausingTime, ElapsedTime;
gettimeofday( &PausingTime, NULL );
timersub(&PausingTime, &StartingTime, &ElapsedTime);
return ElapsedTime.tv_sec*1000.0+ElapsedTime.tv_usec/1000.0; // Returning in milliseconds.
}
__global__ void mdp_sum_actions(
const int numtransitions,
const int numactions,
const struct transition * tmodel,
const float * util_prev,
float * action_utils) {
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
int thread_num = gridDim.x * blockDim.x;
int iter = numtransitions % thread_num ? numtransitions / thread_num + 1 : numtransitions / thread_num;
// Each thread is assigned a transition
// In each thread we must update the utility values of each action for each state
for (int i = 0; i < iter; i++) {
int dataid = thread_id + i * thread_num;
if (dataid < numtransitions) {
int action = tmodel[dataid].a;
int state = tmodel[dataid].s;
int tstate = tmodel[dataid].sp;
atomicAdd(&action_utils[state * numactions + action], tmodel[dataid].p * util_prev[tstate]);
}
}
}
__global__ void mdp_update_util(
const int numstates,
const int numactions,
const float discount,
const struct reward * reward_def,
float * util_curr,
const float * action_utils) {
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
int thread_num = gridDim.x * blockDim.x;
int iter = numstates % thread_num ? numstates / thread_num + 1 : numstates / thread_num;
// Once action util values are calculated, select the max to update the utility of the state s
// Here we consider each thread as a state rather than as a transition (above)
for (int i = 0; i < iter; i++) {
int state = thread_id + i * thread_num;
if (state < numstates) {
float amax = -MDP_INFINITY;
for (int j = 0; j < numactions; j++) {
amax = max(amax, action_utils[state * numactions + j]);
}
util_curr[state] = reward_def[state].reward + discount * amax;
}
}
}
double mdp(
const int numstates,
const int numtransitions,
const int numactions,
const float epsilon,
const float discount,
const int numBlocks,
const int blockSize,
const struct transition * tmodel,
const struct reward * reward_def,
float * util_curr) {
double elapsed_time = 0.0;
float * util_prev = (float *)malloc(sizeof(float) * numstates);
memcpy(util_prev, util_curr, sizeof(float) * numstates);
struct transition * d_tmodel;
struct reward * d_reward_def;
float * d_util_prev;
float * d_util_curr;
float * d_action_utils;
hipMalloc((void **)&d_tmodel, numtransitions * sizeof(struct transition));
hipMalloc((void **)&d_reward_def, numstates * sizeof(struct reward));
hipMalloc((void **)&d_util_prev, numstates * sizeof(float));
hipMalloc((void **)&d_util_curr, numstates * sizeof(float));
hipMalloc((void **)&d_action_utils, numstates * numactions * sizeof(float));
hipMemcpy(d_tmodel, tmodel, numtransitions * sizeof(struct transition), hipMemcpyHostToDevice);
hipMemcpy(d_reward_def, reward_def, numstates * sizeof(struct reward), hipMemcpyHostToDevice);
hipMemcpy(d_util_prev, util_prev, numstates * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_util_curr, util_curr, numstates * sizeof(float), hipMemcpyHostToDevice);
float delta = -1.0;
do {
// for (int i = 0; i < numstates; i++) {
// printf("%f\n", util_prev[i]);
// }
// for (int i = 0; i < numstates; i++) {
// printf("%f\n", util_curr[i]);
// }
//setTime();
hipMemset(d_action_utils, 0, numstates * numactions * sizeof(float));
setTime();
hipLaunchKernelGGL(( mdp_sum_actions), dim3(numBlocks), dim3(blockSize), 0, 0,
numtransitions, numactions, d_tmodel, d_util_prev, d_action_utils);
hipDeviceSynchronize();
//std::cout << "Took: " << getTime() << " ms." << std::endl;
///setTime();
hipLaunchKernelGGL(( mdp_update_util), dim3(numBlocks), dim3(blockSize), 0, 0,
numstates, numactions, discount, d_reward_def, d_util_curr, d_action_utils);
//hipDeviceSynchronize();
//std::cout << "Took: " << getTime() << " ms." << std::endl;
elapsed_time += getTime();
hipMemcpy(util_curr, d_util_curr, numstates * sizeof(float), hipMemcpyDeviceToHost);
//sleep(1);
// for (int i = 0; i < numstates; i++) {
// printf("%f\n", util_prev[i]);
// }
// for (int i = 0; i < numstates; i++) {
// printf("%f\n", util_curr[i]);
// }
delta = -1.0;
for (int i = 0; i < numstates; i++) {
delta = max(delta, abs(util_curr[i] - util_prev[i]));
}
if (delta < epsilon * (1.0 - discount) / discount) {
break;
}
float * temp = util_prev;
util_prev = util_curr;
util_curr = temp;
hipMemcpy(d_util_prev, util_prev, numstates * sizeof(float), hipMemcpyHostToDevice);
} while (true);
return elapsed_time;
} | e08a30ed4d497fa1de8d7f3bccb87077c0230cc7.cu | #include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include "mdp_structs.h"
#define MDP_INFINITY 999999999
struct timeval StartingTime;
void setTime(){
gettimeofday( &StartingTime, NULL );
}
double getTime(){
struct timeval PausingTime, ElapsedTime;
gettimeofday( &PausingTime, NULL );
timersub(&PausingTime, &StartingTime, &ElapsedTime);
return ElapsedTime.tv_sec*1000.0+ElapsedTime.tv_usec/1000.0; // Returning in milliseconds.
}
// Accumulates, for every (state, action) pair, the expected utility of the
// successor states: action_utils[s*numactions + a] += p * util_prev(sp) over
// all transitions.  action_utils must be zeroed before launch; updates use
// atomicAdd because several transitions may target the same pair.
__global__ void mdp_sum_actions(
    const int numtransitions,
    const int numactions,
    const struct transition * tmodel,
    const float * util_prev,
    float * action_utils) {
    // Grid-stride loop: each thread walks the transition list with a stride
    // of the total thread count, covering every element for any launch size.
    const int stride = gridDim.x * blockDim.x;
    for (int t = blockIdx.x * blockDim.x + threadIdx.x;
         t < numtransitions;
         t += stride) {
        const struct transition tr = tmodel[t];
        atomicAdd(&action_utils[tr.s * numactions + tr.a],
                  tr.p * util_prev[tr.sp]);
    }
}
// Bellman update: for each state, pick the best per-action expected utility
// produced by mdp_sum_actions and set
//   util_curr[s] = reward(s) + discount * max_a action_utils[s][a].
__global__ void mdp_update_util(
    const int numstates,
    const int numactions,
    const float discount,
    const struct reward * reward_def,
    float * util_curr,
    const float * action_utils) {
    // Grid-stride loop over states: one state per thread per pass.
    const int stride = gridDim.x * blockDim.x;
    for (int state = blockIdx.x * blockDim.x + threadIdx.x;
         state < numstates;
         state += stride) {
        float best = -MDP_INFINITY;
        for (int a = 0; a < numactions; a++) {
            best = max(best, action_utils[state * numactions + a]);
        }
        util_curr[state] = reward_def[state].reward + discount * best;
    }
}
/*
 * GPU value iteration for an MDP.
 *
 * Repeatedly launches mdp_sum_actions / mdp_update_util until the largest
 * per-state utility change drops below epsilon*(1-discount)/discount.
 * On return util_curr holds the converged utilities; the accumulated kernel
 * time in milliseconds (as measured by setTime()/getTime()) is returned.
 *
 * Fixes over the previous revision:
 *  - abs() was applied to a float difference; without <cmath> overloads that
 *    is the integer abs() and truncates toward zero, so any change below 1.0
 *    looked like convergence.  Replaced with fabsf()/fmaxf().
 *  - After an odd number of host-buffer swaps the newest utilities lived in
 *    the internal scratch buffer, not the caller's util_curr array; they are
 *    now copied back before returning.
 *  - The scratch buffer and all five device allocations are freed.
 */
double mdp(
    const int numstates,
    const int numtransitions,
    const int numactions,
    const float epsilon,
    const float discount,
    const int numBlocks,
    const int blockSize,
    const struct transition * tmodel,
    const struct reward * reward_def,
    float * util_curr) {
    double elapsed_time = 0.0;
    // Host copy of the previous sweep's utilities (swap partner of util_curr).
    float * util_prev = (float *)malloc(sizeof(float) * numstates);
    memcpy(util_prev, util_curr, sizeof(float) * numstates);
    // Remember the caller's buffer: util_prev/util_curr are pointer-swapped
    // below, so the final result may need copying back into it.
    float * const result_buf = util_curr;

    struct transition * d_tmodel;
    struct reward * d_reward_def;
    float * d_util_prev;
    float * d_util_curr;
    float * d_action_utils;
    cudaMalloc((void **)&d_tmodel, numtransitions * sizeof(struct transition));
    cudaMalloc((void **)&d_reward_def, numstates * sizeof(struct reward));
    cudaMalloc((void **)&d_util_prev, numstates * sizeof(float));
    cudaMalloc((void **)&d_util_curr, numstates * sizeof(float));
    cudaMalloc((void **)&d_action_utils, numstates * numactions * sizeof(float));

    cudaMemcpy(d_tmodel, tmodel, numtransitions * sizeof(struct transition), cudaMemcpyHostToDevice);
    cudaMemcpy(d_reward_def, reward_def, numstates * sizeof(struct reward), cudaMemcpyHostToDevice);
    cudaMemcpy(d_util_prev, util_prev, numstates * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_util_curr, util_curr, numstates * sizeof(float), cudaMemcpyHostToDevice);

    float delta = -1.0;
    do {
        // Per-(state,action) accumulators must start at zero each sweep.
        cudaMemset(d_action_utils, 0, numstates * numactions * sizeof(float));
        setTime();
        mdp_sum_actions<<<numBlocks, blockSize>>>(
            numtransitions, numactions, d_tmodel, d_util_prev, d_action_utils);
        cudaDeviceSynchronize();
        mdp_update_util<<<numBlocks, blockSize>>>(
            numstates, numactions, discount, d_reward_def, d_util_curr, d_action_utils);
        // NOTE: the timed region ends before mdp_update_util completes; the
        // blocking cudaMemcpy below synchronizes it outside the timer,
        // matching the original measurement semantics.
        elapsed_time += getTime();
        cudaMemcpy(util_curr, d_util_curr, numstates * sizeof(float), cudaMemcpyDeviceToHost);

        // Convergence test: largest absolute per-state change.
        // fabsf, not abs: the integer abs() truncates the float difference.
        delta = -1.0;
        for (int i = 0; i < numstates; i++) {
            delta = fmaxf(delta, fabsf(util_curr[i] - util_prev[i]));
        }
        if (delta < epsilon * (1.0 - discount) / discount) {
            break;
        }
        // Swap host roles: current becomes previous for the next sweep.
        float * temp = util_prev;
        util_prev = util_curr;
        util_curr = temp;
        cudaMemcpy(d_util_prev, util_prev, numstates * sizeof(float), cudaMemcpyHostToDevice);
    } while (true);

    // After an odd number of swaps the newest utilities sit in the scratch
    // buffer; make sure the caller's array receives them.
    if (util_curr != result_buf)
        memcpy(result_buf, util_curr, numstates * sizeof(float));
    // Exactly one of util_prev/util_curr is the malloc'd scratch buffer.
    free(util_curr == result_buf ? util_prev : util_curr);

    cudaFree(d_tmodel);
    cudaFree(d_reward_def);
    cudaFree(d_util_prev);
    cudaFree(d_util_curr);
    cudaFree(d_action_utils);
    return elapsed_time;
}
a19157d7c4049b4085bd353e1694712424e6eaaa.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <cmath>
#include <float.h>
#include <string>
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "utility.cuh"
#include "parameterData.h"
#include "liggghtsData.h"
#include "compartment.cuh"
using namespace std;
#define TWOWAYCOUPLING false
// MACROS
// Calling macros for error check and dump data to files to VaribleName.txt
#define DUMP(varName) dumpData(varName, #varName)
#define DUMP2D(varName) dump2DData(varName, #varName)
#define DUMP3D(varName) dump3DData(varName, #varName)
#define DUMPCSV(varName) dumpCSV(varName, #varName)
#define DUMP2DCSV(varName) dump2DCSV(varName, #varName)
#define DUMP3DCSV(varName) dump3DCSV(varName, #varName)
#define DUMPDIACSV(time, dia) dumpDiaCSV(time, dia, #dia)
#define DUMP2DCSV4MATLAB(varName) dump2DCSV4Matlab(varName, #varName)
// extern __shared__ double *d_sMeshXY, *d_ssMeshXY;
// ==================================== INITIALIZATION KERNEL ===================================================
// First initialization pass: fills the 2-D solid-bin meshes and the
// aggregation/breakage lookup tables.
// Launch assumption (unchanged from the original): gridDim.x == blockDim.x ==
// number of bins per dimension; blockIdx.x indexes the first-solid bin (row)
// and threadIdx.x the second-solid bin (column).
//
// Fix: the original computed d_sHigh by reading d_sMeshXY[bdx*(bix+1)+idx],
// an element written by a thread of a *different block*.  __syncthreads()
// only orders threads within one block, so that read raced with the writing
// block, and for the last row/column it also read one element past the end
// of the mesh arrays.  The edge values equal d_vs[bix+1] / d_vss[idx+1] by
// construction, so they are now computed directly from the inputs, which
// removes the race, the out-of-bounds read, and the redundant racy rewrites
// of the last row/column.
__global__ void initialization_kernel(double *d_vs, double *d_vss, size_t size2D, double fsVolCoeff, double ssVolCoeff, double fsVolBase, double ssVolBase, double *d_sAgg,
                                    double *d_ssAgg, int *d_sAggregationCheck, int *d_ssAggregationCheck, double *d_sLow, double *d_ssLow, double *d_sHigh, double *d_ssHigh,
                                    double *d_sMeshXY, double *d_ssMeshXY, int *d_sLoc, int *d_ssLoc, int *d_sInd, int *d_ssInd, double *d_sBreak, double *d_ssBreak,
                                    int *d_sLocBreak, int *d_ssLocBreak, int *d_sCheckB, int*d_ssCheckB, int *d_sIndB, int *d_ssIndB)
{
    int idx = threadIdx.x;      // second-solid bin (column)
    int bix = blockIdx.x;       // first-solid bin (row)
    int bdx = blockDim.x;       // bins per dimension
    int cell = bdx * bix + idx; // flat index of this thread's matrix entry

    d_sMeshXY[cell] = d_vs[bix];
    d_ssMeshXY[cell] = d_vss[idx];

    // Pairwise aggregate volumes and whether they still fit on the grid
    // (i.e. do not exceed the largest bin volume; assumes blockDim.x equals
    // the length of d_vs/d_vss).
    d_sAgg[cell] = d_vs[idx] + d_vs[bix];
    d_ssAgg[cell] = d_vss[idx] + d_vss[bix];
    d_sAggregationCheck[cell] = d_sAgg[cell] <= d_vs[bdx - 1] ? 1 : 0;
    d_ssAggregationCheck[cell] = d_ssAgg[cell] <= d_vss[bdx - 1] ? 1 : 0;

    // Lower cell edges are the cell's own mesh values; upper edges are the
    // next bin's volume along each axis, zero on the last row/column.
    d_sLow[cell] = d_vs[bix];
    d_ssLow[cell] = d_vss[idx];
    d_sHigh[cell] = (bix < bdx - 1) ? d_vs[bix + 1] : 0.0;
    d_ssHigh[cell] = (idx < bdx - 1) ? d_vss[idx + 1] : 0.0;

    // Geometric-grid bin location of each aggregate volume.
    d_sLoc[cell] = floor(log(d_sAgg[cell] / fsVolCoeff) / log(fsVolBase) + 1);
    d_ssLoc[cell] = floor(log(d_ssAgg[cell] / ssVolCoeff) / log(ssVolBase) + 1);
    d_sInd[cell] = (idx <= bix) ? (bix + 1) : (idx + 1);
    d_ssInd[cell] = (idx <= bix) ? (bix + 1) : (idx + 1);

    // Breakage fragment volumes, clamped at zero (a fragment cannot have
    // negative volume).  Consumed, transposed, by initialization_kernel2
    // after this kernel completes.
    double value = d_vs[idx] - d_vs[bix];
    double value1 = d_vss[idx] - d_vss[bix];
    d_sBreak[cell] = value < 0.0 ? 0.0 : value;
    d_ssBreak[cell] = value1 < 0.0 ? 0.0 : value1;
}
// Second initialization pass (launched after initialization_kernel has
// completed, so the d_sBreak / d_ssBreak tables it reads are final):
// derives breakage bin locations, validity flags, and index tables from the
// transposed breakage-volume entries.
__global__ void initialization_kernel2(double *d_vs, double *d_vss, size_t size2D, double fsVolCoeff, double ssVolCoeff, double fsVolBase, double ssVolBase, double *d_sAgg,
                                    double *d_ssAgg, int *d_sAggregationCheck, int *d_ssAggregationCheck, double *d_sLow, double *d_ssLow, double *d_sHigh, double *d_ssHigh,
                                    double *d_sMeshXY, double *d_ssMeshXY, int *d_sLoc, int *d_ssLoc, int *d_sInd, int *d_ssInd, double *d_sBreak, double *d_ssBreak,
                                    int *d_sLocBreak, int *d_ssLocBreak, int *d_sCheckB, int*d_ssCheckB, int *d_sIndB, int *d_ssIndB)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    const int dim = blockDim.x;
    const int cell = dim * row + col;   // this thread's output slot
    const int tcell = dim * col + row;  // transposed input slot

    // Geometric-grid location of each fragment volume (0 when no fragment).
    const double sFrag = d_sBreak[tcell];
    const double ssFrag = d_ssBreak[tcell];
    d_sLocBreak[cell] = (sFrag == 0) ? 0 : (floor(log(sFrag / fsVolCoeff) / log(fsVolBase) + 1));
    d_ssLocBreak[cell] = (ssFrag == 0) ? 0 : (floor(log(ssFrag / ssVolCoeff) / log(ssVolBase) + 1));
    __syncthreads();

    const int sLoc = d_sLocBreak[cell];
    const int ssLoc = d_ssLocBreak[cell];
    d_sCheckB[cell] = (sLoc >= 1) ? 1 : 0;
    d_ssCheckB[cell] = (ssLoc >= 1) ? 1 : 0;
    // Invalid (< 1) locations are redirected to the overflow index dim + 1.
    d_sIndB[cell] = (sLoc < 1) ? dim + 1 : sLoc;
    d_ssIndB[cell] = (ssLoc < 1) ? dim + 1 : ssLoc;
    __syncthreads();
}
// ================================= COMPARTMENT LAUNCH KERNEL ============================================================
// Per-timestep compartment setup kernel.  Launch layout: one block per
// compartment (blockIdx.x), one thread per (first-solid, second-solid) bin
// pair (threadIdx.x over size2D).  Copies this step's particle/liquid/gas
// distributions into the compartment input structs, zeroes every
// aggregation/breakage accumulator for the step, and derives per-bin
// liquid/gas fractions and internal volumes.
// NOTE(review): assumes blockDim.x == nFirstSolidBins * nSecondSolidBins and
// gridDim.x == nCompartments — confirm against the host launch.
__global__ void launchCompartment(CompartmentIn *d_compartmentIn, PreviousCompartmentIn *d_prevCompInData, CompartmentOut *d_compartmentOut, CompartmentDEMIn *d_compartmentDEMIn,
                                CompartmentVar *d_compVar, AggregationCompVar *d_aggCompVar, BreakageCompVar *d_brCompVar, double time, double timeStep, double initialTime,
                                double *d_fAllCompartments, double *d_flAllCompartments, double *d_fgAllCompartments, double *d_liquidAdditionRateAllCompartments,
                                unsigned int size2D, unsigned int size3D, unsigned int size4D, double *d_fIn, double initPorosity, double demTimeStep, int nFirstSolidBins, int nSecondSolidBins,
                                double granulatorLength, double partticleResTime, double premixTime, double liqAddTime, double consConst, double minPorosity, int nCompartments,
                                double granSatFactor, double aggKernelConst, double brkKernelConst)
{
    int bix = blockIdx.x;   // compartment index
    int bdx = blockDim.x;   // bins per compartment (size2D)
    int tix = threadIdx.x;  // flat bin index within the compartment

    int idx3 = bix * bdx + tix;  // global (compartment, bin) index
    // First-/second-solid bin coordinates of this thread.
    // NOTE(review): tix / nFirstSolidBins is already integer division, so the
    // floorf() is a no-op here.
    int s1 = (int) floorf(tix / nFirstSolidBins);
    int ss1 = tix % nSecondSolidBins;

    // One thread per block resets the per-compartment aggregation/breakage
    // totals accumulated later by consolidationAndMovementCalcs.
    if (tix ==0)
    {
        d_compartmentOut->formationThroughAggregation[bix] = 0.0;
        d_compartmentOut->depletionThroughAggregation[bix] = 0.0;
        d_compartmentOut->formationThroughBreakage[bix] = 0.0;
        d_compartmentOut->depletionThroughBreakage[bix] = 0.0;
    }
    // int tiy = threadIdx.y;

    // Reset the per-bin outputs for this step.
    d_compartmentOut->dfAlldt[idx3] = 0.0;
    d_compartmentOut->dfLiquiddt[idx3] = 0.0;
    d_compartmentOut->dfGasdt[idx3] = 0.0;
    d_compartmentOut->liquidBins[idx3] = 0.0;
    d_compartmentOut->gasBins[idx3] = 0.0;
    d_compartmentOut->internalVolumeBins[idx3] = 0.0;
    d_compartmentOut->externalVolumeBins[idx3] = 0.0;
    // int idx = bix * bdx * bdy + tiy * bdx + tix;
    //if (tiy == 0)

    // Load this compartment's current particle/liquid/gas distributions.
    d_compartmentIn->fAll[idx3] = d_fAllCompartments[idx3];
    d_compartmentIn->fLiquid[idx3] = d_flAllCompartments[idx3];
    d_compartmentIn->fGas[idx3] = d_fgAllCompartments[idx3];
    d_compartmentIn->liquidAdditionRate[bix] = d_liquidAdditionRateAllCompartments[bix];

    // Upstream inflow: compartment 0 receives the external feed d_fIn; every
    // other compartment sees its predecessor's distributions.
    if (bix == 0)
    {
        d_prevCompInData->fAllPreviousCompartment[idx3] = 0.0;
        d_prevCompInData->flPreviousCompartment[idx3] = 0.0;
        d_prevCompInData->fgPreviousCompartment[idx3] = 0.0;
        d_prevCompInData->fAllComingIn[idx3] = d_fIn[tix];
        d_prevCompInData->fgComingIn[idx3] = 0.0;
        double value = initPorosity * timeStep;
        // Gas carried in with the feed, scaled by initial porosity.
        d_prevCompInData->fgComingIn[idx3] = d_fIn[tix] * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) * value;
    }
    else
    {
        d_prevCompInData->fAllPreviousCompartment[idx3] = d_fAllCompartments[(bix - 1) * bdx + tix];
        d_prevCompInData->flPreviousCompartment[idx3] = d_flAllCompartments[(bix - 1) * bdx + tix];
        d_prevCompInData->fgPreviousCompartment[idx3] = d_fgAllCompartments[(bix - 1) * bdx + tix];
        d_prevCompInData->fAllComingIn[idx3] = 0.0;
        d_prevCompInData->fgComingIn[idx3] = 0.0;
    }
    __syncthreads();

    // Per-particle liquid/gas content; bins with (near-)zero particle count
    // are treated as empty to avoid division by zero.
    if (fabs(d_compartmentIn->fAll[idx3]) > 1e-16)
    {
        d_compartmentOut->liquidBins[idx3] = d_compartmentIn->fLiquid[idx3] / d_compartmentIn->fAll[idx3];
        d_compartmentOut->gasBins[idx3] = d_compartmentIn->fGas[idx3] / d_compartmentIn->fAll[idx3];
    }
    else
    {
        d_compartmentOut->liquidBins[idx3] = 0.0;
        d_compartmentOut->gasBins[idx3] = 0.0;
    }

    // Zero every aggregation accumulator for this step.
    d_aggCompVar->depletionThroughAggregation[idx3] = 0.0;
    d_aggCompVar->depletionOfGasThroughAggregation[idx3] = 0.0;
    d_aggCompVar->depletionOfLiquidThroughAggregation[idx3] = 0.0;
    d_aggCompVar->birthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->firstSolidBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->secondSolidBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->liquidBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->gasBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->firstSolidVolumeThroughAggregation[idx3] = 0.0;
    d_aggCompVar->secondSolidVolumeThroughAggregation[idx3] = 0.0;
    d_aggCompVar->birthAggLowHigh[idx3] = 0.0;
    d_aggCompVar->birthAggLowHighLiq[idx3] = 0.0;
    d_aggCompVar->birthAggLowHighGas[idx3] = 0.0;
    d_aggCompVar->birthAggHighLow[idx3] = 0.0;
    d_aggCompVar->birthAggHighLowLiq[idx3] = 0.0;
    d_aggCompVar->birthAggHighLowGas[idx3] = 0.0;
    d_aggCompVar->birthAggLowLow[idx3] = 0.0;
    d_aggCompVar->birthAggLowLowLiq[idx3] = 0.0;
    d_aggCompVar->birthAggLowLowGas[idx3] = 0.0;
    d_aggCompVar->birthAggHighHigh[idx3] = 0.0;
    d_aggCompVar->birthAggHighHighLiq[idx3] = 0.0;
    d_aggCompVar->birthAggHighHighGas[idx3] = 0.0;
    d_aggCompVar->formationThroughAggregationCA[idx3] = 0.0;
    d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] = 0.0;
    d_aggCompVar->formationOfGasThroughAggregationCA[idx3] = 0.0;

    // Zero every breakage accumulator for this step.
    d_brCompVar->fractionBreakage00[idx3] = 0.0;
    d_brCompVar->fractionBreakage01[idx3] = 0.0;
    d_brCompVar->fractionBreakage10[idx3] = 0.0;
    d_brCompVar->fractionBreakage11[idx3] = 0.0;
    d_brCompVar->transferThroughLiquidAddition[idx3] = 0.0;
    d_brCompVar->transferThroughConsolidation[idx3] = 0.0;
    d_brCompVar->depletionThroughBreakage[idx3] = 0.0;
    d_brCompVar->depletionOfLiquidthroughBreakage[idx3] = 0.0;
    d_brCompVar->depletionOfGasThroughBreakage[idx3] = 0.0;
    d_brCompVar->birthThroughBreakage1[idx3] = 0.0;
    d_brCompVar->birthThroughBreakage2[idx3] = 0.0;
    d_brCompVar->firstSolidBirthThroughBreakage[idx3] = 0.0;
    d_brCompVar->secondSolidBirthThroughBreakage[idx3] = 0.0;
    d_brCompVar->liquidBirthThroughBreakage2[idx3] = 0.0;
    d_brCompVar->liquidBirthThroughBreakage1[idx3] = 0.0;
    d_brCompVar->gasBirthThroughBreakage1[idx3] = 0.0;
    d_brCompVar->gasBirthThroughBreakage2[idx3] = 0.0;
    d_brCompVar->firstSolidVolumeThroughBreakage[idx3] = 0.0;
    d_brCompVar->secondSolidVolumeThroughBreakage[idx3] = 0.0;
    d_brCompVar->formationThroughBreakageCA[idx3] = 0.0;
    d_brCompVar->formationOfLiquidThroughBreakageCA[idx3] = 0.0;
    d_brCompVar->formationOfGasThroughBreakageCA[idx3] = 0.0;

    // Zero the scratch compartment variables.  volumeBins/meshXYSum are
    // indexed by tix only, so all blocks write the same slots with the same
    // values; totalSolidvolume[bix] is written by every thread of the block
    // (same 0.0 value).
    // NOTE(review): particleMovement/liquidMovement/gasMovement are zeroed
    // twice below — the second set of assignments is redundant.
    d_compVar->internalLiquid[idx3] = 0.0;
    d_compVar->externalLiquid[idx3] = 0.0;
    d_compVar->externalLiquidContent[idx3] = 0.0;
    d_compVar->volumeBins[tix] = 0.0;
    d_compVar->particleMovement[idx3] = 0.0;
    d_compVar->liquidMovement[idx3] = 0.0;
    d_compVar->gasMovement[idx3] = 0.0;
    d_compVar->meshXYSum[tix] = 0.0;
    d_compVar->totalSolidvolume[bix] = 0.0;
    d_compVar->particleMovement[idx3] = 0.0;
    d_compVar->liquidMovement[idx3] = 0.0;
    d_compVar->gasMovement[idx3] = 0.0;

    // Split per-particle liquid into internal (bounded by granSatFactor times
    // the gas/pore volume) and external (the remainder, clamped at zero).
    d_compVar->internalLiquid[idx3] = min((granSatFactor * d_compartmentOut->gasBins[idx3]), d_compartmentOut->liquidBins[idx3]);
    d_compVar->externalLiquid[idx3] = max(0.0, (d_compartmentOut->liquidBins[idx3] - d_compVar->internalLiquid[idx3]));

    // Internal granule volume: solids + internal liquid + gas.
    d_compartmentOut->internalVolumeBins[idx3] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix] + d_compVar->internalLiquid[idx3] + d_compartmentOut->gasBins[idx3];
    // Combined solid mesh volume per bin, consumed by the follow-up kernel.
    d_compVar->meshXYSum[tix] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix];
    __syncthreads();
}
// printf("d_compartmentOut->liquidBins = %f \n", d_compartmentOut->liquidBins[tix]);
// Second per-timestep kernel (launched after launchCompartment has filled
// d_compVar/d_compartmentIn): computes inter-compartment transport, liquid
// addition, consolidation, and assembles the net rates dfAlldt/dfLiquiddt/
// dfGasdt, plus per-compartment aggregation/breakage totals.
// Launch layout matches launchCompartment: one block per compartment, one
// thread per bin pair.
__global__ void consolidationAndMovementCalcs(CompartmentIn *d_compartmentIn, PreviousCompartmentIn *d_prevCompInData, CompartmentOut *d_compartmentOut, CompartmentDEMIn *d_compartmentDEMIn,
                                            CompartmentVar *d_compVar, AggregationCompVar *d_aggCompVar, BreakageCompVar *d_brCompVar, int nCompartments, double granulatorLength, double partticleResTime,
                                            double time, double timeStep, double premixTime, double liqAddTime, double initialTime, int nFirstSolidBins, int nSecondSolidBins, double consConst, double minPorosity)
{
    int bix = blockIdx.x;   // compartment index
    int bdx = blockDim.x;   // bins per compartment
    int tix = threadIdx.x;  // flat bin index within the compartment

    int idx3 = bix * bdx + tix;  // global (compartment, bin) index
    // First-/second-solid bin coordinates (integer division / modulo).
    int s1 = (int) floorf(tix / nFirstSolidBins);
    int ss1 = tix % nSecondSolidBins;

    // Largest combined mesh volume over all bins; every thread performs the
    // same serial reduction over meshXYSum (values written by the previous
    // kernel launch).
    double maxValue = -DBL_MAX;
    for (size_t d1 = 0; d1 < bdx; d1++)
    {
        maxValue = max(maxValue, d_compVar->meshXYSum[d1]);
    }
    __syncthreads();
    // Size-dependent transport weighting: large bins (near maxValue) move less.
    double valueMeshXY = 1 - (d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix]) / maxValue;

    // Fraction of a compartment length traveled this step.
    double distanceBetweenCompartments = granulatorLength / nCompartments;
    double particleAverageVelocity = granulatorLength / partticleResTime;
    double distanceMoved = particleAverageVelocity * timeStep / distanceBetweenCompartments;// value renamed as distanceMoved

    // Net particle / liquid / gas flux: feed + inflow from the previous
    // compartment - outflow from this one.
    d_compVar->particleMovement[idx3] = d_prevCompInData->fAllComingIn[idx3];
    d_compVar->particleMovement[idx3] += d_prevCompInData->fAllPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
    d_compVar->particleMovement[idx3] -= d_compartmentIn->fAll[idx3] * distanceMoved;

    d_compVar->liquidMovement[idx3] = d_prevCompInData->flPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
    d_compVar->liquidMovement[idx3] -= d_compartmentIn->fLiquid[idx3] * distanceMoved;

    d_compVar->gasMovement[idx3] = d_prevCompInData->fgComingIn[idx3];
    d_compVar->gasMovement[idx3] += d_prevCompInData->fgPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
    d_compVar->gasMovement[idx3] -= d_compartmentIn->fGas[idx3] * distanceMoved;

    double finalTime = premixTime + liqAddTime + initialTime;
    // One thread per block: scale the liquid addition rate by the timestep
    // inside the spray window (the rate was freshly loaded by
    // launchCompartment this step, so the scaling is applied once per step)
    // and accumulate the compartment's total solid volume.
    if (tix == 0)
    {
        if (time >= premixTime && time <= finalTime)
            d_compartmentIn->liquidAdditionRate[bix] *= timeStep;
        else
            d_compartmentIn->liquidAdditionRate[bix] = 0.0;

        for (int i = bix * bdx; i < (bix+1) * bdx; i++)
            d_compVar->totalSolidvolume[bix] += d_compartmentIn->fAll[i] * (d_compartmentIn->vs[(int) floorf((i - bix * bdx) / nFirstSolidBins)] + d_compartmentIn->vss[(i - bix * bdx) % nSecondSolidBins]);
    }
    // d_compVar->totalSolidvolume[bix] += d_compartmentIn->fAll[idx3] * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]);
    __syncthreads();

    // Net particle rate: transport + aggregation + breakage.
    d_compartmentOut->dfAlldt[idx3] = d_compVar->particleMovement[idx3];
    d_compartmentOut->dfAlldt[idx3] += d_aggCompVar->formationThroughAggregationCA[idx3] - d_aggCompVar->depletionThroughAggregation[idx3];
    d_compartmentOut->dfAlldt[idx3] += d_brCompVar->birthThroughBreakage1[idx3] + d_brCompVar->formationThroughBreakageCA[idx3] - d_brCompVar->depletionThroughBreakage[idx3];

    // Liquid added to this bin in proportion to its share of solid volume.
    if (d_compVar->totalSolidvolume[bix] > 1.0e-16)
        d_brCompVar->transferThroughLiquidAddition[idx3] = d_compartmentIn->liquidAdditionRate[bix] * ((d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) / d_compVar->totalSolidvolume[bix]);

    // Net liquid rate: transport + addition + aggregation + breakage.
    d_compartmentOut->dfLiquiddt[idx3] = d_compVar->liquidMovement[idx3];
    d_compartmentOut->dfLiquiddt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughLiquidAddition[idx3];
    d_compartmentOut->dfLiquiddt[idx3] += d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] - d_aggCompVar->depletionOfLiquidThroughAggregation[idx3];
    d_compartmentOut->dfLiquiddt[idx3] += d_brCompVar->liquidBirthThroughBreakage1[idx3] + d_brCompVar->formationOfLiquidThroughBreakageCA[idx3];
    d_compartmentOut->dfLiquiddt[idx3] -= d_brCompVar->depletionOfLiquidthroughBreakage[idx3];

    // Consolidation: gas is squeezed out of bins that still contain gas,
    // driven by consConst and the minimum porosity.
    if(d_compartmentIn->fGas[idx3] > 1.0e-16)
    {
        d_brCompVar->transferThroughConsolidation[idx3] = consConst * d_compartmentOut->internalVolumeBins[idx3] * ((1 - minPorosity) / (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]));
        d_brCompVar->transferThroughConsolidation[idx3] *= (d_compartmentOut->gasBins[idx3] - (minPorosity / (1-minPorosity)) * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) + d_compVar->internalLiquid[idx3]);
    }
    else
        d_brCompVar->transferThroughConsolidation[idx3] = 0.0;

    // Net gas rate: transport + consolidation + aggregation + breakage.
    d_compartmentOut->dfGasdt[idx3] = d_compVar->gasMovement[idx3];
    d_compartmentOut->dfGasdt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughConsolidation[idx3];
    d_compartmentOut->dfGasdt[idx3] += d_aggCompVar->formationOfGasThroughAggregationCA[idx3] - d_aggCompVar->depletionOfGasThroughAggregation[idx3];
    d_compartmentOut->dfGasdt[idx3] += d_brCompVar->gasBirthThroughBreakage1[idx3] + d_brCompVar->formationOfGasThroughBreakageCA[idx3];
    d_compartmentOut->dfGasdt[idx3] -= d_brCompVar->depletionOfGasThroughBreakage[idx3];
    __syncthreads();

    // One thread per block sums the per-bin aggregation/breakage terms into
    // the per-compartment totals (zeroed earlier by launchCompartment).
    if (tix == 0)
    {
        for (int i = bix * bdx; i < ((bix +1) * bdx); i++)
        {
            d_compartmentOut->formationThroughAggregation[bix] += d_aggCompVar->formationThroughAggregationCA[i];
            d_compartmentOut->depletionThroughAggregation[bix] += d_aggCompVar->depletionThroughAggregation[i];
            d_compartmentOut->formationThroughBreakage[bix] += d_brCompVar->formationThroughBreakageCA[i] + d_brCompVar->birthThroughBreakage1[i];
            d_compartmentOut->depletionThroughBreakage[bix] += d_brCompVar->depletionThroughBreakage[i];
        }
    }
}
// ===================================== MAIN FUNCTION ======================================================
int main(int argc, char *argv[])
{
cout << "Code begins..." << endl;
// Read passed arguments
string startTimeStr;
double startTime = 0.0;
liggghtsData *lData = nullptr;
parameterData *pData = nullptr;
string coreVal;
string diaVal;
string pbmInFilePath;
string timeVal;
if (argc <5)
{
cout << "All values are not available as imput parameters " << endl;
return 1;
}
pbmInFilePath = string(argv[1]);
coreVal = string(argv[2]);
diaVal = string(argv[3]);
timeVal = string(argv[4]);
pData = parameterData::getInstance();
pData->readPBMInputFile(pbmInFilePath);
int nCompartments = pData->nCompartments;
unsigned int nFirstSolidBins = pData->nFirstSolidBins;
unsigned int nSecondSolidBins = pData->nSecondSolidBins;
size_t size1D = nFirstSolidBins;
size_t size2D = size1D * nSecondSolidBins;
size_t size3D = size2D * nCompartments;
size_t size4D = size2D * size2D;
size_t size5D = size4D * nCompartments;
CompartmentIn compartmentIn(size2D, size5D, 0), x_compartmentIn(size2D, size5D, 1), *d_compartmentIn;
PreviousCompartmentIn prevCompInData(size2D, size5D, 0), x_prevCompInData(size2D, size5D, 1), *d_prevCompInData;
CompartmentOut compartmentOut(size2D, size5D, 0), x_compartmentOut(size2D, size5D, 1), *d_compartmentOut;
CompartmentDEMIn compartmentDEMIn(size2D, size5D, 0), x_compartmentDEMIn(size2D, size5D, 1), *d_compartmentDEMIn;
vector<double> h_vs(size1D, 0.0);
vector<double> h_vss(size1D, 0.0);
// Bin Initiation
double fsVolCoeff = pData->fsVolCoeff;
double fsVolBase = pData->fsVolBase;
for (size_t i = 0; i < nFirstSolidBins; i++)
h_vs[i] = fsVolCoeff * pow(fsVolBase, i); // m^3
double ssVolCoeff = pData->ssVolCoeff;
double ssVolBase = pData->ssVolBase;
for (size_t i = 0; i < nSecondSolidBins; i++)
h_vss[i] = ssVolCoeff * pow(ssVolBase, i); // m^3
arrayOfDouble2D diameter1 = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
diameter1[s][ss] = cbrt((6/M_PI) * (h_vs[s] + h_vss[ss]));
vector<double> diameter = linearize2DVector(diameter1);
vector<double> particleIn;
particleIn.push_back(726657587.0);
particleIn.push_back(286654401.0);
particleIn.push_back(118218011.0);
particleIn.push_back(50319795.0);
particleIn.push_back(20954036.0);
particleIn.push_back(7345998.0);
particleIn.push_back(1500147.0);
particleIn.push_back(76518.0);
particleIn.push_back(149.0);
vector<double> h_fIn(size2D, 0.0);
for (size_t i = 0; i < particleIn.size(); i++)
h_fIn[i * size1D + i] = particleIn[i];
// allocation of memory for the matrices that will be copied onto the device from the host
double *d_vs = device_alloc_double_vector(size1D);
double *d_vss = device_alloc_double_vector(size1D);
double *d_sMeshXY = device_alloc_double_vector(size2D);
double *d_ssMeshXY = device_alloc_double_vector(size2D);
double *d_sAgg = device_alloc_double_vector(size2D);
double *d_ssAgg = device_alloc_double_vector(size2D);
int *d_sAggregationCheck = device_alloc_integer_vector(size2D);
int *d_ssAggregationCheck = device_alloc_integer_vector(size2D);
double *d_sLow = device_alloc_double_vector(size2D);
double *d_ssLow = device_alloc_double_vector(size2D);
double *d_sHigh = device_alloc_double_vector(size2D);
double *d_ssHigh = device_alloc_double_vector(size2D);
int *d_sLoc = device_alloc_integer_vector(size2D);
int *d_ssLoc = device_alloc_integer_vector(size2D);
int *d_sInd = device_alloc_integer_vector(size2D);
int *d_ssInd = device_alloc_integer_vector(size2D);
double *d_sBreak = device_alloc_double_vector(size2D);
double *d_ssBreak = device_alloc_double_vector(size2D);
int *d_sLocBreak = device_alloc_integer_vector(size2D);
int *d_ssLocBreak = device_alloc_integer_vector(size2D);
int *d_sCheckB = device_alloc_integer_vector(size2D);
int *d_ssCheckB = device_alloc_integer_vector(size2D);
int *d_sIndB = device_alloc_integer_vector(size2D);
int *d_ssIndB = device_alloc_integer_vector(size2D);
// defining vectors for data required for compartment calculations
vector<double> h_sMeshXY(size2D, 0.0);
vector<double> h_ssMeshXY(size2D, 0.0);
vector<int> h_sAggregationCheck(size2D, 0);
vector<int> h_ssAggregationCheck(size2D, 0);
vector<double> h_sLow(size2D, 0.0);
vector<double> h_ssLow(size2D, 0.0);
vector<double> h_sHigh(size2D, 0.0);
vector<double> h_ssHigh(size2D, 0.0);
vector<int> h_sInd(size2D, 0);
vector<int> h_ssInd(size2D, 0);
vector<int> h_sLoc(size2D, 0);
vector<int> h_ssLoc(size2D, 0);
vector<int> h_sCheckB(size2D, 0);
vector<int> h_ssCheckB(size2D, 0);
vector<int> h_sIndB(size2D, 0.0);
vector<int> h_ssIndB(size2D, 0.0);
vector<int> h_sLocBreak(size2D, 0.0);
vector<int> h_ssLocBreak(size2D, 0.0);
vector<double> h_sBreak(size2D, 0.0);
vector<double> h_ssBreak(size2D, 0.0);
copy_double_vector_fromHtoD(d_vs, h_vs.data(), size1D);
copy_double_vector_fromHtoD(d_vss, h_vss.data(), size1D);
int nBlocks = nFirstSolidBins;
int nThreads = nSecondSolidBins;
hipLaunchKernelGGL(( initialization_kernel), dim3(nBlocks),dim3(nThreads), 0, 0, d_vs, d_vss, size2D, fsVolCoeff, ssVolCoeff, fsVolBase, ssVolBase, d_sAgg,d_ssAgg, d_sAggregationCheck, d_ssAggregationCheck,
d_sLow, d_ssLow, d_sHigh, d_ssHigh, d_sMeshXY, d_ssMeshXY, d_sLoc, d_ssLoc, d_sInd, d_ssInd, d_sBreak, d_ssBreak, d_sLocBreak, d_ssLocBreak,
d_sCheckB, d_ssCheckB, d_sIndB, d_ssIndB);
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( initialization_kernel2), dim3(nBlocks),dim3(nThreads), 0, 0, d_vs, d_vss, size2D, fsVolCoeff, ssVolCoeff, fsVolBase, ssVolBase, d_sAgg,d_ssAgg, d_sAggregationCheck, d_ssAggregationCheck,
d_sLow, d_ssLow, d_sHigh, d_ssHigh, d_sMeshXY, d_ssMeshXY, d_sLoc, d_ssLoc, d_sInd, d_ssInd, d_sBreak, d_ssBreak, d_sLocBreak, d_ssLocBreak,
d_sCheckB, d_ssCheckB, d_sIndB, d_ssIndB);
err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
cout << "Initialization complete" << endl;
// copy back data required for the compartment calculations
copy_double_vector_fromDtoH(h_vs.data(), d_vs, size1D);
copy_double_vector_fromDtoH(h_vss.data(), d_vss, size1D);
copy_double_vector_fromDtoH(h_sMeshXY.data(), d_sMeshXY, size2D);
copy_double_vector_fromDtoH(h_ssMeshXY.data(), d_ssMeshXY, size2D);
copy_integer_vector_fromDtoH(h_sAggregationCheck.data(), d_sAggregationCheck, size2D);
copy_integer_vector_fromDtoH(h_ssAggregationCheck.data(), d_ssAggregationCheck, size2D);
copy_double_vector_fromDtoH(h_sLow.data(), d_sLow, size2D);
copy_double_vector_fromDtoH(h_ssLow.data(), d_ssLow, size2D);
copy_double_vector_fromDtoH(h_sHigh.data(), d_sHigh, size2D);
copy_double_vector_fromDtoH(h_ssHigh.data(), d_ssHigh, size2D);
copy_integer_vector_fromDtoH(h_sInd.data(), d_sInd, size2D);
copy_integer_vector_fromDtoH(h_ssInd.data(), d_ssInd, size2D);
copy_integer_vector_fromDtoH(h_sLoc.data(), d_sLoc, size2D);
copy_integer_vector_fromDtoH(h_ssLoc.data(), d_ssLoc, size2D);
copy_integer_vector_fromDtoH(h_sCheckB.data(), d_sCheckB, size2D);
copy_integer_vector_fromDtoH(h_ssCheckB.data(), d_ssCheckB, size2D);
copy_integer_vector_fromDtoH(h_sIndB.data(), d_sIndB, size2D);
copy_integer_vector_fromDtoH(h_ssIndB.data(), d_ssIndB, size2D);
copy_integer_vector_fromDtoH(h_sLocBreak.data(), d_sLocBreak, size2D);
copy_integer_vector_fromDtoH(h_ssLocBreak.data(), d_ssLocBreak, size2D);
copy_double_vector_fromDtoH(h_sBreak.data(), d_sBreak, size2D);
copy_double_vector_fromDtoH(h_ssBreak.data(), d_ssBreak, size2D);
hipDeviceSynchronize();
// DUMP(h_sMeshXY);
// DUMP(h_ssMeshXY);
// DUMP(h_sAggregationCheck);
// DUMP(h_ssAggregationCheck);
// DUMP(h_sLow);
// DUMP(h_ssLow);
// DUMP(h_sHigh);
// DUMP(h_ssHigh);
// DUMP(h_sInd);
// DUMP(h_ssInd);
// DUMP(h_sCheckB);
// DUMP(h_ssCheckB);
// DUMP(h_sIndB);
// DUMP(h_ssIndB);
// DUMP(h_sLocBreak);
// DUMP(h_ssLocBreak);
// DUMP(h_sBreak);
// DUMP(h_ssBreak);
// DUMP(h_sLoc);
// DUMP(h_ssLoc);
vector<double> h_fAllCompartments(size3D, 0.0);
vector<double> h_flAllCompartments(size3D, 0.0);
vector<double> h_fgAllCompartments(size3D, 0.0);
vector<double> h_dfdtAllCompartments(size3D, 0.0);
vector<double> h_dfldtAllCompartments(size3D, 0.0);
vector<double> h_dfgdtAllCompartments(size3D, 0.0);
vector<double> h_externalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_liquidBinsAllCompartments(size3D, 0.0);
vector<double> h_gasBinsAllCompartments(size3D, 0.0);
vector<double> h_totalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalLiquidAllCompartments(size3D, 0.0);
vector<double> h_externalLiquidAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBins(size2D, 0.0);
vector<double> h_externalVolumeBins(size2D, 0.0);
lData = liggghtsData::getInstance();
lData->readLiggghtsDataFiles(coreVal, diaVal);
vector<double> DEMDiameter = lData->getDEMParticleDiameters();
if ((DEMDiameter).size() == 0)
{
cout << "Diameter data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> DEMImpactData = lData->getFinalDEMImpactData();
if ((DEMImpactData).size() == 0)
{
cout << "Impact data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
arrayOfDouble2D DEMCollisionData = lData->getFinalDEMCollisionData();
if (DEMCollisionData.size() == 0)
{
cout << "Collision data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> velocity = lData->getFinalDEMImpactVelocity();
if (velocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> colVelocity = lData->getFinalDEMCollisionVelocity();
if (colVelocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS collision output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
// moved velocity based probability calculation to the model from kernel.cpp to reduce computation
double demTimeStep = pData->demTimeStep;
copy_double_vector_fromHtoD(x_compartmentDEMIn.velocityCol, colVelocity.data(), size1D);
double inverseDiameterSum = 0.0;
double inverseMassSum = 0.0;
int sized = DEMDiameter.size();
double solDensity = pData->solDensity;
for (int i = 0; i < sized; i++)
{
inverseDiameterSum += (1 / DEMDiameter[i]);
inverseMassSum += (1 / ((4 / 3) * M_PI * pow((DEMDiameter[i] / 2), 3) * solDensity));
}
double coefOfRest = pData->coefOfRest;
double liqThick = pData->liqThick;
double surfAsp = pData->surfAsp;
double bindVisc = pData->bindVisc;
double sumVelo = 0.0;
double harmonic_diameter = sized / inverseDiameterSum;
double harmonic_mass = sized / inverseMassSum;
double uCritical = (10 + (1 / coefOfRest)) * log((liqThick / surfAsp)) * (3 * M_PI * pow(harmonic_diameter, 2) * bindVisc) / (8 * harmonic_mass);
// x_compartmentDEMIn.uCriticalCol[0] = uCritical;
copy_double_vector_fromHtoD(x_compartmentDEMIn.uCriticalCol, &uCritical, 1);
// cout << "Critical velocity for agg is " << uCritical << endl;
int veloSize = colVelocity.size();
for (int i = 0; i < veloSize; i++)
sumVelo += colVelocity[i];
unsigned int nDEMBins = pData->nDEMBins;
double averageVelocity = sumVelo / nDEMBins;
double stdDevVelocity = 0.0;
double varianceVelocity = 0.0;
for (int i = 0; i < veloSize; ++i)
varianceVelocity += pow((colVelocity[i] - averageVelocity), 2) / nDEMBins;
stdDevVelocity = sqrt(varianceVelocity);
//double intVelocity = 0.0;
vector<double> colProbablityOfVelocity(veloSize, 0.0);
for (int i = 0; i < veloSize; i++)
{
colProbablityOfVelocity[i] = (1 / (colVelocity[i] * sqrt(2 * M_PI) * stdDevVelocity)) * exp(-((log(colVelocity[i]) - averageVelocity) / (2 * pow(varianceVelocity, 2))));
// cout << "Probability at " << colVelocity[i] << "is " << colProbablityOfVelocity[i] << endl;
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.colProbability, colProbablityOfVelocity.data(), size1D);
// vector<double> impactFrequency = DEMImpactData;
// for (int s = 0; s < nFirstSolidBins; s++)
// for (int ss = 0; ss < nSecondSolidBins; ss++)
// for (int i = 0; i < nDEMBins; i++)
// {
// if (fAll[n2] > 0.0)
// impactFrequency[i] = (DEMImpactData[i] * timeStep) / demTimeStep;
// }
double critStDefNum = pData->critStDefNum;
double initPorosity = pData->initPorosity;
// cout << critStDefNum << "\t" << solDensity << "\t" << initPorosity << "\t" << bindVisc << endl;
double Ubreak = (2 * critStDefNum / solDensity) * (9 / 8.0) * (pow((1 - initPorosity), 2) / pow(initPorosity, 2)) * (9 / 16.0) * (bindVisc / DEMDiameter[0]);
// x_compartmentDEMIn.ubreak[0] = Ubreak;
copy_double_vector_fromHtoD(x_compartmentDEMIn.ubreak, &Ubreak, 1);
// cout << "Critical velocity for breakage is " << Ubreak << endl;
copy_double_vector_fromHtoD(x_compartmentDEMIn.impVelocity, velocity.data(),size1D);
int size1 = velocity.size();
double sum = 0.0;
for (int i = 0; i < size1; i++)
sum += velocity[i];
double averageVelocityBr = sum / nDEMBins;
double stdDevVelocityBr = 0.0;
double varianceVelocityBr = 0.0;
for (int i = 0; i < size1; ++i)
{
varianceVelocityBr += pow((velocity[i] - averageVelocityBr), 2) / nDEMBins;
}
stdDevVelocityBr = sqrt(varianceVelocityBr);
//double intVelocity = 0.0;
// cout << "Std Dev. of Velocity = " << stdDevVelocity << endl;
vector<double> breakageProbablityOfVelocity(size1, 0.0);
for (int i = 0; i < size1; i++)
{
if (velocity[i] != 0)
{
breakageProbablityOfVelocity[i] = (1 / (velocity[i] * sqrt(2 * M_PI) * stdDevVelocityBr)) * exp(-((log(velocity[i]) - averageVelocityBr) / (2 * pow(varianceVelocityBr, 2))));
// cout << "Probability at " << velocity[i] << "is " << breakageProbablityOfVelocity[i] << endl;
}
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.brProbability, breakageProbablityOfVelocity.data(), size1D);
DUMP2D(DEMCollisionData);
DUMP(DEMDiameter);
DUMP(DEMImpactData);
DUMP(velocity);
//Initialize DEM data for compartment
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMDiameter, DEMDiameter.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMCollisionData, (linearize2DVector(DEMCollisionData)).data(), size2D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMImpactData, DEMImpactData.data(), size1D);
vector<double> liquidAdditionRateAllCompartments(nCompartments, 0.0);
double liqSolidRatio = pData->liqSolidRatio;
double throughput = pData->throughput;
double liqDensity = pData->liqDensity;
double liquidAddRate = (liqSolidRatio * throughput) / (liqDensity * 3600);
liquidAdditionRateAllCompartments[0] = liquidAddRate;
arrayOfDouble2D h_fAllCompartmentsOverTime;
arrayOfDouble2D h_externalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_internalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_liquidBinsAllCompartmentsOverTime;
arrayOfDouble2D h_gasBinsAllCompartmentsOverTime;
double granulatorLength = pData->granulatorLength;
double partticleResTime = pData->partticleResTime;
double particleAveVelo = granulatorLength / partticleResTime;
vector<double> particleAverageVelocity(nCompartments, particleAveVelo);
//Initialize input data for compartment
copy_double_vector_fromHtoD(x_compartmentIn.vs, h_vs.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentIn.vss, h_vss.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentIn.diameter, diameter.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sMeshXY, h_sMeshXY.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssMeshXY, h_ssMeshXY.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sLow, h_sLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sHigh, h_sHigh.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssLow, h_ssLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssHigh, h_ssHigh.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sAggregationCheck, h_sAggregationCheck.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssAggregationCheck, h_ssAggregationCheck.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sInd, h_sInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssInd, h_ssInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sCheckB, h_sCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssCheckB, h_ssCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sIndB, h_sIndB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssIndB, h_ssIndB.data(), size2D);
vector<int> sieveGrid;
sieveGrid.push_back(38);
sieveGrid.push_back(63);
sieveGrid.push_back(90);
sieveGrid.push_back(125);
sieveGrid.push_back(250);
sieveGrid.push_back(355);
sieveGrid.push_back(500);
sieveGrid.push_back(710);
sieveGrid.push_back(850);
sieveGrid.push_back(1000);
sieveGrid.push_back(1400);
sieveGrid.push_back(2000);
sieveGrid.push_back(2380);
sieveGrid.push_back(4000);
size_t nSieveGrid = sieveGrid.size();
arrayOfDouble2D d10OverTime;
arrayOfDouble2D d50OverTime;
arrayOfDouble2D d90OverTime;
double time = stod(timeVal); // initial time to start PBM
double timeStep = 0.5; //1.0e-1;
vector<double> Time;
// double lastTime = time;
int timeIdxCount = 0;
// int lastTimeIdxCount = 0;
double premixTime = pData->premixTime;
double liqAddTime = pData->liqAddTime;
double postMixTime = pData->postMixTime;
double finalTime = premixTime + liqAddTime + postMixTime + stod(timeVal);
vector<double *> formationThroughAggregationOverTime;
vector<double *> depletionThroughAggregationOverTime;
vector<double *> formationThroughBreakageOverTime;
vector<double *> depletionThroughBreakageOverTime;
cout << "time" << endl;
// defining compartment varibale pointers
CompartmentVar compVar(size3D, size5D, 0), d_compVarCpy(size3D, size5D, 1), *d_compVar;
AggregationCompVar aggCompVar(size3D, size5D, 0), x_aggCompVar(size3D, size5D, 1), *d_aggCompVar;
BreakageCompVar brCompVar(size3D, size5D, 0), x_brCompVar(size3D, size5D, 1), *d_brCompVar;
// allocating memory for structures used for compartment calculations
err = hipMalloc(&d_compartmentIn, sizeof(CompartmentIn));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : CompartmentIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_prevCompInData, sizeof(PreviousCompartmentIn));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : prevCompInData (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_compartmentDEMIn, sizeof(CompartmentDEMIn));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : compartmentDEMIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_compVar, sizeof(CompartmentVar));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : CompartmentVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_aggCompVar, sizeof(AggregationCompVar));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : AggregationCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_brCompVar, sizeof(BreakageCompVar));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : BreakageCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc(&d_compartmentOut, sizeof(CompartmentOut));
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMalloc : d_compartmentOut (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// copying data to the allocated GPU
hipMemcpy(d_compartmentIn, &x_compartmentIn, sizeof(CompartmentIn), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_prevCompInData, &x_prevCompInData, sizeof(PreviousCompartmentIn), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : PreviousCompartmentIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_compartmentDEMIn, &x_compartmentDEMIn, sizeof(CompartmentDEMIn), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentDEMIn (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_compVar, &d_compVarCpy, sizeof(CompartmentVar), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
double aggKernelConst = pData->aggKernelConst;
// x_aggCompVar.aggKernelConst[0] = aggKernelConst;
copy_double_vector_fromHtoD(x_aggCompVar.aggKernelConst, &aggKernelConst, 1);
double brkKernelConst = pData->brkKernelConst;
// x_brCompVar.brkKernelConst[0] = brkKernelConst;
copy_double_vector_fromHtoD(x_brCompVar.brkKernelConst, &brkKernelConst, 1);
hipMemcpy(d_aggCompVar, &x_aggCompVar, sizeof(AggregationCompVar), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : AggregationCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_brCompVar, &x_brCompVar, sizeof(BreakageCompVar), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : BreakageCompVar (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipMemcpy(d_compartmentOut, &x_compartmentOut, sizeof(CompartmentOut), hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : compartmentOut (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// vector<double> h_formationThroughAggregation(nCompartments, 0.0);
// vector<double> h_depletionThroughAggregation(nCompartments, 0.0);
// vector<double> h_formationThroughBreakage(nCompartments, 0.0);
// vector<double> h_depletionThroughBreakage(nCompartments, 0.0);
double *d_formationThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_depletionThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_formationThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_depletionThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_fAllCompartments = device_alloc_double_vector(size3D);
double *d_flAllCompartments = device_alloc_double_vector(size3D);
double *d_fgAllCompartments = device_alloc_double_vector(size3D);
double *d_liquidAdditionRateAllCompartments = device_alloc_double_vector(nCompartments);
double *d_fIn = device_alloc_double_vector(size2D);
copy_double_vector_fromHtoD(d_liquidAdditionRateAllCompartments, liquidAdditionRateAllCompartments.data(), nCompartments);
copy_double_vector_fromHtoD(d_fIn, h_fIn.data(), size2D);
// dim3 compKernel_nblocks, compKernel_nthreads;
// compKernel_nblocks = dim3(nCompartments,1,1);
// compKernel_nthreads = dim3(size2D, size2D,1);
// int compKernel_nblocks = 16;
// int compKernel_nthreads = size2D * size2D;
// hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, 1792);
// double granulatorLength = pData->granulatorLength;
// double partticleResTime = pData->partticleResTime;
// double premixTime = pData->premixTime;
// double liqAddTime = pData->liqAddTime;
double consConst = pData->consConst;
double minPorosity = pData->minPorosity;
double granSatFactor = pData->granSatFactor;
int threads = size2D;
double initialTime = stod(timeVal);
CompartmentOut h_results(size2D, size5D, 1);
// hipDeviceSynchronize();
while (time <= finalTime)
{
copy_double_vector_fromHtoD(d_fAllCompartments, h_fAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_flAllCompartments, h_flAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_fgAllCompartments, h_fgAllCompartments.data(), size3D);
hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, 0);
hipLaunchKernelGGL(( launchCompartment), dim3(nCompartments),dim3(threads), 0, 0, d_compartmentIn, d_prevCompInData, d_compartmentOut, d_compartmentDEMIn, d_compVar, d_aggCompVar, d_brCompVar,
time, timeStep, initialTime, d_fAllCompartments, d_flAllCompartments, d_fgAllCompartments,
d_liquidAdditionRateAllCompartments, size2D, size3D, size4D, d_fIn, initPorosity, demTimeStep, nFirstSolidBins, nSecondSolidBins,
granulatorLength, partticleResTime, premixTime, liqAddTime, consConst, minPorosity, nCompartments, granSatFactor, aggKernelConst, brkKernelConst);
// hipDeviceSynchronize();
err = hipSuccess; // check kernel launach
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch launchCompartment kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 compKernel_nblocks, compKernel_nthreads;
hipDeviceSynchronize();
// vector<int> h_idx4(size5D, 0);
// int *d_idx4 = device_alloc_integer_vector(size5D);
// copy_integer_vector_fromHtoD(d_idx4, h_idx4.data(), size5D);
hipLaunchKernelGGL(( performAggCalculations), dim3(nCompartments),dim3(threads), 0, 0, d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_aggCompVar, time, timeStep, initialTime, demTimeStep, nFirstSolidBins, nSecondSolidBins, nCompartments, aggKernelConst/* , d_idx4 */);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("Failed to launch agg kernel (error code %s)!\n", hipGetErrorString(err));
}
// copy_integer_vector_fromDtoH(h_idx4.data(), d_idx4, size5D);
hipDeviceSynchronize();
hipLaunchKernelGGL(( performBreakageCalculations), dim3(nCompartments),dim3(threads), 0, 0, d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_brCompVar, time, timeStep, initialTime, demTimeStep, nFirstSolidBins, nSecondSolidBins, brkKernelConst);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("Failed to launch breakage kernel (error code %s)!\n", hipGetErrorString(err));
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( consolidationAndMovementCalcs), dim3(nCompartments),dim3(threads), 0, 0, d_compartmentIn, d_prevCompInData, d_compartmentOut, d_compartmentDEMIn, d_compVar, d_aggCompVar, d_brCompVar, nCompartments, granulatorLength, partticleResTime,
time, timeStep, premixTime, liqAddTime, initialTime, nFirstSolidBins, nSecondSolidBins, consConst, minPorosity);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("Failed to launch breakage kernel (error code %s)!\n", hipGetErrorString(err));
}
// cout << "Compartment ended " << endl;
hipDeviceSynchronize();
// Copying data strcutres reqd for calculation
err = hipMemcpy(&h_results, d_compartmentOut, sizeof(CompartmentOut), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to hipMemcpy : CompartmentOut D to Hmake (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// copy necessary variables back to the CPU
copy_double_vector_fromDtoH(compartmentOut.dfAlldt, h_results.dfAlldt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfLiquiddt, h_results.dfLiquiddt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfGasdt, h_results.dfGasdt, size3D);
copy_double_vector_fromDtoH(compartmentOut.liquidBins, h_results.liquidBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.gasBins, h_results.gasBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughAggregation, h_results.formationThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughAggregation, h_results.depletionThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughBreakage, h_results.formationThroughBreakage, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughBreakage, h_results.depletionThroughBreakage, size1D);
// copy_double_vector_fromDtoH(h_fAllCompartments.data(), d_fAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_flAllCompartments.data(), d_flAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_fgAllCompartments.data(), d_fgAllCompartments, size3D);
formationThroughAggregationOverTime.push_back(compartmentOut.formationThroughAggregation);
depletionThroughAggregationOverTime.push_back(compartmentOut.depletionThroughAggregation);
formationThroughBreakageOverTime.push_back(compartmentOut.formationThroughBreakage);
depletionThroughBreakageOverTime.push_back(compartmentOut.depletionThroughBreakage);
// for (int w = 0; w < nCompartments; w++)
// {
// cout << "Compartment Number = " << w +1 << endl;
// cout << "compartmentOut.formationThroughAggregation = " << compartmentOut.formationThroughAggregation[w] << endl;
// cout << "compartmentOut.depletionThroughAggregation = " << compartmentOut.depletionThroughAggregation[w] << endl;
// cout << "Agg Ratio = " << compartmentOut.formationThroughAggregation[w] / compartmentOut.depletionThroughAggregation[w] << endl;
// cout << "compartmentOut.formationThroughBreakage = " << compartmentOut.formationThroughBreakage[w] << endl;
// cout << "compartmentOut.depletionThroughBreakage = " << compartmentOut.depletionThroughBreakage[w] << endl;
// cout << "Breakage Ratio = " << compartmentOut.formationThroughBreakage[w] / compartmentOut.depletionThroughBreakage[w] << endl;
// }
double maxofthree = -DBL_MAX;
double maxAll = -DBL_MAX;
double maxLiquid = -DBL_MAX;
double maxGas = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
// cout << "compartmentOut.dfAlldt[" << i << "] is " << compartmentOut.dfAlldt[i] << endl;
if (fabs(h_fAllCompartments[i]) > 1.0e-16)
maxAll = max(maxAll, -compartmentOut.dfAlldt[i] / h_fAllCompartments[i]);
if (fabs(h_flAllCompartments[i]) > 1.0e-16)
maxLiquid = max(maxLiquid, -compartmentOut.dfLiquiddt[i] / h_flAllCompartments[i]);
if (fabs(h_fgAllCompartments[i]) > 1.0e-16)
maxGas = max(maxGas, -compartmentOut.dfGasdt[i] / h_fgAllCompartments[i]);
maxofthree = max(maxofthree, max(maxAll, max(maxLiquid, maxGas)));
}
cout << "maxAll = " << maxAll << endl;
cout << "maxLiquid = " << maxLiquid << endl;
cout << "maxGas = " << maxGas << endl;
cout << "maxofthree = " << maxofthree << endl;
while (maxofthree < 0.1 / timeStep && timeStep < 0.25)
timeStep *= 2.0;
while (maxofthree > 0.1 / timeStep && timeStep > 5.0e-5)
timeStep /= 2.0;
int nanCount = 0;
double minfAll = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
double value = 0.0;
h_fAllCompartments[i] = h_fAllCompartments[i] + compartmentOut.dfAlldt[i] * timeStep;
// cout << " h_fAllCompartments[" << i <<"] is " << h_fAllCompartments[i] << endl;
if (std::isnan(h_fAllCompartments[i]))
nanCount++;
value = h_flAllCompartments[i] + compartmentOut.dfLiquiddt[i] * timeStep;
h_flAllCompartments[i] = value > 0.0 ? value : 0.0;
value = h_fgAllCompartments[i] + compartmentOut.dfGasdt[i] * timeStep;
h_fgAllCompartments[i] = value > 0.0 ? value : 0.0;
}
if (nanCount)
{
cout << endl << "*****fAllCompartments has " << nanCount << "nan values******" << endl << endl;
DUMPCSV(h_fAllCompartments);
exit(EXIT_FAILURE);
}
int countnegfAll = 0;
minfAll = getMinimumOfArray(h_fAllCompartments);
if (minfAll < -1.0e-16 && countnegfAll > 0.1 * nCompartments * nFirstSolidBins * nSecondSolidBins)
{
//int mpi_err = 0;
cout << endl;
//DUMP3DCSV(dfdtAllCompartments);
//DUMP3DCSV(fAllCompartments);
//cout << "My process id = " << mpi_id << endl;
cout << "minfAll" << minfAll << endl;
cout << "******fAllCompartments has negative values********" << endl;
cout << "Number of negative values = " << countnegfAll << endl;
DUMPCSV(h_fAllCompartments);
cout << "Aborting..." << endl;
return 1;
}
// BIN recalculation
for (int c = 0; c < nCompartments; c++)
{
vector<double> liquidBins(size2D, 0.0);
vector<double> gasBins(size2D, 0.0);
vector<double> internalLiquid(size2D, 0.0);
vector<double> externalLiquid(size2D, 0.0);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
int n2 = s * nSecondSolidBins + ss;
if (fabs(h_fAllCompartments[m]) > 1.0e-16)
{
liquidBins[n2] = h_flAllCompartments[m] / h_fAllCompartments[m];
gasBins[n2] = h_fgAllCompartments[m] / h_fAllCompartments[m];
}
internalLiquid[n2] = min(granSatFactor * gasBins[n2], liquidBins[n2]);
externalLiquid[n2] = max(0.0, liquidBins[n2] - internalLiquid[n2]);
double value = compartmentIn.sMeshXY[n2] + compartmentIn.ssMeshXY[n2] + gasBins[n2];
h_internalVolumeBins[n2] = value + internalLiquid[n2];
h_externalVolumeBins[n2] = value + liquidBins[n2];
h_liquidBinsAllCompartments[m] = liquidBins[n2];
h_gasBinsAllCompartments[m] = gasBins[n2];
h_externalVolumeBinsAllCompartments[m] = h_externalVolumeBins[n2];
h_internalVolumeBinsAllCompartments[m] = h_internalVolumeBins[n2];
}
}
vector<double> d10OverCompartment(nCompartments, 0.0);
vector<double> d50OverCompartment(nCompartments, 0.0);
vector<double> d90OverCompartment(nCompartments, 0.0);
for (int c = 0; c < nCompartments; c++)
{
arrayOfDouble2D diameter = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
diameter[s][ss] = cbrt((6 / M_PI) * h_externalVolumeBinsAllCompartments[m]) * 1.0e6;
}
vector<double> totalVolumeGrid(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid - 1; d++)
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
if (diameter[s][ss] < sieveGrid[d + 1] && diameter[s][ss] >= sieveGrid[d])
totalVolumeGrid[d] += h_fAllCompartments[m] * h_externalVolumeBinsAllCompartments[m];
}
double sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
sum += totalVolumeGrid[d];
vector<double> volumeDistribution(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid; d++)
if(sum > 1e-16)
volumeDistribution[d] = totalVolumeGrid[d] / sum;
vector<double> cumulativeVolumeDistribution(nSieveGrid, 0.0);
sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
{
sum += volumeDistribution[d];
cumulativeVolumeDistribution[d] = sum;
}
double d10 = 0.1 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d50 = 0.5 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d90 = 0.9 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
for (size_t d = 1; d < nSieveGrid; d++)
{
double value1 = (sieveGrid[d] - sieveGrid[d - 1]) / (cumulativeVolumeDistribution[d] - cumulativeVolumeDistribution[d - 1]);
double value2 = sieveGrid[d - 1];
if (cumulativeVolumeDistribution[d - 1] < 0.5 && cumulativeVolumeDistribution[d] >= 0.5)
{
double value = 0.5 - cumulativeVolumeDistribution[d - 1];
d50 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.1 && cumulativeVolumeDistribution[d] >= 0.1)
{
double value = 0.1 - cumulativeVolumeDistribution[d - 1];
d10 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.1 && cumulativeVolumeDistribution[d] >= 0.1)
{
double value = 0.9 - cumulativeVolumeDistribution[d - 1];
d90 = value * value1 + value2;
}
}
d10OverCompartment[c] = d10;
d50OverCompartment[c] = d50;
d10OverCompartment[c] = d90;
}
Time.push_back(time);
d10OverTime.push_back(d10OverCompartment);
d50OverTime.push_back(d50OverCompartment);
d90OverTime.push_back(d90OverCompartment);
//SAVING OVER TIME
//cout << endl << "************Saving over time" << endl << endl;
h_fAllCompartmentsOverTime.push_back(h_fAllCompartments);
h_externalVolumeBinsAllCompartmentsOverTime.push_back(h_externalVolumeBinsAllCompartments);
h_internalVolumeBinsAllCompartmentsOverTime.push_back(h_internalVolumeBinsAllCompartments);
h_liquidBinsAllCompartmentsOverTime.push_back(h_liquidBinsAllCompartments);
h_gasBinsAllCompartmentsOverTime.push_back(h_gasBinsAllCompartments);
cout << "time = " << time << endl;
cout << "timeStep = " << timeStep << endl;
cout << endl;
timeIdxCount++;
time += timeStep;
// free_double_vector_device(h_results.dfAlldt);
// free_double_vector_device(h_results.dfLiquiddt);
// free_double_vector_device(h_results.dfGasdt);
// free_double_vector_device(h_results.liquidBins);
// free_double_vector_device(h_results.gasBins);
// free_double_vector_device(h_results.formationThroughAggregation);
// free_double_vector_device(h_results.depletionThroughAggregation);
// free_double_vector_device(h_results.formationThroughBreakage);
// free_double_vector_device(h_results.depletionThroughBreakage);
}
size_t nTimeSteps = Time.size();
cout << endl
<< "nTimeSteps = " << nTimeSteps << endl
<< endl;
//dump values for ratio plots
dumpDiaCSVpointer(Time, formationThroughAggregationOverTime, Time.size() * nCompartments, string("FormationThroughAggregation"));
dumpDiaCSVpointer(Time, depletionThroughAggregationOverTime, Time.size() * nCompartments, string("DepletionThroughAggregation"));
dumpDiaCSVpointer(Time, formationThroughBreakageOverTime, Time.size() * nCompartments, string("FormationThroughBreakage"));
dumpDiaCSVpointer(Time, depletionThroughBreakageOverTime, Time.size() * nCompartments, string("DepletionThroughBreakage"));
dumpDiaCSV(Time, d50OverTime, string(("d50OverTime")));
double endTime = static_cast<double>(clock()) / static_cast<double>(CLOCKS_PER_SEC);
cout << "That took " << endTime - startTime << " seconds" << endl;
cout << "Code End" << endl;
return 0;
// vector<double> h(size4D, 0.0);
// for (int i = 0; i < size5D; i++)
// {
// cout << "At i = " << i << " kernel = " << compartmentOut.aggregationKernel[i] << endl;
// }
// hipFree(d_vs);
// hipFree(d_vss);
// hipFree(d_sMeshXY);
// hipFree(d_ssMeshXY);
// hipFree(d_compartmentIn);
} | a19157d7c4049b4085bd353e1694712424e6eaaa.cu | #include <vector>
#include <cmath>
#include <float.h>
#include <string>
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include "utility.cuh"
#include "parameterData.h"
#include "liggghtsData.h"
#include "compartment.cuh"
using namespace std;
#define TWOWAYCOUPLING false
// MACROS
// Calling macros for error check and dump data to files to VaribleName.txt
#define DUMP(varName) dumpData(varName, #varName)
#define DUMP2D(varName) dump2DData(varName, #varName)
#define DUMP3D(varName) dump3DData(varName, #varName)
#define DUMPCSV(varName) dumpCSV(varName, #varName)
#define DUMP2DCSV(varName) dump2DCSV(varName, #varName)
#define DUMP3DCSV(varName) dump3DCSV(varName, #varName)
#define DUMPDIACSV(time, dia) dumpDiaCSV(time, dia, #dia)
#define DUMP2DCSV4MATLAB(varName) dump2DCSV4Matlab(varName, #varName)
// extern __shared__ double *d_sMeshXY, *d_ssMeshXY;
// ==================================== INITIALIZATION KERNEL ===================================================
// Builds the 2D solid-volume mesh and all aggregation/breakage lookup tables.
//
// Expected launch geometry (assumption, to be confirmed against the host launch
// site): gridDim.x == blockDim.x == nFirstSolidBins == nSecondSolidBins, so that
// each block fills one row (first-solid bin) and each thread one column
// (second-solid bin) of the size2D = gridDim.x * blockDim.x flattened arrays.
//
// Outputs (all indexed by lin = blockDim.x * blockIdx.x + threadIdx.x):
//   d_s(s)MeshXY         — mesh coordinates: row volume / column volume
//   d_s(s)Agg            — pairwise aggregate volumes vs[i]+vs[j]
//   d_s(s)AggregationCheck — 1 if the aggregate still fits in the largest bin
//   d_s(s)Low / d_s(s)High — lower/upper cell edges (last row/column edge = 0)
//   d_s(s)Loc            — bin index of the aggregate on the geometric grid
//   d_s(s)Ind            — 1-based max(row, col) + 1 index pair
//   d_s(s)Break          — pairwise volume differences, clamped at 0
// The d_*LocBreak / d_*CheckB / d_*IndB parameters are filled by
// initialization_kernel2 and are unused here; they remain in the signature so
// both kernels share one parameter list.
__global__ void initialization_kernel(double *d_vs, double *d_vss, size_t size2D, double fsVolCoeff, double ssVolCoeff, double fsVolBase, double ssVolBase, double *d_sAgg,
                            double *d_ssAgg, int *d_sAggregationCheck, int *d_ssAggregationCheck, double *d_sLow, double *d_ssLow, double *d_sHigh, double *d_ssHigh,
                            double *d_sMeshXY, double *d_ssMeshXY, int *d_sLoc, int *d_ssLoc, int *d_sInd, int *d_ssInd, double *d_sBreak, double *d_ssBreak,
                            int *d_sLocBreak, int *d_ssLocBreak, int *d_sCheckB, int*d_ssCheckB, int *d_sIndB, int *d_ssIndB)
{
    int idx = threadIdx.x;   // column: second-solid bin
    int bix = blockIdx.x;    // row: first-solid bin
    int bdx = blockDim.x;    // row length (== number of bins per dimension)
    int lin = bdx * bix + idx;

    // Mesh: every element of row bix carries vs[bix]; every element of
    // column idx carries vss[idx].
    d_sMeshXY[lin]  = d_vs[bix];
    d_ssMeshXY[lin] = d_vss[idx];

    // Pairwise aggregate volumes and "does the aggregate still fit" flags.
    d_sAgg[lin]  = d_vs[idx] + d_vs[bix];
    d_ssAgg[lin] = d_vss[idx] + d_vss[bix];
    d_sAggregationCheck[lin]  = d_sAgg[lin]  <= d_vs[bdx - 1]  ? 1 : 0;
    d_ssAggregationCheck[lin] = d_ssAgg[lin] <= d_vss[bdx - 1] ? 1 : 0;

    // Cell edges. Lower edge is the cell's own mesh value.
    d_sLow[lin]  = d_sMeshXY[lin];
    d_ssLow[lin] = d_ssMeshXY[lin];

    // Upper edges, read directly from the 1D grids instead of from
    // d_s(s)MeshXY. The previous code read d_sMeshXY[bdx*(bix+1)+idx]
    // (written by a DIFFERENT block — a cross-block race that
    // __syncthreads() cannot order) and d_ssMeshXY[bdx*bix+idx+1]
    // (out of bounds for the last thread of the last block). Reading
    // d_vs/d_vss yields the identical final values with no race and no
    // out-of-bounds access; the last row/column edge is 0 as before.
    d_sHigh[lin]  = (bix < bdx - 1) ? d_vs[bix + 1]  : 0.0;
    d_ssHigh[lin] = (idx < bdx - 1) ? d_vss[idx + 1] : 0.0;

    // Bin location of each aggregate on the geometric (log-base) volume grid.
    d_sLoc[lin]  = floor(log(d_sAgg[lin]  / fsVolCoeff) / log(fsVolBase) + 1);
    d_ssLoc[lin] = floor(log(d_ssAgg[lin] / ssVolCoeff) / log(ssVolBase) + 1);

    // 1-based index of the larger of (row, column) for each pair.
    d_sInd[lin]  = (idx <= bix) ? (bix + 1) : (idx + 1);
    d_ssInd[lin] = (idx <= bix) ? (bix + 1) : (idx + 1);

    // Breakage volume differences, clamped to zero. Each thread touches only
    // its own element of every output, so no barrier is required: the original
    // __syncthreads() calls ordered nothing that this kernel depends on.
    double value  = d_vs[idx]  - d_vs[bix];
    double value1 = d_vss[idx] - d_vss[bix];
    d_sBreak[lin]  = value  < 0.0 ? 0.0 : value;
    d_ssBreak[lin] = value1 < 0.0 ? 0.0 : value1;
}
// Second initialization pass: derives breakage bin locations and index tables
// from the d_s(s)Break matrices filled by initialization_kernel.
//
// Each thread handles one element of the size2D flattened grid. Note the
// location arrays are computed from the TRANSPOSED breakage matrix
// (element [col][row]) — this mirrors the original formulation.
__global__ void initialization_kernel2(double *d_vs, double *d_vss, size_t size2D, double fsVolCoeff, double ssVolCoeff, double fsVolBase, double ssVolBase, double *d_sAgg,
                            double *d_ssAgg, int *d_sAggregationCheck, int *d_ssAggregationCheck, double *d_sLow, double *d_ssLow, double *d_sHigh, double *d_ssHigh,
                            double *d_sMeshXY, double *d_ssMeshXY, int *d_sLoc, int *d_ssLoc, int *d_sInd, int *d_ssInd, double *d_sBreak, double *d_ssBreak,
                            int *d_sLocBreak, int *d_ssLocBreak, int *d_sCheckB, int*d_ssCheckB, int *d_sIndB, int *d_ssIndB)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    const int n   = blockDim.x;
    const int fwd = n * row + col;   // this thread's own (row-major) element
    const int tr  = n * col + row;   // transposed element used on the read side

    // Bin location of each transposed breakage volume on the geometric grid;
    // an exactly-zero volume (set by the clamp in initialization_kernel)
    // maps to location 0.
    const double sVol  = d_sBreak[tr];
    const double ssVol = d_ssBreak[tr];
    d_sLocBreak[fwd]  = (sVol  == 0) ? 0 : (floor(log(sVol  / fsVolCoeff) / log(fsVolBase) + 1));
    d_ssLocBreak[fwd] = (ssVol == 0) ? 0 : (floor(log(ssVol / ssVolCoeff) / log(ssVolBase) + 1));
    __syncthreads();

    // Validity flag: 1 when the breakage product lands in a real bin (>= 1).
    d_sCheckB[fwd]  = (d_sLocBreak[fwd]  >= 1) ? 1 : 0;
    d_ssCheckB[fwd] = (d_ssLocBreak[fwd] >= 1) ? 1 : 0;

    // Index tables: the bin location itself, with invalid (< 1) entries
    // redirected to the out-of-range sentinel n + 1.
    int sIdx  = d_sLocBreak[fwd];
    int ssIdx = d_ssLocBreak[fwd];
    if (sIdx < 1)
        sIdx = n + 1;
    if (ssIdx < 1)
        ssIdx = n + 1;
    d_sIndB[fwd]  = sIdx;
    d_ssIndB[fwd] = ssIdx;
    __syncthreads();
}
// ================================= COMPARTMENT LAUNCH KERNEL ============================================================
// Per-time-step setup kernel: loads each compartment's population-balance state
// (number, liquid and gas distributions) from the global per-compartment arrays
// and zero-initializes every per-bin scratch field used by the subsequent
// aggregation / breakage / consolidation kernels.
//
// Launch layout (inferred from the indexing below — confirm against the host launch):
//   gridDim.x  = nCompartments            (bix selects the compartment)
//   blockDim.x = nFirstSolidBins * nSecondSolidBins  (tix selects the flattened 2-D bin)
//
// Several parameters (size2D..size4D, granulatorLength, consConst, aggKernelConst, ...)
// are unused in this kernel; the signature is shared with sibling kernels and is kept
// unchanged so existing call sites still compile.
__global__ void launchCompartment(CompartmentIn *d_compartmentIn, PreviousCompartmentIn *d_prevCompInData, CompartmentOut *d_compartmentOut, CompartmentDEMIn *d_compartmentDEMIn,
                                  CompartmentVar *d_compVar, AggregationCompVar *d_aggCompVar, BreakageCompVar *d_brCompVar, double time, double timeStep, double initialTime,
                                  double *d_fAllCompartments, double *d_flAllCompartments, double *d_fgAllCompartments, double *d_liquidAdditionRateAllCompartments,
                                  unsigned int size2D, unsigned int size3D, unsigned int size4D, double *d_fIn, double initPorosity, double demTimeStep, int nFirstSolidBins, int nSecondSolidBins,
                                  double granulatorLength, double partticleResTime, double premixTime, double liqAddTime, double consConst, double minPorosity, int nCompartments,
                                  double granSatFactor, double aggKernelConst, double brkKernelConst)
{
    int bix = blockIdx.x;          // compartment index
    int bdx = blockDim.x;          // number of 2-D bins per compartment
    int tix = threadIdx.x;         // flattened (first solid, second solid) bin index
    int idx3 = bix * bdx + tix;    // global bin index across all compartments

    // Decompose the flat bin index into the two solid-volume axes.
    // Integer division replaces the original floorf() float round-trip; the result is
    // identical for these non-negative operands and exact for all int values.
    // NOTE(review): s1 divides by nFirstSolidBins while ss1 takes modulo
    // nSecondSolidBins — this is only self-consistent when the two bin counts are
    // equal; confirm the intended layout.
    int s1 = tix / nFirstSolidBins;
    int ss1 = tix % nSecondSolidBins;

    // One thread per block resets the per-compartment reduction targets that
    // consolidationAndMovementCalcs accumulates into later.
    if (tix == 0)
    {
        d_compartmentOut->formationThroughAggregation[bix] = 0.0;
        d_compartmentOut->depletionThroughAggregation[bix] = 0.0;
        d_compartmentOut->formationThroughBreakage[bix] = 0.0;
        d_compartmentOut->depletionThroughBreakage[bix] = 0.0;
    }

    // Reset the per-bin outputs for this step.
    d_compartmentOut->dfAlldt[idx3] = 0.0;
    d_compartmentOut->dfLiquiddt[idx3] = 0.0;
    d_compartmentOut->dfGasdt[idx3] = 0.0;
    d_compartmentOut->liquidBins[idx3] = 0.0;
    d_compartmentOut->gasBins[idx3] = 0.0;
    d_compartmentOut->internalVolumeBins[idx3] = 0.0;
    d_compartmentOut->externalVolumeBins[idx3] = 0.0;

    // Load this compartment's current state from the global time-integrated arrays.
    d_compartmentIn->fAll[idx3] = d_fAllCompartments[idx3];
    d_compartmentIn->fLiquid[idx3] = d_flAllCompartments[idx3];
    d_compartmentIn->fGas[idx3] = d_fgAllCompartments[idx3];
    d_compartmentIn->liquidAdditionRate[bix] = d_liquidAdditionRateAllCompartments[bix];

    if (bix == 0)
    {
        // First compartment: no upstream neighbour; feed enters here.
        d_prevCompInData->fAllPreviousCompartment[idx3] = 0.0;
        d_prevCompInData->flPreviousCompartment[idx3] = 0.0;
        d_prevCompInData->fgPreviousCompartment[idx3] = 0.0;
        d_prevCompInData->fAllComingIn[idx3] = d_fIn[tix];
        // Incoming gas scales with solid volume, initial porosity and the time step.
        // (The dead "fgComingIn = 0.0" pre-assignment from the original was removed;
        // it was immediately overwritten.)
        double value = initPorosity * timeStep;
        d_prevCompInData->fgComingIn[idx3] = d_fIn[tix] * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) * value;
    }
    else
    {
        // Downstream compartments: pull the state of compartment bix-1; no fresh feed.
        d_prevCompInData->fAllPreviousCompartment[idx3] = d_fAllCompartments[(bix - 1) * bdx + tix];
        d_prevCompInData->flPreviousCompartment[idx3] = d_flAllCompartments[(bix - 1) * bdx + tix];
        d_prevCompInData->fgPreviousCompartment[idx3] = d_fgAllCompartments[(bix - 1) * bdx + tix];
        d_prevCompInData->fAllComingIn[idx3] = 0.0;
        d_prevCompInData->fgComingIn[idx3] = 0.0;
    }
    __syncthreads();

    // Per-particle liquid/gas content; guard against an (effectively) empty bin.
    if (fabs(d_compartmentIn->fAll[idx3]) > 1e-16)
    {
        d_compartmentOut->liquidBins[idx3] = d_compartmentIn->fLiquid[idx3] / d_compartmentIn->fAll[idx3];
        d_compartmentOut->gasBins[idx3] = d_compartmentIn->fGas[idx3] / d_compartmentIn->fAll[idx3];
    }
    else
    {
        d_compartmentOut->liquidBins[idx3] = 0.0;
        d_compartmentOut->gasBins[idx3] = 0.0;
    }

    // Zero every aggregation scratch term for this bin.
    d_aggCompVar->depletionThroughAggregation[idx3] = 0.0;
    d_aggCompVar->depletionOfGasThroughAggregation[idx3] = 0.0;
    d_aggCompVar->depletionOfLiquidThroughAggregation[idx3] = 0.0;
    d_aggCompVar->birthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->firstSolidBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->secondSolidBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->liquidBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->gasBirthThroughAggregation[idx3] = 0.0;
    d_aggCompVar->firstSolidVolumeThroughAggregation[idx3] = 0.0;
    d_aggCompVar->secondSolidVolumeThroughAggregation[idx3] = 0.0;
    d_aggCompVar->birthAggLowHigh[idx3] = 0.0;
    d_aggCompVar->birthAggLowHighLiq[idx3] = 0.0;
    d_aggCompVar->birthAggLowHighGas[idx3] = 0.0;
    d_aggCompVar->birthAggHighLow[idx3] = 0.0;
    d_aggCompVar->birthAggHighLowLiq[idx3] = 0.0;
    d_aggCompVar->birthAggHighLowGas[idx3] = 0.0;
    d_aggCompVar->birthAggLowLow[idx3] = 0.0;
    d_aggCompVar->birthAggLowLowLiq[idx3] = 0.0;
    d_aggCompVar->birthAggLowLowGas[idx3] = 0.0;
    d_aggCompVar->birthAggHighHigh[idx3] = 0.0;
    d_aggCompVar->birthAggHighHighLiq[idx3] = 0.0;
    d_aggCompVar->birthAggHighHighGas[idx3] = 0.0;
    d_aggCompVar->formationThroughAggregationCA[idx3] = 0.0;
    d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] = 0.0;
    d_aggCompVar->formationOfGasThroughAggregationCA[idx3] = 0.0;

    // Zero every breakage scratch term for this bin.
    d_brCompVar->fractionBreakage00[idx3] = 0.0;
    d_brCompVar->fractionBreakage01[idx3] = 0.0;
    d_brCompVar->fractionBreakage10[idx3] = 0.0;
    d_brCompVar->fractionBreakage11[idx3] = 0.0;
    d_brCompVar->transferThroughLiquidAddition[idx3] = 0.0;
    d_brCompVar->transferThroughConsolidation[idx3] = 0.0;
    d_brCompVar->depletionThroughBreakage[idx3] = 0.0;
    d_brCompVar->depletionOfLiquidthroughBreakage[idx3] = 0.0;
    d_brCompVar->depletionOfGasThroughBreakage[idx3] = 0.0;
    d_brCompVar->birthThroughBreakage1[idx3] = 0.0;
    d_brCompVar->birthThroughBreakage2[idx3] = 0.0;
    d_brCompVar->firstSolidBirthThroughBreakage[idx3] = 0.0;
    d_brCompVar->secondSolidBirthThroughBreakage[idx3] = 0.0;
    d_brCompVar->liquidBirthThroughBreakage2[idx3] = 0.0;
    d_brCompVar->liquidBirthThroughBreakage1[idx3] = 0.0;
    d_brCompVar->gasBirthThroughBreakage1[idx3] = 0.0;
    d_brCompVar->gasBirthThroughBreakage2[idx3] = 0.0;
    d_brCompVar->firstSolidVolumeThroughBreakage[idx3] = 0.0;
    d_brCompVar->secondSolidVolumeThroughBreakage[idx3] = 0.0;
    d_brCompVar->formationThroughBreakageCA[idx3] = 0.0;
    d_brCompVar->formationOfLiquidThroughBreakageCA[idx3] = 0.0;
    d_brCompVar->formationOfGasThroughBreakageCA[idx3] = 0.0;

    // Zero the general compartment scratch terms. (The original zeroed
    // particleMovement/liquidMovement/gasMovement twice; the duplicate triple
    // was removed — it had no effect.)
    d_compVar->internalLiquid[idx3] = 0.0;
    d_compVar->externalLiquid[idx3] = 0.0;
    d_compVar->externalLiquidContent[idx3] = 0.0;
    d_compVar->volumeBins[tix] = 0.0;          // indexed by tix: shared across blocks, every block writes the same value
    d_compVar->particleMovement[idx3] = 0.0;
    d_compVar->liquidMovement[idx3] = 0.0;
    d_compVar->gasMovement[idx3] = 0.0;
    d_compVar->meshXYSum[tix] = 0.0;           // indexed by tix: same benign cross-block write pattern
    d_compVar->totalSolidvolume[bix] = 0.0;

    // Split the liquid content into internal (held in pores, capped by granSatFactor
    // times the gas volume) and external (film on the surface) fractions.
    d_compVar->internalLiquid[idx3] = min((granSatFactor * d_compartmentOut->gasBins[idx3]), d_compartmentOut->liquidBins[idx3]);
    d_compVar->externalLiquid[idx3] = max(0.0, (d_compartmentOut->liquidBins[idx3] - d_compVar->internalLiquid[idx3]));

    // Internal granule volume = both solid volumes + internal liquid + gas.
    d_compartmentOut->internalVolumeBins[idx3] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix] + d_compVar->internalLiquid[idx3] + d_compartmentOut->gasBins[idx3];
    // Cached solid-volume sum per bin, consumed by consolidationAndMovementCalcs.
    d_compVar->meshXYSum[tix] = d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix];
    __syncthreads();
}
// printf("d_compartmentOut->liquidBins = %f \n", d_compartmentOut->liquidBins[tix]);
// Final per-time-step kernel: combines axial particle/liquid/gas transport between
// compartments, liquid addition, and consolidation with the aggregation/breakage
// rate terms computed earlier, producing dfAlldt / dfLiquiddt / dfGasdt for the
// host-side time integration.
//
// Launch layout (inferred from the indexing — confirm against the host launch):
//   gridDim.x  = nCompartments            (bix selects the compartment)
//   blockDim.x = nFirstSolidBins * nSecondSolidBins  (tix selects the flattened 2-D bin)
//
// Ordering matters throughout: thread 0 mutates shared per-compartment state
// (liquidAdditionRate, totalSolidvolume) that all threads read only after the
// intervening __syncthreads().
__global__ void consolidationAndMovementCalcs(CompartmentIn *d_compartmentIn, PreviousCompartmentIn *d_prevCompInData, CompartmentOut *d_compartmentOut, CompartmentDEMIn *d_compartmentDEMIn,
CompartmentVar *d_compVar, AggregationCompVar *d_aggCompVar, BreakageCompVar *d_brCompVar, int nCompartments, double granulatorLength, double partticleResTime,
double time, double timeStep, double premixTime, double liqAddTime, double initialTime, int nFirstSolidBins, int nSecondSolidBins, double consConst, double minPorosity)
{
int bix = blockIdx.x;          // compartment index
int bdx = blockDim.x;          // number of 2-D bins per compartment
int tix = threadIdx.x;         // flattened (first solid, second solid) bin index
int idx3 = bix * bdx + tix;    // global bin index across all compartments
// NOTE(review): s1 divides by nFirstSolidBins while ss1 takes modulo
// nSecondSolidBins — self-consistent only when the two bin counts are equal;
// confirm the intended bin layout.
int s1 = (int) floorf(tix / nFirstSolidBins);
int ss1 = tix % nSecondSolidBins;
// Every thread redundantly scans meshXYSum (written by launchCompartment) for the
// maximum solid-volume sum; the result is identical in each thread.
// NOTE(review): d1 is size_t while bdx is int — signed/unsigned comparison.
double maxValue = -DBL_MAX;
for (size_t d1 = 0; d1 < bdx; d1++)
{
maxValue = max(maxValue, d_compVar->meshXYSum[d1]);
}
__syncthreads();
// Size-dependent mobility factor: bins with the largest solid volume get 0 (no
// axial movement), the smallest move fastest.
double valueMeshXY = 1 - (d_compartmentIn->sMeshXY[tix] + d_compartmentIn->ssMeshXY[tix]) / maxValue;
double distanceBetweenCompartments = granulatorLength / nCompartments;
double particleAverageVelocity = granulatorLength / partticleResTime;
// Fraction of a compartment length travelled in one time step.
double distanceMoved = particleAverageVelocity * timeStep / distanceBetweenCompartments;// value renamed as distanceMoved
// Net axial transport: fresh feed + inflow from the upstream compartment minus
// outflow from this one, applied to number, liquid and gas distributions.
d_compVar->particleMovement[idx3] = d_prevCompInData->fAllComingIn[idx3];
d_compVar->particleMovement[idx3] += d_prevCompInData->fAllPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->particleMovement[idx3] -= d_compartmentIn->fAll[idx3] * distanceMoved;
d_compVar->liquidMovement[idx3] = d_prevCompInData->flPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->liquidMovement[idx3] -= d_compartmentIn->fLiquid[idx3] * distanceMoved;
d_compVar->gasMovement[idx3] = d_prevCompInData->fgComingIn[idx3];
d_compVar->gasMovement[idx3] += d_prevCompInData->fgPreviousCompartment[idx3] * distanceMoved * valueMeshXY;
d_compVar->gasMovement[idx3] -= d_compartmentIn->fGas[idx3] * distanceMoved;
double finalTime = premixTime + liqAddTime + initialTime;
if (tix == 0)
{
// Liquid is only added during the addition window; the *= converts the rate
// into an amount for this step (liquidAdditionRate is refreshed from the
// global array by launchCompartment every step, so this does not compound).
if (time >= premixTime && time <= finalTime)
d_compartmentIn->liquidAdditionRate[bix] *= timeStep;
else
d_compartmentIn->liquidAdditionRate[bix] = 0.0;
// Serial reduction by thread 0: total solid volume of this compartment.
for (int i = bix * bdx; i < (bix+1) * bdx; i++)
d_compVar->totalSolidvolume[bix] += d_compartmentIn->fAll[i] * (d_compartmentIn->vs[(int) floorf((i - bix * bdx) / nFirstSolidBins)] + d_compartmentIn->vss[(i - bix * bdx) % nSecondSolidBins]);
}
// d_compVar->totalSolidvolume[bix] += d_compartmentIn->fAll[idx3] * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]);
__syncthreads();
// dfAll/dt = transport + net aggregation + net breakage.
d_compartmentOut->dfAlldt[idx3] = d_compVar->particleMovement[idx3];
d_compartmentOut->dfAlldt[idx3] += d_aggCompVar->formationThroughAggregationCA[idx3] - d_aggCompVar->depletionThroughAggregation[idx3];
d_compartmentOut->dfAlldt[idx3] += d_brCompVar->birthThroughBreakage1[idx3] + d_brCompVar->formationThroughBreakageCA[idx3] - d_brCompVar->depletionThroughBreakage[idx3];
// Distribute the added liquid over bins in proportion to their solid volume
// (guarded against an empty compartment).
if (d_compVar->totalSolidvolume[bix] > 1.0e-16)
d_brCompVar->transferThroughLiquidAddition[idx3] = d_compartmentIn->liquidAdditionRate[bix] * ((d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) / d_compVar->totalSolidvolume[bix]);
// dfLiquid/dt = transport + liquid addition + net aggregation + net breakage.
d_compartmentOut->dfLiquiddt[idx3] = d_compVar->liquidMovement[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughLiquidAddition[idx3]
d_compartmentOut->dfLiquiddt[idx3] += d_aggCompVar->formationOfLiquidThroughAggregationCA[idx3] - d_aggCompVar->depletionOfLiquidThroughAggregation[idx3];
d_compartmentOut->dfLiquiddt[idx3] += d_brCompVar->liquidBirthThroughBreakage1[idx3] + d_brCompVar->formationOfLiquidThroughBreakageCA[idx3];
d_compartmentOut->dfLiquiddt[idx3] -= d_brCompVar->depletionOfLiquidthroughBreakage[idx3];
// Consolidation (gas squeezed out as granules densify toward minPorosity); only
// meaningful while the bin still holds gas.
if(d_compartmentIn->fGas[idx3] > 1.0e-16)
{
d_brCompVar->transferThroughConsolidation[idx3] = consConst * d_compartmentOut->internalVolumeBins[idx3] * ((1 - minPorosity) / (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]));
d_brCompVar->transferThroughConsolidation[idx3] *= (d_compartmentOut->gasBins[idx3] - (minPorosity / (1-minPorosity)) * (d_compartmentIn->vs[s1] + d_compartmentIn->vss[ss1]) + d_compVar->internalLiquid[idx3]);
}
else
d_brCompVar->transferThroughConsolidation[idx3] = 0.0;
// dfGas/dt = transport + consolidation + net aggregation + net breakage.
d_compartmentOut->dfGasdt[idx3] = d_compVar->gasMovement[idx3];
d_compartmentOut->dfGasdt[idx3] += d_compartmentIn->fAll[idx3] * d_brCompVar->transferThroughConsolidation[idx3];
d_compartmentOut->dfGasdt[idx3] += d_aggCompVar->formationOfGasThroughAggregationCA[idx3] - d_aggCompVar->depletionOfGasThroughAggregation[idx3];
d_compartmentOut->dfGasdt[idx3] += d_brCompVar->gasBirthThroughBreakage1[idx3] + d_brCompVar->formationOfGasThroughBreakageCA[idx3];
d_compartmentOut->dfGasdt[idx3] -= d_brCompVar->depletionOfGasThroughBreakage[idx3];
__syncthreads();
// Thread 0 serially accumulates the per-compartment diagnostic totals
// (zeroed by launchCompartment at the start of the step).
if (tix == 0)
{
for (int i = bix * bdx; i < ((bix +1) * bdx); i++)
{
d_compartmentOut->formationThroughAggregation[bix] += d_aggCompVar->formationThroughAggregationCA[i];
d_compartmentOut->depletionThroughAggregation[bix] += d_aggCompVar->depletionThroughAggregation[i];
d_compartmentOut->formationThroughBreakage[bix] += d_brCompVar->formationThroughBreakageCA[i] + d_brCompVar->birthThroughBreakage1[i];
d_compartmentOut->depletionThroughBreakage[bix] += d_brCompVar->depletionThroughBreakage[i];
}
}
}
// ===================================== MAIN FUNCTION ======================================================
int main(int argc, char *argv[])
{
cout << "Code begins..." << endl;
// Read passed arguments
string startTimeStr;
double startTime = 0.0;
liggghtsData *lData = nullptr;
parameterData *pData = nullptr;
string coreVal;
string diaVal;
string pbmInFilePath;
string timeVal;
if (argc <5)
{
cout << "All values are not available as imput parameters " << endl;
return 1;
}
pbmInFilePath = string(argv[1]);
coreVal = string(argv[2]);
diaVal = string(argv[3]);
timeVal = string(argv[4]);
pData = parameterData::getInstance();
pData->readPBMInputFile(pbmInFilePath);
int nCompartments = pData->nCompartments;
unsigned int nFirstSolidBins = pData->nFirstSolidBins;
unsigned int nSecondSolidBins = pData->nSecondSolidBins;
size_t size1D = nFirstSolidBins;
size_t size2D = size1D * nSecondSolidBins;
size_t size3D = size2D * nCompartments;
size_t size4D = size2D * size2D;
size_t size5D = size4D * nCompartments;
CompartmentIn compartmentIn(size2D, size5D, 0), x_compartmentIn(size2D, size5D, 1), *d_compartmentIn;
PreviousCompartmentIn prevCompInData(size2D, size5D, 0), x_prevCompInData(size2D, size5D, 1), *d_prevCompInData;
CompartmentOut compartmentOut(size2D, size5D, 0), x_compartmentOut(size2D, size5D, 1), *d_compartmentOut;
CompartmentDEMIn compartmentDEMIn(size2D, size5D, 0), x_compartmentDEMIn(size2D, size5D, 1), *d_compartmentDEMIn;
vector<double> h_vs(size1D, 0.0);
vector<double> h_vss(size1D, 0.0);
// Bin Initiation
double fsVolCoeff = pData->fsVolCoeff;
double fsVolBase = pData->fsVolBase;
for (size_t i = 0; i < nFirstSolidBins; i++)
h_vs[i] = fsVolCoeff * pow(fsVolBase, i); // m^3
double ssVolCoeff = pData->ssVolCoeff;
double ssVolBase = pData->ssVolBase;
for (size_t i = 0; i < nSecondSolidBins; i++)
h_vss[i] = ssVolCoeff * pow(ssVolBase, i); // m^3
arrayOfDouble2D diameter1 = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
diameter1[s][ss] = cbrt((6/M_PI) * (h_vs[s] + h_vss[ss]));
vector<double> diameter = linearize2DVector(diameter1);
vector<double> particleIn;
particleIn.push_back(726657587.0);
particleIn.push_back(286654401.0);
particleIn.push_back(118218011.0);
particleIn.push_back(50319795.0);
particleIn.push_back(20954036.0);
particleIn.push_back(7345998.0);
particleIn.push_back(1500147.0);
particleIn.push_back(76518.0);
particleIn.push_back(149.0);
vector<double> h_fIn(size2D, 0.0);
for (size_t i = 0; i < particleIn.size(); i++)
h_fIn[i * size1D + i] = particleIn[i];
// allocation of memory for the matrices that will be copied onto the device from the host
double *d_vs = device_alloc_double_vector(size1D);
double *d_vss = device_alloc_double_vector(size1D);
double *d_sMeshXY = device_alloc_double_vector(size2D);
double *d_ssMeshXY = device_alloc_double_vector(size2D);
double *d_sAgg = device_alloc_double_vector(size2D);
double *d_ssAgg = device_alloc_double_vector(size2D);
int *d_sAggregationCheck = device_alloc_integer_vector(size2D);
int *d_ssAggregationCheck = device_alloc_integer_vector(size2D);
double *d_sLow = device_alloc_double_vector(size2D);
double *d_ssLow = device_alloc_double_vector(size2D);
double *d_sHigh = device_alloc_double_vector(size2D);
double *d_ssHigh = device_alloc_double_vector(size2D);
int *d_sLoc = device_alloc_integer_vector(size2D);
int *d_ssLoc = device_alloc_integer_vector(size2D);
int *d_sInd = device_alloc_integer_vector(size2D);
int *d_ssInd = device_alloc_integer_vector(size2D);
double *d_sBreak = device_alloc_double_vector(size2D);
double *d_ssBreak = device_alloc_double_vector(size2D);
int *d_sLocBreak = device_alloc_integer_vector(size2D);
int *d_ssLocBreak = device_alloc_integer_vector(size2D);
int *d_sCheckB = device_alloc_integer_vector(size2D);
int *d_ssCheckB = device_alloc_integer_vector(size2D);
int *d_sIndB = device_alloc_integer_vector(size2D);
int *d_ssIndB = device_alloc_integer_vector(size2D);
// defining vectors for data required for compartment calculations
vector<double> h_sMeshXY(size2D, 0.0);
vector<double> h_ssMeshXY(size2D, 0.0);
vector<int> h_sAggregationCheck(size2D, 0);
vector<int> h_ssAggregationCheck(size2D, 0);
vector<double> h_sLow(size2D, 0.0);
vector<double> h_ssLow(size2D, 0.0);
vector<double> h_sHigh(size2D, 0.0);
vector<double> h_ssHigh(size2D, 0.0);
vector<int> h_sInd(size2D, 0);
vector<int> h_ssInd(size2D, 0);
vector<int> h_sLoc(size2D, 0);
vector<int> h_ssLoc(size2D, 0);
vector<int> h_sCheckB(size2D, 0);
vector<int> h_ssCheckB(size2D, 0);
vector<int> h_sIndB(size2D, 0.0);
vector<int> h_ssIndB(size2D, 0.0);
vector<int> h_sLocBreak(size2D, 0.0);
vector<int> h_ssLocBreak(size2D, 0.0);
vector<double> h_sBreak(size2D, 0.0);
vector<double> h_ssBreak(size2D, 0.0);
copy_double_vector_fromHtoD(d_vs, h_vs.data(), size1D);
copy_double_vector_fromHtoD(d_vss, h_vss.data(), size1D);
int nBlocks = nFirstSolidBins;
int nThreads = nSecondSolidBins;
initialization_kernel<<<nBlocks,nThreads>>>(d_vs, d_vss, size2D, fsVolCoeff, ssVolCoeff, fsVolBase, ssVolBase, d_sAgg,d_ssAgg, d_sAggregationCheck, d_ssAggregationCheck,
d_sLow, d_ssLow, d_sHigh, d_ssHigh, d_sMeshXY, d_ssMeshXY, d_sLoc, d_ssLoc, d_sInd, d_ssInd, d_sBreak, d_ssBreak, d_sLocBreak, d_ssLocBreak,
d_sCheckB, d_ssCheckB, d_sIndB, d_ssIndB);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
initialization_kernel2<<<nBlocks,nThreads>>>(d_vs, d_vss, size2D, fsVolCoeff, ssVolCoeff, fsVolBase, ssVolBase, d_sAgg,d_ssAgg, d_sAggregationCheck, d_ssAggregationCheck,
d_sLow, d_ssLow, d_sHigh, d_ssHigh, d_sMeshXY, d_ssMeshXY, d_sLoc, d_ssLoc, d_sInd, d_ssInd, d_sBreak, d_ssBreak, d_sLocBreak, d_ssLocBreak,
d_sCheckB, d_ssCheckB, d_sIndB, d_ssIndB);
err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
cout << "Initialization complete" << endl;
// copy back data required for the compartment calculations
copy_double_vector_fromDtoH(h_vs.data(), d_vs, size1D);
copy_double_vector_fromDtoH(h_vss.data(), d_vss, size1D);
copy_double_vector_fromDtoH(h_sMeshXY.data(), d_sMeshXY, size2D);
copy_double_vector_fromDtoH(h_ssMeshXY.data(), d_ssMeshXY, size2D);
copy_integer_vector_fromDtoH(h_sAggregationCheck.data(), d_sAggregationCheck, size2D);
copy_integer_vector_fromDtoH(h_ssAggregationCheck.data(), d_ssAggregationCheck, size2D);
copy_double_vector_fromDtoH(h_sLow.data(), d_sLow, size2D);
copy_double_vector_fromDtoH(h_ssLow.data(), d_ssLow, size2D);
copy_double_vector_fromDtoH(h_sHigh.data(), d_sHigh, size2D);
copy_double_vector_fromDtoH(h_ssHigh.data(), d_ssHigh, size2D);
copy_integer_vector_fromDtoH(h_sInd.data(), d_sInd, size2D);
copy_integer_vector_fromDtoH(h_ssInd.data(), d_ssInd, size2D);
copy_integer_vector_fromDtoH(h_sLoc.data(), d_sLoc, size2D);
copy_integer_vector_fromDtoH(h_ssLoc.data(), d_ssLoc, size2D);
copy_integer_vector_fromDtoH(h_sCheckB.data(), d_sCheckB, size2D);
copy_integer_vector_fromDtoH(h_ssCheckB.data(), d_ssCheckB, size2D);
copy_integer_vector_fromDtoH(h_sIndB.data(), d_sIndB, size2D);
copy_integer_vector_fromDtoH(h_ssIndB.data(), d_ssIndB, size2D);
copy_integer_vector_fromDtoH(h_sLocBreak.data(), d_sLocBreak, size2D);
copy_integer_vector_fromDtoH(h_ssLocBreak.data(), d_ssLocBreak, size2D);
copy_double_vector_fromDtoH(h_sBreak.data(), d_sBreak, size2D);
copy_double_vector_fromDtoH(h_ssBreak.data(), d_ssBreak, size2D);
cudaDeviceSynchronize();
// DUMP(h_sMeshXY);
// DUMP(h_ssMeshXY);
// DUMP(h_sAggregationCheck);
// DUMP(h_ssAggregationCheck);
// DUMP(h_sLow);
// DUMP(h_ssLow);
// DUMP(h_sHigh);
// DUMP(h_ssHigh);
// DUMP(h_sInd);
// DUMP(h_ssInd);
// DUMP(h_sCheckB);
// DUMP(h_ssCheckB);
// DUMP(h_sIndB);
// DUMP(h_ssIndB);
// DUMP(h_sLocBreak);
// DUMP(h_ssLocBreak);
// DUMP(h_sBreak);
// DUMP(h_ssBreak);
// DUMP(h_sLoc);
// DUMP(h_ssLoc);
vector<double> h_fAllCompartments(size3D, 0.0);
vector<double> h_flAllCompartments(size3D, 0.0);
vector<double> h_fgAllCompartments(size3D, 0.0);
vector<double> h_dfdtAllCompartments(size3D, 0.0);
vector<double> h_dfldtAllCompartments(size3D, 0.0);
vector<double> h_dfgdtAllCompartments(size3D, 0.0);
vector<double> h_externalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_liquidBinsAllCompartments(size3D, 0.0);
vector<double> h_gasBinsAllCompartments(size3D, 0.0);
vector<double> h_totalVolumeBinsAllCompartments(size3D, 0.0);
vector<double> h_internalLiquidAllCompartments(size3D, 0.0);
vector<double> h_externalLiquidAllCompartments(size3D, 0.0);
vector<double> h_internalVolumeBins(size2D, 0.0);
vector<double> h_externalVolumeBins(size2D, 0.0);
lData = liggghtsData::getInstance();
lData->readLiggghtsDataFiles(coreVal, diaVal);
vector<double> DEMDiameter = lData->getDEMParticleDiameters();
if ((DEMDiameter).size() == 0)
{
cout << "Diameter data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> DEMImpactData = lData->getFinalDEMImpactData();
if ((DEMImpactData).size() == 0)
{
cout << "Impact data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
arrayOfDouble2D DEMCollisionData = lData->getFinalDEMCollisionData();
if (DEMCollisionData.size() == 0)
{
cout << "Collision data is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> velocity = lData->getFinalDEMImpactVelocity();
if (velocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
vector<double> colVelocity = lData->getFinalDEMCollisionVelocity();
if (colVelocity.size() == 0)
{
cout << "Velocity is missing in LIGGGHTS collision output file" << endl;
cout << "Input parameters for DEM core and diameter aren't matching with LIGGGHTS output file" << endl;
return 1;
}
// moved velocity based probability calculation to the model from kernel.cpp to reduce computation
double demTimeStep = pData->demTimeStep;
copy_double_vector_fromHtoD(x_compartmentDEMIn.velocityCol, colVelocity.data(), size1D);
double inverseDiameterSum = 0.0;
double inverseMassSum = 0.0;
int sized = DEMDiameter.size();
double solDensity = pData->solDensity;
for (int i = 0; i < sized; i++)
{
inverseDiameterSum += (1 / DEMDiameter[i]);
inverseMassSum += (1 / ((4 / 3) * M_PI * pow((DEMDiameter[i] / 2), 3) * solDensity));
}
double coefOfRest = pData->coefOfRest;
double liqThick = pData->liqThick;
double surfAsp = pData->surfAsp;
double bindVisc = pData->bindVisc;
double sumVelo = 0.0;
double harmonic_diameter = sized / inverseDiameterSum;
double harmonic_mass = sized / inverseMassSum;
double uCritical = (10 + (1 / coefOfRest)) * log((liqThick / surfAsp)) * (3 * M_PI * pow(harmonic_diameter, 2) * bindVisc) / (8 * harmonic_mass);
// x_compartmentDEMIn.uCriticalCol[0] = uCritical;
copy_double_vector_fromHtoD(x_compartmentDEMIn.uCriticalCol, &uCritical, 1);
// cout << "Critical velocity for agg is " << uCritical << endl;
int veloSize = colVelocity.size();
for (int i = 0; i < veloSize; i++)
sumVelo += colVelocity[i];
unsigned int nDEMBins = pData->nDEMBins;
double averageVelocity = sumVelo / nDEMBins;
double stdDevVelocity = 0.0;
double varianceVelocity = 0.0;
for (int i = 0; i < veloSize; ++i)
varianceVelocity += pow((colVelocity[i] - averageVelocity), 2) / nDEMBins;
stdDevVelocity = sqrt(varianceVelocity);
//double intVelocity = 0.0;
vector<double> colProbablityOfVelocity(veloSize, 0.0);
for (int i = 0; i < veloSize; i++)
{
colProbablityOfVelocity[i] = (1 / (colVelocity[i] * sqrt(2 * M_PI) * stdDevVelocity)) * exp(-((log(colVelocity[i]) - averageVelocity) / (2 * pow(varianceVelocity, 2))));
// cout << "Probability at " << colVelocity[i] << "is " << colProbablityOfVelocity[i] << endl;
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.colProbability, colProbablityOfVelocity.data(), size1D);
// vector<double> impactFrequency = DEMImpactData;
// for (int s = 0; s < nFirstSolidBins; s++)
// for (int ss = 0; ss < nSecondSolidBins; ss++)
// for (int i = 0; i < nDEMBins; i++)
// {
// if (fAll[n2] > 0.0)
// impactFrequency[i] = (DEMImpactData[i] * timeStep) / demTimeStep;
// }
double critStDefNum = pData->critStDefNum;
double initPorosity = pData->initPorosity;
// cout << critStDefNum << "\t" << solDensity << "\t" << initPorosity << "\t" << bindVisc << endl;
double Ubreak = (2 * critStDefNum / solDensity) * (9 / 8.0) * (pow((1 - initPorosity), 2) / pow(initPorosity, 2)) * (9 / 16.0) * (bindVisc / DEMDiameter[0]);
// x_compartmentDEMIn.ubreak[0] = Ubreak;
copy_double_vector_fromHtoD(x_compartmentDEMIn.ubreak, &Ubreak, 1);
// cout << "Critical velocity for breakage is " << Ubreak << endl;
copy_double_vector_fromHtoD(x_compartmentDEMIn.impVelocity, velocity.data(),size1D);
int size1 = velocity.size();
double sum = 0.0;
for (int i = 0; i < size1; i++)
sum += velocity[i];
double averageVelocityBr = sum / nDEMBins;
double stdDevVelocityBr = 0.0;
double varianceVelocityBr = 0.0;
for (int i = 0; i < size1; ++i)
{
varianceVelocityBr += pow((velocity[i] - averageVelocityBr), 2) / nDEMBins;
}
stdDevVelocityBr = sqrt(varianceVelocityBr);
//double intVelocity = 0.0;
// cout << "Std Dev. of Velocity = " << stdDevVelocity << endl;
vector<double> breakageProbablityOfVelocity(size1, 0.0);
for (int i = 0; i < size1; i++)
{
if (velocity[i] != 0)
{
breakageProbablityOfVelocity[i] = (1 / (velocity[i] * sqrt(2 * M_PI) * stdDevVelocityBr)) * exp(-((log(velocity[i]) - averageVelocityBr) / (2 * pow(varianceVelocityBr, 2))));
// cout << "Probability at " << velocity[i] << "is " << breakageProbablityOfVelocity[i] << endl;
}
}
copy_double_vector_fromHtoD(x_compartmentDEMIn.brProbability, breakageProbablityOfVelocity.data(), size1D);
DUMP2D(DEMCollisionData);
DUMP(DEMDiameter);
DUMP(DEMImpactData);
DUMP(velocity);
//Initialize DEM data for compartment
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMDiameter, DEMDiameter.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMCollisionData, (linearize2DVector(DEMCollisionData)).data(), size2D);
copy_double_vector_fromHtoD(x_compartmentDEMIn.DEMImpactData, DEMImpactData.data(), size1D);
vector<double> liquidAdditionRateAllCompartments(nCompartments, 0.0);
double liqSolidRatio = pData->liqSolidRatio;
double throughput = pData->throughput;
double liqDensity = pData->liqDensity;
double liquidAddRate = (liqSolidRatio * throughput) / (liqDensity * 3600);
liquidAdditionRateAllCompartments[0] = liquidAddRate;
arrayOfDouble2D h_fAllCompartmentsOverTime;
arrayOfDouble2D h_externalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_internalVolumeBinsAllCompartmentsOverTime;
arrayOfDouble2D h_liquidBinsAllCompartmentsOverTime;
arrayOfDouble2D h_gasBinsAllCompartmentsOverTime;
double granulatorLength = pData->granulatorLength;
double partticleResTime = pData->partticleResTime;
double particleAveVelo = granulatorLength / partticleResTime;
vector<double> particleAverageVelocity(nCompartments, particleAveVelo);
//Initialize input data for compartment
copy_double_vector_fromHtoD(x_compartmentIn.vs, h_vs.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentIn.vss, h_vss.data(), size1D);
copy_double_vector_fromHtoD(x_compartmentIn.diameter, diameter.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sMeshXY, h_sMeshXY.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssMeshXY, h_ssMeshXY.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sLow, h_sLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.sHigh, h_sHigh.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssLow, h_ssLow.data(), size2D);
copy_double_vector_fromHtoD(x_compartmentIn.ssHigh, h_ssHigh.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sAggregationCheck, h_sAggregationCheck.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssAggregationCheck, h_ssAggregationCheck.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sInd, h_sInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssInd, h_ssInd.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sCheckB, h_sCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssCheckB, h_ssCheckB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.sIndB, h_sIndB.data(), size2D);
copy_integer_vector_fromHtoD(x_compartmentIn.ssIndB, h_ssIndB.data(), size2D);
vector<int> sieveGrid;
sieveGrid.push_back(38);
sieveGrid.push_back(63);
sieveGrid.push_back(90);
sieveGrid.push_back(125);
sieveGrid.push_back(250);
sieveGrid.push_back(355);
sieveGrid.push_back(500);
sieveGrid.push_back(710);
sieveGrid.push_back(850);
sieveGrid.push_back(1000);
sieveGrid.push_back(1400);
sieveGrid.push_back(2000);
sieveGrid.push_back(2380);
sieveGrid.push_back(4000);
size_t nSieveGrid = sieveGrid.size();
arrayOfDouble2D d10OverTime;
arrayOfDouble2D d50OverTime;
arrayOfDouble2D d90OverTime;
double time = stod(timeVal); // initial time to start PBM
double timeStep = 0.5; //1.0e-1;
vector<double> Time;
// double lastTime = time;
int timeIdxCount = 0;
// int lastTimeIdxCount = 0;
double premixTime = pData->premixTime;
double liqAddTime = pData->liqAddTime;
double postMixTime = pData->postMixTime;
double finalTime = premixTime + liqAddTime + postMixTime + stod(timeVal);
vector<double *> formationThroughAggregationOverTime;
vector<double *> depletionThroughAggregationOverTime;
vector<double *> formationThroughBreakageOverTime;
vector<double *> depletionThroughBreakageOverTime;
cout << "time" << endl;
// defining compartment varibale pointers
CompartmentVar compVar(size3D, size5D, 0), d_compVarCpy(size3D, size5D, 1), *d_compVar;
AggregationCompVar aggCompVar(size3D, size5D, 0), x_aggCompVar(size3D, size5D, 1), *d_aggCompVar;
BreakageCompVar brCompVar(size3D, size5D, 0), x_brCompVar(size3D, size5D, 1), *d_brCompVar;
// allocating memory for structures used for compartment calculations
err = cudaMalloc(&d_compartmentIn, sizeof(CompartmentIn));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : CompartmentIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_prevCompInData, sizeof(PreviousCompartmentIn));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : prevCompInData (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_compartmentDEMIn, sizeof(CompartmentDEMIn));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : compartmentDEMIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_compVar, sizeof(CompartmentVar));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : CompartmentVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_aggCompVar, sizeof(AggregationCompVar));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : AggregationCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_brCompVar, sizeof(BreakageCompVar));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : BreakageCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_compartmentOut, sizeof(CompartmentOut));
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMalloc : d_compartmentOut (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// copying data to the allocated GPU
cudaMemcpy(d_compartmentIn, &x_compartmentIn, sizeof(CompartmentIn), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_prevCompInData, &x_prevCompInData, sizeof(PreviousCompartmentIn), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : PreviousCompartmentIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_compartmentDEMIn, &x_compartmentDEMIn, sizeof(CompartmentDEMIn), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentDEMIn (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_compVar, &d_compVarCpy, sizeof(CompartmentVar), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
double aggKernelConst = pData->aggKernelConst;
// x_aggCompVar.aggKernelConst[0] = aggKernelConst;
copy_double_vector_fromHtoD(x_aggCompVar.aggKernelConst, &aggKernelConst, 1);
double brkKernelConst = pData->brkKernelConst;
// x_brCompVar.brkKernelConst[0] = brkKernelConst;
copy_double_vector_fromHtoD(x_brCompVar.brkKernelConst, &brkKernelConst, 1);
cudaMemcpy(d_aggCompVar, &x_aggCompVar, sizeof(AggregationCompVar), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : AggregationCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_brCompVar, &x_brCompVar, sizeof(BreakageCompVar), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : BreakageCompVar (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaMemcpy(d_compartmentOut, &x_compartmentOut, sizeof(CompartmentOut), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : compartmentOut (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// vector<double> h_formationThroughAggregation(nCompartments, 0.0);
// vector<double> h_depletionThroughAggregation(nCompartments, 0.0);
// vector<double> h_formationThroughBreakage(nCompartments, 0.0);
// vector<double> h_depletionThroughBreakage(nCompartments, 0.0);
double *d_formationThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_depletionThroughAggregation = device_alloc_double_vector(nCompartments);
double *d_formationThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_depletionThroughBreakage = device_alloc_double_vector(nCompartments);
double *d_fAllCompartments = device_alloc_double_vector(size3D);
double *d_flAllCompartments = device_alloc_double_vector(size3D);
double *d_fgAllCompartments = device_alloc_double_vector(size3D);
double *d_liquidAdditionRateAllCompartments = device_alloc_double_vector(nCompartments);
double *d_fIn = device_alloc_double_vector(size2D);
copy_double_vector_fromHtoD(d_liquidAdditionRateAllCompartments, liquidAdditionRateAllCompartments.data(), nCompartments);
copy_double_vector_fromHtoD(d_fIn, h_fIn.data(), size2D);
// dim3 compKernel_nblocks, compKernel_nthreads;
// compKernel_nblocks = dim3(nCompartments,1,1);
// compKernel_nthreads = dim3(size2D, size2D,1);
// int compKernel_nblocks = 16;
// int compKernel_nthreads = size2D * size2D;
// cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 1792);
// double granulatorLength = pData->granulatorLength;
// double partticleResTime = pData->partticleResTime;
// double premixTime = pData->premixTime;
// double liqAddTime = pData->liqAddTime;
double consConst = pData->consConst;
double minPorosity = pData->minPorosity;
double granSatFactor = pData->granSatFactor;
int threads = size2D;
double initialTime = stod(timeVal);
CompartmentOut h_results(size2D, size5D, 1);
// cudaDeviceSynchronize();
while (time <= finalTime)
{
copy_double_vector_fromHtoD(d_fAllCompartments, h_fAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_flAllCompartments, h_flAllCompartments.data(), size3D);
copy_double_vector_fromHtoD(d_fgAllCompartments, h_fgAllCompartments.data(), size3D);
cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 0);
launchCompartment<<<nCompartments,threads>>>(d_compartmentIn, d_prevCompInData, d_compartmentOut, d_compartmentDEMIn, d_compVar, d_aggCompVar, d_brCompVar,
time, timeStep, initialTime, d_fAllCompartments, d_flAllCompartments, d_fgAllCompartments,
d_liquidAdditionRateAllCompartments, size2D, size3D, size4D, d_fIn, initPorosity, demTimeStep, nFirstSolidBins, nSecondSolidBins,
granulatorLength, partticleResTime, premixTime, liqAddTime, consConst, minPorosity, nCompartments, granSatFactor, aggKernelConst, brkKernelConst);
// cudaDeviceSynchronize();
err = cudaSuccess; // check kernel launach
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch launchCompartment kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 compKernel_nblocks, compKernel_nthreads;
cudaDeviceSynchronize();
// vector<int> h_idx4(size5D, 0);
// int *d_idx4 = device_alloc_integer_vector(size5D);
// copy_integer_vector_fromHtoD(d_idx4, h_idx4.data(), size5D);
performAggCalculations<<<nCompartments,threads>>>(d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_aggCompVar, time, timeStep, initialTime, demTimeStep, nFirstSolidBins, nSecondSolidBins, nCompartments, aggKernelConst/* , d_idx4 */);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Failed to launch agg kernel (error code %s)!\n", cudaGetErrorString(err));
}
// copy_integer_vector_fromDtoH(h_idx4.data(), d_idx4, size5D);
cudaDeviceSynchronize();
performBreakageCalculations<<<nCompartments,threads>>>(d_prevCompInData, d_compartmentIn, d_compartmentDEMIn, d_compartmentOut, d_compVar, d_brCompVar, time, timeStep, initialTime, demTimeStep, nFirstSolidBins, nSecondSolidBins, brkKernelConst);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Failed to launch breakage kernel (error code %s)!\n", cudaGetErrorString(err));
}
cudaDeviceSynchronize();
consolidationAndMovementCalcs<<<nCompartments,threads>>>(d_compartmentIn, d_prevCompInData, d_compartmentOut, d_compartmentDEMIn, d_compVar, d_aggCompVar, d_brCompVar, nCompartments, granulatorLength, partticleResTime,
time, timeStep, premixTime, liqAddTime, initialTime, nFirstSolidBins, nSecondSolidBins, consConst, minPorosity);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Failed to launch breakage kernel (error code %s)!\n", cudaGetErrorString(err));
}
// cout << "Compartment ended " << endl;
cudaDeviceSynchronize();
// Copying data strcutres reqd for calculation
err = cudaMemcpy(&h_results, d_compartmentOut, sizeof(CompartmentOut), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to cudaMemcpy : CompartmentOut D to Hmake (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// copy necessary variables back to the CPU
copy_double_vector_fromDtoH(compartmentOut.dfAlldt, h_results.dfAlldt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfLiquiddt, h_results.dfLiquiddt, size3D);
copy_double_vector_fromDtoH(compartmentOut.dfGasdt, h_results.dfGasdt, size3D);
copy_double_vector_fromDtoH(compartmentOut.liquidBins, h_results.liquidBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.gasBins, h_results.gasBins, size3D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughAggregation, h_results.formationThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughAggregation, h_results.depletionThroughAggregation, size1D);
copy_double_vector_fromDtoH(compartmentOut.formationThroughBreakage, h_results.formationThroughBreakage, size1D);
copy_double_vector_fromDtoH(compartmentOut.depletionThroughBreakage, h_results.depletionThroughBreakage, size1D);
// copy_double_vector_fromDtoH(h_fAllCompartments.data(), d_fAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_flAllCompartments.data(), d_flAllCompartments, size3D);
// copy_double_vector_fromDtoH(h_fgAllCompartments.data(), d_fgAllCompartments, size3D);
formationThroughAggregationOverTime.push_back(compartmentOut.formationThroughAggregation);
depletionThroughAggregationOverTime.push_back(compartmentOut.depletionThroughAggregation);
formationThroughBreakageOverTime.push_back(compartmentOut.formationThroughBreakage);
depletionThroughBreakageOverTime.push_back(compartmentOut.depletionThroughBreakage);
// for (int w = 0; w < nCompartments; w++)
// {
// cout << "Compartment Number = " << w +1 << endl;
// cout << "compartmentOut.formationThroughAggregation = " << compartmentOut.formationThroughAggregation[w] << endl;
// cout << "compartmentOut.depletionThroughAggregation = " << compartmentOut.depletionThroughAggregation[w] << endl;
// cout << "Agg Ratio = " << compartmentOut.formationThroughAggregation[w] / compartmentOut.depletionThroughAggregation[w] << endl;
// cout << "compartmentOut.formationThroughBreakage = " << compartmentOut.formationThroughBreakage[w] << endl;
// cout << "compartmentOut.depletionThroughBreakage = " << compartmentOut.depletionThroughBreakage[w] << endl;
// cout << "Breakage Ratio = " << compartmentOut.formationThroughBreakage[w] / compartmentOut.depletionThroughBreakage[w] << endl;
// }
double maxofthree = -DBL_MAX;
double maxAll = -DBL_MAX;
double maxLiquid = -DBL_MAX;
double maxGas = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
// cout << "compartmentOut.dfAlldt[" << i << "] is " << compartmentOut.dfAlldt[i] << endl;
if (fabs(h_fAllCompartments[i]) > 1.0e-16)
maxAll = max(maxAll, -compartmentOut.dfAlldt[i] / h_fAllCompartments[i]);
if (fabs(h_flAllCompartments[i]) > 1.0e-16)
maxLiquid = max(maxLiquid, -compartmentOut.dfLiquiddt[i] / h_flAllCompartments[i]);
if (fabs(h_fgAllCompartments[i]) > 1.0e-16)
maxGas = max(maxGas, -compartmentOut.dfGasdt[i] / h_fgAllCompartments[i]);
maxofthree = max(maxofthree, max(maxAll, max(maxLiquid, maxGas)));
}
cout << "maxAll = " << maxAll << endl;
cout << "maxLiquid = " << maxLiquid << endl;
cout << "maxGas = " << maxGas << endl;
cout << "maxofthree = " << maxofthree << endl;
while (maxofthree < 0.1 / timeStep && timeStep < 0.25)
timeStep *= 2.0;
while (maxofthree > 0.1 / timeStep && timeStep > 5.0e-5)
timeStep /= 2.0;
int nanCount = 0;
double minfAll = -DBL_MAX;
for (size_t i = 0; i < size3D; i++)
{
double value = 0.0;
h_fAllCompartments[i] = h_fAllCompartments[i] + compartmentOut.dfAlldt[i] * timeStep;
// cout << " h_fAllCompartments[" << i <<"] is " << h_fAllCompartments[i] << endl;
if (std::isnan(h_fAllCompartments[i]))
nanCount++;
value = h_flAllCompartments[i] + compartmentOut.dfLiquiddt[i] * timeStep;
h_flAllCompartments[i] = value > 0.0 ? value : 0.0;
value = h_fgAllCompartments[i] + compartmentOut.dfGasdt[i] * timeStep;
h_fgAllCompartments[i] = value > 0.0 ? value : 0.0;
}
if (nanCount)
{
cout << endl << "*****fAllCompartments has " << nanCount << "nan values******" << endl << endl;
DUMPCSV(h_fAllCompartments);
exit(EXIT_FAILURE);
}
int countnegfAll = 0;
minfAll = getMinimumOfArray(h_fAllCompartments);
if (minfAll < -1.0e-16 && countnegfAll > 0.1 * nCompartments * nFirstSolidBins * nSecondSolidBins)
{
//int mpi_err = 0;
cout << endl;
//DUMP3DCSV(dfdtAllCompartments);
//DUMP3DCSV(fAllCompartments);
//cout << "My process id = " << mpi_id << endl;
cout << "minfAll" << minfAll << endl;
cout << "******fAllCompartments has negative values********" << endl;
cout << "Number of negative values = " << countnegfAll << endl;
DUMPCSV(h_fAllCompartments);
cout << "Aborting..." << endl;
return 1;
}
// BIN recalculation
for (int c = 0; c < nCompartments; c++)
{
vector<double> liquidBins(size2D, 0.0);
vector<double> gasBins(size2D, 0.0);
vector<double> internalLiquid(size2D, 0.0);
vector<double> externalLiquid(size2D, 0.0);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
int n2 = s * nSecondSolidBins + ss;
if (fabs(h_fAllCompartments[m]) > 1.0e-16)
{
liquidBins[n2] = h_flAllCompartments[m] / h_fAllCompartments[m];
gasBins[n2] = h_fgAllCompartments[m] / h_fAllCompartments[m];
}
internalLiquid[n2] = min(granSatFactor * gasBins[n2], liquidBins[n2]);
externalLiquid[n2] = max(0.0, liquidBins[n2] - internalLiquid[n2]);
double value = compartmentIn.sMeshXY[n2] + compartmentIn.ssMeshXY[n2] + gasBins[n2];
h_internalVolumeBins[n2] = value + internalLiquid[n2];
h_externalVolumeBins[n2] = value + liquidBins[n2];
h_liquidBinsAllCompartments[m] = liquidBins[n2];
h_gasBinsAllCompartments[m] = gasBins[n2];
h_externalVolumeBinsAllCompartments[m] = h_externalVolumeBins[n2];
h_internalVolumeBinsAllCompartments[m] = h_internalVolumeBins[n2];
}
}
vector<double> d10OverCompartment(nCompartments, 0.0);
vector<double> d50OverCompartment(nCompartments, 0.0);
vector<double> d90OverCompartment(nCompartments, 0.0);
for (int c = 0; c < nCompartments; c++)
{
arrayOfDouble2D diameter = getArrayOfDouble2D(nFirstSolidBins, nSecondSolidBins);
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
diameter[s][ss] = cbrt((6 / M_PI) * h_externalVolumeBinsAllCompartments[m]) * 1.0e6;
}
vector<double> totalVolumeGrid(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid - 1; d++)
for (size_t s = 0; s < nFirstSolidBins; s++)
for (size_t ss = 0; ss < nSecondSolidBins; ss++)
{
int m = c * nFirstSolidBins * nSecondSolidBins + s * nSecondSolidBins + ss;
if (diameter[s][ss] < sieveGrid[d + 1] && diameter[s][ss] >= sieveGrid[d])
totalVolumeGrid[d] += h_fAllCompartments[m] * h_externalVolumeBinsAllCompartments[m];
}
double sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
sum += totalVolumeGrid[d];
vector<double> volumeDistribution(nSieveGrid, 0.0);
for (size_t d = 0; d < nSieveGrid; d++)
if(sum > 1e-16)
volumeDistribution[d] = totalVolumeGrid[d] / sum;
vector<double> cumulativeVolumeDistribution(nSieveGrid, 0.0);
sum = 0.0;
for (size_t d = 0; d < nSieveGrid; d++)
{
sum += volumeDistribution[d];
cumulativeVolumeDistribution[d] = sum;
}
double d10 = 0.1 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d50 = 0.5 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
double d90 = 0.9 * (sieveGrid[1] / cumulativeVolumeDistribution[0]);
for (size_t d = 1; d < nSieveGrid; d++)
{
double value1 = (sieveGrid[d] - sieveGrid[d - 1]) / (cumulativeVolumeDistribution[d] - cumulativeVolumeDistribution[d - 1]);
double value2 = sieveGrid[d - 1];
if (cumulativeVolumeDistribution[d - 1] < 0.5 && cumulativeVolumeDistribution[d] >= 0.5)
{
double value = 0.5 - cumulativeVolumeDistribution[d - 1];
d50 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.1 && cumulativeVolumeDistribution[d] >= 0.1)
{
double value = 0.1 - cumulativeVolumeDistribution[d - 1];
d10 = value * value1 + value2;
}
if (cumulativeVolumeDistribution[d - 1] < 0.1 && cumulativeVolumeDistribution[d] >= 0.1)
{
double value = 0.9 - cumulativeVolumeDistribution[d - 1];
d90 = value * value1 + value2;
}
}
d10OverCompartment[c] = d10;
d50OverCompartment[c] = d50;
d10OverCompartment[c] = d90;
}
Time.push_back(time);
d10OverTime.push_back(d10OverCompartment);
d50OverTime.push_back(d50OverCompartment);
d90OverTime.push_back(d90OverCompartment);
//SAVING OVER TIME
//cout << endl << "************Saving over time" << endl << endl;
h_fAllCompartmentsOverTime.push_back(h_fAllCompartments);
h_externalVolumeBinsAllCompartmentsOverTime.push_back(h_externalVolumeBinsAllCompartments);
h_internalVolumeBinsAllCompartmentsOverTime.push_back(h_internalVolumeBinsAllCompartments);
h_liquidBinsAllCompartmentsOverTime.push_back(h_liquidBinsAllCompartments);
h_gasBinsAllCompartmentsOverTime.push_back(h_gasBinsAllCompartments);
cout << "time = " << time << endl;
cout << "timeStep = " << timeStep << endl;
cout << endl;
timeIdxCount++;
time += timeStep;
// free_double_vector_device(h_results.dfAlldt);
// free_double_vector_device(h_results.dfLiquiddt);
// free_double_vector_device(h_results.dfGasdt);
// free_double_vector_device(h_results.liquidBins);
// free_double_vector_device(h_results.gasBins);
// free_double_vector_device(h_results.formationThroughAggregation);
// free_double_vector_device(h_results.depletionThroughAggregation);
// free_double_vector_device(h_results.formationThroughBreakage);
// free_double_vector_device(h_results.depletionThroughBreakage);
}
size_t nTimeSteps = Time.size();
cout << endl
<< "nTimeSteps = " << nTimeSteps << endl
<< endl;
//dump values for ratio plots
dumpDiaCSVpointer(Time, formationThroughAggregationOverTime, Time.size() * nCompartments, string("FormationThroughAggregation"));
dumpDiaCSVpointer(Time, depletionThroughAggregationOverTime, Time.size() * nCompartments, string("DepletionThroughAggregation"));
dumpDiaCSVpointer(Time, formationThroughBreakageOverTime, Time.size() * nCompartments, string("FormationThroughBreakage"));
dumpDiaCSVpointer(Time, depletionThroughBreakageOverTime, Time.size() * nCompartments, string("DepletionThroughBreakage"));
dumpDiaCSV(Time, d50OverTime, string(("d50OverTime")));
double endTime = static_cast<double>(clock()) / static_cast<double>(CLOCKS_PER_SEC);
cout << "That took " << endTime - startTime << " seconds" << endl;
cout << "Code End" << endl;
return 0;
// vector<double> h(size4D, 0.0);
// for (int i = 0; i < size5D; i++)
// {
// cout << "At i = " << i << " kernel = " << compartmentOut.aggregationKernel[i] << endl;
// }
// cudaFree(d_vs);
// cudaFree(d_vss);
// cudaFree(d_sMeshXY);
// cudaFree(d_ssMeshXY);
// cudaFree(d_compartmentIn);
} |
f4dfbad4d9428d641188d84e071dddbdc4fba24b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <sstream>
#include <cstdlib>
#include <chrono>
#include <roctracer/roctx.h>
#include <unistd.h>
#include "common/cuda_check.hpp"
#include "common/common.hpp"
// Compile-time selector for the streaming kernel's access pattern:
// READ streams data out of the buffer (bandwidth-read test),
// WRITE streams a constant value into it (bandwidth-write test).
enum class op_type
{
  READ,
  WRITE
};
// Grid-stride streaming kernel.
// `size` is the buffer size in BYTES; each thread visits elements
// gid, gid+stride, gid+2*stride, ...
// When op == READ, the thread sums the elements it visits and stores its
// partial sum to output[gid] (so `output` must hold one slot per launched
// thread). When op == WRITE, it stores `val` into every element it visits
// and `output` is untouched.
template <typename data_type, op_type op>
__global__ void stream_thread(data_type *ptr, const size_t size,
                              data_type *output, const data_type val)
{
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t stride = blockDim.x * gridDim.x;
  const size_t count = size / sizeof(data_type);

  data_type sum = 0;
  for (size_t i = gid; i < count; i += stride)
  {
    if (op == op_type::READ)
      sum += ptr[i];
    else
      ptr[i] = val;
  }

  if (op == op_type::READ)
    output[gid] = sum;
}
// Bandwidth sweep: for geometrically increasing buffer sizes and a range of
// grid/block configurations, time a read-streaming kernel over HIP managed
// memory and report achieved GB/s.
// Fixes: checks the kernel launch itself with hipGetLastError() (previously
// a bad launch configuration would go unnoticed until much later); removes
// the unused `pageSize` and `buffer` locals.
int main(void)
{
  RT_CHECK(hipFree(0));  // force runtime/context initialization up front

  size_t memTotal, memAvail;
  RT_CHECK(hipSetDevice(0));
  RT_CHECK(hipMemGetInfo(&memAvail, &memTotal));

  hipDeviceProp_t prop;
  RT_CHECK(hipGetDeviceProperties(&prop, 0));
  const int numSMs = prop.multiProcessorCount;
  const int threadsPerSM = prop.maxThreadsPerMultiProcessor;
  const int major = prop.major;

  // Resident-blocks-per-SM limit, keyed on major compute capability.
  int blocksPerSM;
  if (major < 3)
  {
    blocksPerSM = 8;
  }
  else if (major < 6)
  {
    blocksPerSM = 16;
  }
  else
  {
    blocksPerSM = 32;
  }

  std::cout << prop.name << ":\n";
  std::cout << "\t" << numSMs << " SMs\n";
  std::cout << "\t" << threadsPerSM << " threads/SM\n";
  std::cout << "\t" << blocksPerSM << " blocks/SM\n";

  typedef float data_type;
  data_type *output, *ptr;

  // Double the buffer size (bytes) until it would exceed free device memory.
  for (size_t n = 128; n < memAvail; n *= 2)
  {
    dim3 dimGrid;
    dim3 dimBlock;
    for (dimGrid.x = numSMs; dimGrid.x <= blocksPerSM * numSMs; dimGrid.x = dimGrid.x * 2)
    {
      for (dimBlock.x = 32; dimBlock.x <= 1024; dimBlock.x *= 2)
      {
        RT_CHECK(hipMallocManaged(&ptr, n));
        // One output slot per launched thread (see stream_thread's contract).
        RT_CHECK(hipMalloc(&output, dimGrid.x * dimBlock.x * sizeof(data_type)));
        RT_CHECK(hipDeviceSynchronize());

        auto start = std::chrono::high_resolution_clock::now();
        hipLaunchKernelGGL(( stream_thread<data_type, op_type::READ>), dim3(dimGrid), dim3(dimBlock), 0, 0, ptr, n, output, 1.0);
        RT_CHECK(hipGetLastError());       // catch launch-configuration errors
        RT_CHECK(hipDeviceSynchronize());  // catch asynchronous execution errors
        auto end = std::chrono::high_resolution_clock::now();

        std::chrono::duration<double> elapsed_seconds = end - start;
        std::cout << "n=" << n << " (" << dimGrid.x << "x" << dimBlock.x << ") " << elapsed_seconds.count() << "s " << n / 1e9 / elapsed_seconds.count() << "GB/s\n";

        RT_CHECK(hipFree(ptr));
        RT_CHECK(hipFree(output));
      }
    }
  }
  return 0;
}
| f4dfbad4d9428d641188d84e071dddbdc4fba24b.cu | #include <cstdio>
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <sstream>
#include <cstdlib>
#include <chrono>
#include <nvToolsExt.h>
#include <unistd.h>
#include "common/cuda_check.hpp"
#include "common/common.hpp"
// Compile-time selector for the streaming kernel's access pattern:
// READ streams data out of the buffer (bandwidth-read test),
// WRITE streams a constant value into it (bandwidth-write test).
enum class op_type
{
  READ,
  WRITE
};
// Grid-stride streaming kernel.
// `size` is the buffer size in BYTES; each thread visits elements
// gid, gid+stride, gid+2*stride, ...
// When op == READ, the thread sums the elements it visits and stores its
// partial sum to output[gid] (so `output` must hold one slot per launched
// thread). When op == WRITE, it stores `val` into every element it visits
// and `output` is untouched.
template <typename data_type, op_type op>
__global__ void stream_thread(data_type *ptr, const size_t size,
                              data_type *output, const data_type val)
{
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t stride = blockDim.x * gridDim.x;
  const size_t count = size / sizeof(data_type);

  data_type sum = 0;
  for (size_t i = gid; i < count; i += stride)
  {
    if (op == op_type::READ)
      sum += ptr[i];
    else
      ptr[i] = val;
  }

  if (op == op_type::READ)
    output[gid] = sum;
}
// Bandwidth sweep: for geometrically increasing buffer sizes and a range of
// grid/block configurations, time a read-streaming kernel over CUDA managed
// memory and report achieved GB/s.
// Fixes: checks the kernel launch itself with cudaGetLastError() (previously
// a bad launch configuration would go unnoticed until much later); removes
// the unused `pageSize` and `buffer` locals.
int main(void)
{
  RT_CHECK(cudaFree(0));  // force runtime/context initialization up front

  size_t memTotal, memAvail;
  RT_CHECK(cudaSetDevice(0));
  RT_CHECK(cudaMemGetInfo(&memAvail, &memTotal));

  cudaDeviceProp prop;
  RT_CHECK(cudaGetDeviceProperties(&prop, 0));
  const int numSMs = prop.multiProcessorCount;
  const int threadsPerSM = prop.maxThreadsPerMultiProcessor;
  const int major = prop.major;

  // Resident-blocks-per-SM limit, keyed on major compute capability.
  int blocksPerSM;
  if (major < 3)
  {
    blocksPerSM = 8;
  }
  else if (major < 6)
  {
    blocksPerSM = 16;
  }
  else
  {
    blocksPerSM = 32;
  }

  std::cout << prop.name << ":\n";
  std::cout << "\t" << numSMs << " SMs\n";
  std::cout << "\t" << threadsPerSM << " threads/SM\n";
  std::cout << "\t" << blocksPerSM << " blocks/SM\n";

  typedef float data_type;
  data_type *output, *ptr;

  // Double the buffer size (bytes) until it would exceed free device memory.
  for (size_t n = 128; n < memAvail; n *= 2)
  {
    dim3 dimGrid;
    dim3 dimBlock;
    for (dimGrid.x = numSMs; dimGrid.x <= blocksPerSM * numSMs; dimGrid.x = dimGrid.x * 2)
    {
      for (dimBlock.x = 32; dimBlock.x <= 1024; dimBlock.x *= 2)
      {
        RT_CHECK(cudaMallocManaged(&ptr, n));
        // One output slot per launched thread (see stream_thread's contract).
        RT_CHECK(cudaMalloc(&output, dimGrid.x * dimBlock.x * sizeof(data_type)));
        RT_CHECK(cudaDeviceSynchronize());

        auto start = std::chrono::high_resolution_clock::now();
        stream_thread<data_type, op_type::READ><<<dimGrid, dimBlock>>>(ptr, n, output, 1.0);
        RT_CHECK(cudaGetLastError());       // catch launch-configuration errors
        RT_CHECK(cudaDeviceSynchronize());  // catch asynchronous execution errors
        auto end = std::chrono::high_resolution_clock::now();

        std::chrono::duration<double> elapsed_seconds = end - start;
        std::cout << "n=" << n << " (" << dimGrid.x << "x" << dimBlock.x << ") " << elapsed_seconds.count() << "s " << n / 1e9 / elapsed_seconds.count() << "GB/s\n";

        RT_CHECK(cudaFree(ptr));
        RT_CHECK(cudaFree(output));
      }
    }
  }
  return 0;
}
|
1bc85e591f74e12fdc4c10d6cfc509a6c88a1e35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
// Forward ROI max-pooling.
// One thread per pooled-output element (n, c, ph, pw), covered by a
// 2-D-grid-stride loop so any `count` works regardless of launch size.
// bottom_rois packs 5 values per ROI: [batch_index, x1, y1, x2, y2];
// a negative batch_index marks a padded/invalid ROI whose output is zeroed.
// argmax_data records, per output element, the flat (h * width + w) index
// of the selected maximum (stored as Dtype), or -1 for an empty bin — this
// is consumed by the backward kernel.
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
                                     const float spatial_scale, const float pad_ratio,
                                     const int channels, const int height, const int width,
                                     const int pooled_height, const int pooled_width,
                                     const Dtype* bottom_rois, Dtype* top_data,
                                     Dtype* argmax_data) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    bottom_rois += n * 5;
    int roi_batch_ind = bottom_rois[0];
    // Padded/invalid ROI: emit zeros and move to the next element.
    if (roi_batch_ind < 0) {
      top_data[index] = 0;
      argmax_data[index] = 0;
      continue;
    }

    // Expand the ROI by pad_ratio of its width/height on each side, then
    // scale image coordinates down to feature-map coordinates.
    Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
    Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
    int roi_start_w = round((bottom_rois[1] - pad_w) * spatial_scale);
    int roi_start_h = round((bottom_rois[2] - pad_h) * spatial_scale);
    int roi_end_w = round((bottom_rois[3] + pad_w) * spatial_scale);
    int roi_end_h = round((bottom_rois[4] + pad_h) * spatial_scale);

    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    // Size of one pooling bin in feature-map pixels (fractional).
    Dtype bin_size_h = static_cast<Dtype>(roi_height)
                       / static_cast<Dtype>(pooled_height);
    Dtype bin_size_w = static_cast<Dtype>(roi_width)
                       / static_cast<Dtype>(pooled_width);

    // Bin (ph, pw) covers [floor(ph*bin_h), ceil((ph+1)*bin_h)) x
    //                     [floor(pw*bin_w), ceil((pw+1)*bin_w)).
    int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
                                        * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
                                        * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
                                     * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
                                     * bin_size_w));

    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);

    // Define an empty pooling region to be zero
    Dtype maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    // Jump to the (batch, channel) plane this output element reads from.
    bottom_data += (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        if (bottom_data[bottom_index] > maxval) {
          maxval = bottom_data[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = (Dtype)maxidx;
  }
}
// Launches the forward ROI-pooling kernel over every element of `out`.
// The block count is folded into a 2-D grid so outputs larger than one
// grid dimension's limit are still covered (the kernel uses a matching
// 2-D-grid-stride loop). `max_idx` receives the argmax positions needed
// by the backward pass.
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
                           const Tensor<gpu, 4, Dtype> &data,
                           const Tensor<gpu, 2, Dtype> &bbox,
                           const Tensor<gpu, 4, Dtype> &max_idx,
                           const float spatial_scale,
                           const float pad_ratio) {
  const int total = out.shape_.Size();
  // Ceil-divide to cover every output element, then wrap into two grid dims.
  const int blocksNeeded = (total + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  dim3 grid(kMaxGridDim, (blocksNeeded + kMaxGridDim - 1) / kMaxGridDim);
  dim3 block(kMaxThreadsPerBlock);
  CheckLaunchParam(grid, block, "ROIPooling Forward");
  hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
  hipLaunchKernelGGL(( ROIPoolForwardKernel<Dtype>), dim3(grid), dim3(block), 0, stream,
      total, data.dptr_, spatial_scale, pad_ratio,
      data.size(1), data.size(2), data.size(3),
      out.size(2), out.size(3), bbox.dptr_, out.dptr_, max_idx.dptr_);
}
// Backward pass for ROI max-pooling, accumulating into bottom_diff.
// One thread per bottom element (n, c, h, w) via a 2-D-grid-stride loop.
// Each thread scans every ROI that (a) belongs to its batch image and
// (b) covers its (h, w) location, and adds the top gradient wherever the
// forward argmax selected this element. Gradients are accumulated (+=),
// matching the "Acc" contract.
//
// Bug fix: pad_w/pad_h were computed from `bottom_rois` (i.e. always ROI 0)
// instead of `offset_bottom_rois` (the ROI being scanned), so the backward
// ROI window disagreed with the forward kernel for every ROI but the first.
// Both now use offset_bottom_rois, mirroring ROIPoolForwardKernel exactly.
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
                                         const Dtype* argmax_data, const int num_rois,
                                         const float spatial_scale, const float pad_ratio,
                                         const int channels, const int height, const int width,
                                         const int pooled_height, const int pooled_width,
                                         Dtype* bottom_diff, const Dtype* bottom_rois) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;

    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }

      // Recompute the padded, scaled ROI window exactly as the forward
      // kernel does — using THIS ROI's own coordinates.
      Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio;
      Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio;
      int roi_start_w = round((offset_bottom_rois[1] - pad_w) * spatial_scale);
      int roi_start_h = round((offset_bottom_rois[2] - pad_h) * spatial_scale);
      int roi_end_w = round((offset_bottom_rois[3] + pad_w) * spatial_scale);
      int roi_end_h = round((offset_bottom_rois[4] + pad_h) * spatial_scale);

      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }

      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const Dtype* offset_argmax_data = argmax_data + offset;

      // Compute feasible set of pooled units that could have pooled
      // this bottom unit.
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                         / static_cast<Dtype>(pooled_width);

      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);

      // Add this ROI's gradient only where the forward argmax chose (h, w).
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] += gradient;
  }
}
// Launches the accumulating backward ROI-pooling kernel over every element
// of `in_grad`, using the same 2-D grid folding as the forward launcher.
// `max_idx` supplies the argmax positions recorded by the forward pass.
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
                               const Tensor<gpu, 4, Dtype> &out_grad,
                               const Tensor<gpu, 2, Dtype> &bbox,
                               const Tensor<gpu, 4, Dtype> &max_idx,
                               const float spatial_scale,
                               const float pad_ratio) {
  const int total = in_grad.shape_.Size();
  // Ceil-divide to cover every bottom element, then wrap into two grid dims.
  const int blocksNeeded = (total + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  dim3 grid(kMaxGridDim, (blocksNeeded + kMaxGridDim - 1) / kMaxGridDim);
  dim3 block(kMaxThreadsPerBlock);
  CheckLaunchParam(grid, block, "ROIPooling Backward");
  hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
  hipLaunchKernelGGL(( ROIPoolBackwardAccKernel<Dtype>), dim3(grid), dim3(block), 0, stream,
      total, out_grad.dptr_, max_idx.dptr_, bbox.size(0), spatial_scale, pad_ratio,
      in_grad.size(1), in_grad.size(2), in_grad.size(3),
      out_grad.size(2), out_grad.size(3), in_grad.dptr_, bbox.dptr_);
}
} // namespace cuda
// mshadow-level entry point: forwards directly to the GPU implementation
// in the cuda:: namespace (no additional logic).
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
                           const Tensor<gpu, 4, Dtype> &data,
                           const Tensor<gpu, 2, Dtype> &bbox,
                           const Tensor<gpu, 4, Dtype> &max_idx,
                           const float spatial_scale,
                           const float pad_ratio) {
  cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale, pad_ratio);
}
// mshadow-level entry point: forwards directly to the GPU implementation
// in the cuda:: namespace (no additional logic). Gradients are accumulated
// into in_grad by the underlying kernel.
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
                               const Tensor<gpu, 4, Dtype> &out_grad,
                               const Tensor<gpu, 2, Dtype> &bbox,
                               const Tensor<gpu, 4, Dtype> &max_idx,
                               const float spatial_scale,
                               const float pad_ratio) {
  cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale, pad_ratio);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 1bc85e591f74e12fdc4c10d6cfc509a6c88a1e35.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file roi_pooling.cu
* \brief roi pooling operator
* \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
*/
#include "./roi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
template<typename Dtype>
__global__ void ROIPoolForwardKernel(const int count, const Dtype* bottom_data,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data,
Dtype* argmax_data) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
if (roi_batch_ind < 0) {
top_data[index] = 0;
argmax_data[index] = 0;
continue;
}
Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((bottom_rois[4] + pad_h) * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = (Dtype)maxidx;
}
}
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *bottom_data = data.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *top_data = out.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
ROIPoolForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, bottom_data, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_rois, top_data, argmax_data);
}
template<typename Dtype>
__global__ void ROIPoolBackwardAccKernel(const int count, const Dtype* top_diff,
const Dtype* argmax_data, const int num_rois,
const float spatial_scale, const float pad_ratio,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
Dtype pad_w = (bottom_rois[3] - bottom_rois[1] + 1) * pad_ratio;
Dtype pad_h = (bottom_rois[4] - bottom_rois[2] + 1) * pad_ratio;
int roi_start_w = round((offset_bottom_rois[1] - pad_w) * spatial_scale);
int roi_start_h = round((offset_bottom_rois[2] - pad_h) * spatial_scale);
int roi_end_w = round((offset_bottom_rois[3] + pad_w) * spatial_scale);
int roi_end_h = round((offset_bottom_rois[4] + pad_h) * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (static_cast<int>(offset_argmax_data[ph * pooled_width + pw]) == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] += gradient;
}
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
const Dtype *top_diff = out_grad.dptr_;
const Dtype *bottom_rois = bbox.dptr_;
Dtype *bottom_diff = in_grad.dptr_;
Dtype *argmax_data = max_idx.dptr_;
const int count = in_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward");
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
ROIPoolBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>(
count, top_diff, argmax_data, num_rois, spatial_scale, pad_ratio, channels, height, width,
pooled_height, pooled_width, bottom_diff, bottom_rois);
}
} // namespace cuda
template<typename Dtype>
inline void ROIPoolForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolForward(out, data, bbox, max_idx, spatial_scale, pad_ratio);
}
template<typename Dtype>
inline void ROIPoolBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad,
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 2, Dtype> &bbox,
const Tensor<gpu, 4, Dtype> &max_idx,
const float spatial_scale,
const float pad_ratio) {
cuda::ROIPoolBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale, pad_ratio);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
c23ea7cb7a6aac483feb68c54fdb38aa2287002f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* Code edits and additions
* Copyright 2018 Rommel Quintanilla <rommel@blazingdb.com>
*/
#include <cmath>
#include <algorithm>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include "cudf.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include "rmm/thrust_rmm_allocator.h"
#include <bitmask/legacy_bitmask.hpp>
template<typename T, typename Tout, typename F>
__global__
void gpu_unary_op(const T *data, const gdf_valid_type *valid,
gdf_size_type size, Tout *results, F functor) {
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
if ( valid ) { // has valid mask
for (int i=start; i<size; i+=step) {
if ( gdf_is_valid(valid, i) )
results[i] = functor.apply(data[i]);
}
} else { // no valid mask
for (int i=start; i<size; i+=step) {
results[i] = functor.apply(data[i]);
}
}
}
template<typename T, typename Tout, typename F>
struct UnaryOp {
static
gdf_error launch(gdf_column *input, gdf_column *output) {
// Return immediately for empty inputs
if((0==input->size))
{
return GDF_SUCCESS;
}
/* check for size of the columns */
if (input->size != output->size) {
return GDF_COLUMN_SIZE_MISMATCH;
}
// find optimal blocksize
int mingridsize, blocksize;
CUDA_TRY(
hipOccupancyMaxPotentialBlockSize(&mingridsize, &blocksize,
gpu_unary_op<T, Tout, F>)
);
// find needed gridsize
int neededgridsize = (input->size + blocksize - 1) / blocksize;
int gridsize = ::min(neededgridsize, mingridsize);
F functor;
hipLaunchKernelGGL(( gpu_unary_op), dim3(gridsize), dim3(blocksize), 0, 0,
// input
(const T*)input->data, input->valid, input->size,
// output
(Tout*)output->data,
// action
functor
);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
};
template<typename T, typename F>
struct MathOp {
static
gdf_error launch(gdf_column *input, gdf_column *output) {
return UnaryOp<T, T, F>::launch(input, output);
}
};
#define DEF_UNARY_OP_REAL(F) \
gdf_error F##_generic(gdf_column *input, gdf_column *output) { \
switch ( input->dtype ) { \
case GDF_FLOAT32: return F##_f32(input, output); \
case GDF_FLOAT64: return F##_f64(input, output); \
default: return GDF_UNSUPPORTED_DTYPE; \
} \
}
#define DEF_CAST_OP(TO) \
gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output) { \
switch ( input->dtype ) { \
case GDF_INT8: return gdf_cast_i8_to_##TO(input, output); \
case GDF_INT32: return gdf_cast_i32_to_##TO(input, output); \
case GDF_INT64: return gdf_cast_i64_to_##TO(input, output); \
case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output); \
case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output); \
case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output); \
case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output); \
case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output); \
default: return GDF_UNSUPPORTED_DTYPE; \
} \
}
#define DEF_CAST_OP_TS(TO) \
gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) {\
switch ( input->dtype ) { \
case GDF_INT8: return gdf_cast_i8_to_##TO(input, output, time_unit); \
case GDF_INT32: return gdf_cast_i32_to_##TO(input, output, time_unit); \
case GDF_INT64: return gdf_cast_i64_to_##TO(input, output, time_unit); \
case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output, time_unit); \
case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output, time_unit); \
case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output, time_unit); \
case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output, time_unit); \
case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output, time_unit); \
default: return GDF_UNSUPPORTED_DTYPE; \
} \
}
// trig functions
template<typename T>
struct DeviceSin {
__device__
T apply(T data) {
return std::sin(data);
}
};
template<typename T>
struct DeviceCos {
__device__
T apply(T data) {
return std::cos(data);
}
};
template<typename T>
struct DeviceTan {
__device__
T apply(T data) {
return std::tan(data);
}
};
template<typename T>
struct DeviceArcSin {
__device__
T apply(T data) {
return std::asin(data);
}
};
template<typename T>
struct DeviceArcCos {
__device__
T apply(T data) {
return std::acos(data);
}
};
template<typename T>
struct DeviceArcTan {
__device__
T apply(T data) {
return std::atan(data);
}
};
DEF_UNARY_OP_REAL(gdf_sin)
gdf_error gdf_sin_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceSin<float> >::launch(input, output);
}
gdf_error gdf_sin_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceSin<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_cos)
gdf_error gdf_cos_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceCos<float> >::launch(input, output);
}
gdf_error gdf_cos_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceCos<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_tan)
gdf_error gdf_tan_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceTan<float> >::launch(input, output);
}
gdf_error gdf_tan_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceTan<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_asin)
gdf_error gdf_asin_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceArcSin<float> >::launch(input, output);
}
gdf_error gdf_asin_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceArcSin<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_acos)
gdf_error gdf_acos_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceArcCos<float> >::launch(input, output);
}
gdf_error gdf_acos_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceArcCos<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_atan)
gdf_error gdf_atan_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceArcTan<float> >::launch(input, output);
}
gdf_error gdf_atan_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceArcTan<double> >::launch(input, output);
}
// exponential functions
template<typename T>
struct DeviceExp {
__device__
T apply(T data) {
return ::exp(data);
}
};
template<typename T>
struct DeviceLog {
__device__
T apply(T data) {
return ::log(data);
}
};
DEF_UNARY_OP_REAL(gdf_exp)
gdf_error gdf_exp_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceExp<float> >::launch(input, output);
}
gdf_error gdf_exp_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceExp<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_log)
gdf_error gdf_log_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceLog<float> >::launch(input, output);
}
gdf_error gdf_log_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceLog<double> >::launch(input, output);
}
// exponential functions
template<typename T>
struct DeviceSqrt {
__device__
T apply(T data) {
return std::sqrt(data);
}
};
DEF_UNARY_OP_REAL(gdf_sqrt)
gdf_error gdf_sqrt_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceSqrt<float> >::launch(input, output);
}
gdf_error gdf_sqrt_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceSqrt<double> >::launch(input, output);
}
// rounding functions
template<typename T>
struct DeviceCeil {
__device__
T apply(T data) {
return ::ceil(data);
}
};
template<typename T>
struct DeviceFloor {
__device__
T apply(T data) {
return ::floor(data);
}
};
DEF_UNARY_OP_REAL(gdf_ceil)
gdf_error gdf_ceil_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceCeil<float> >::launch(input, output);
}
gdf_error gdf_ceil_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceCeil<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_floor)
gdf_error gdf_floor_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceFloor<float> >::launch(input, output);
}
gdf_error gdf_floor_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceFloor<double> >::launch(input, output);
}
// casting
template<typename From, typename To>
struct DeviceCast {
__device__
To apply(From data) {
return (To)data;
}
};
template<typename From, typename To, int64_t units_factor>
struct UpCasting {
__device__
To apply(From data) {
return (To)(data*units_factor);
}
};
template<typename From, typename To, int64_t units_factor>
struct DownCasting {
__device__
To apply(From data) {
return (To)((data-(units_factor-1)*(data<0))/units_factor); //ceiling only when data is negative
}
};
// Castings are differentiate between physical and logical ones.
// In physical casting only change the physical representation, for example from GDF_FLOAT32 (float) to GDF_FLOAT64 (double)
// on the other hand, casting between date timestamps needs also perform some calculations according to the time unit:
// - when the source or destination datatype is GDF_DATE32, the value is multiplied or divided by the amount of timeunits by day
// - when datatypes are timestamps, the value is multiplied or divided according to the S.I. nano 10^-9, micro 10^-6, milli 10^-3
// No calculation is necessary when casting between GDF_DATE64 and GDF_TIMESTAMP (with ms as time unit), because are logically and physically the same thing
#define DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \
gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output) { \
GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \
\
\
output->dtype = LTO; \
if (input->valid && output->valid) { \
thrust::copy(rmm::exec_policy()->on(0), input->valid, input->valid + gdf_num_bitmask_elements(input->size), output->valid); \
} \
\
/* Handling datetime logical castings */ \
if( LTFROM == GDF_DATE64 && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ms ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \
/* Handling only physical castings */ \
return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \
}
// Castings functions where Timestamp is the destination type
#define DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \
gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) { \
GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \
\
\
output->dtype = LTO; \
output->dtype_info.time_unit = time_unit; \
if (input->valid && output->valid) { \
thrust::copy(rmm::exec_policy()->on(0), input->valid, input->valid + gdf_num_bitmask_elements(input->size), output->valid); \
} \
\
/* Handling datetime logical castings */ \
if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ms ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \
else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( LTFROM == GDF_TIMESTAMP && LTO == GDF_TIMESTAMP ) \
{ \
if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ms ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_s ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_us ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_s ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ns ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_s ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ns ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_us ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_ns ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_ms ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ms ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_us ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
} \
/* Handling only physical castings */ \
return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \
}
#define DEF_CAST_IMPL_TEMPLATE(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \
DEF_CAST_OP(ABREV) \
DEF_CAST_IMPL(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \
DEF_CAST_IMPL(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \
DEF_CAST_IMPL(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \
DEF_CAST_IMPL(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \
DEF_CAST_IMPL(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \
DEF_CAST_IMPL(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \
DEF_CAST_IMPL(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \
DEF_CAST_IMPL(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE)
#define DEF_CAST_IMPL_TEMPLATE_TS(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \
DEF_CAST_OP_TS(ABREV) \
DEF_CAST_IMPL_TS(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE)
DEF_CAST_IMPL_TEMPLATE(f32, float, GDF_FLOAT32)
DEF_CAST_IMPL_TEMPLATE(f64, double, GDF_FLOAT64)
DEF_CAST_IMPL_TEMPLATE(i8, int8_t, GDF_INT8)
DEF_CAST_IMPL_TEMPLATE(i32, int32_t, GDF_INT32)
DEF_CAST_IMPL_TEMPLATE(i64, int64_t, GDF_INT64)
DEF_CAST_IMPL_TEMPLATE(date32, int32_t, GDF_DATE32)
DEF_CAST_IMPL_TEMPLATE(date64, int64_t, GDF_DATE64)
DEF_CAST_IMPL_TEMPLATE_TS(timestamp, int64_t, GDF_TIMESTAMP)
| c23ea7cb7a6aac483feb68c54fdb38aa2287002f.cu | /*
*
* Code edits and additions
* Copyright 2018 Rommel Quintanilla <rommel@blazingdb.com>
*/
#include <cmath>
#include <algorithm>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include "cudf.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include "rmm/thrust_rmm_allocator.h"
#include <bitmask/legacy_bitmask.hpp>
template<typename T, typename Tout, typename F>
__global__
void gpu_unary_op(const T *data, const gdf_valid_type *valid,
gdf_size_type size, Tout *results, F functor) {
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
if ( valid ) { // has valid mask
for (int i=start; i<size; i+=step) {
if ( gdf_is_valid(valid, i) )
results[i] = functor.apply(data[i]);
}
} else { // no valid mask
for (int i=start; i<size; i+=step) {
results[i] = functor.apply(data[i]);
}
}
}
template<typename T, typename Tout, typename F>
struct UnaryOp {
static
gdf_error launch(gdf_column *input, gdf_column *output) {
// Return immediately for empty inputs
if((0==input->size))
{
return GDF_SUCCESS;
}
/* check for size of the columns */
if (input->size != output->size) {
return GDF_COLUMN_SIZE_MISMATCH;
}
// find optimal blocksize
int mingridsize, blocksize;
CUDA_TRY(
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &blocksize,
gpu_unary_op<T, Tout, F>)
);
// find needed gridsize
int neededgridsize = (input->size + blocksize - 1) / blocksize;
int gridsize = std::min(neededgridsize, mingridsize);
F functor;
gpu_unary_op<<<gridsize, blocksize>>>(
// input
(const T*)input->data, input->valid, input->size,
// output
(Tout*)output->data,
// action
functor
);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
};
template<typename T, typename F>
struct MathOp {
static
gdf_error launch(gdf_column *input, gdf_column *output) {
return UnaryOp<T, T, F>::launch(input, output);
}
};
#define DEF_UNARY_OP_REAL(F) \
gdf_error F##_generic(gdf_column *input, gdf_column *output) { \
switch ( input->dtype ) { \
case GDF_FLOAT32: return F##_f32(input, output); \
case GDF_FLOAT64: return F##_f64(input, output); \
default: return GDF_UNSUPPORTED_DTYPE; \
} \
}
#define DEF_CAST_OP(TO) \
gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output) { \
switch ( input->dtype ) { \
case GDF_INT8: return gdf_cast_i8_to_##TO(input, output); \
case GDF_INT32: return gdf_cast_i32_to_##TO(input, output); \
case GDF_INT64: return gdf_cast_i64_to_##TO(input, output); \
case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output); \
case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output); \
case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output); \
case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output); \
case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output); \
default: return GDF_UNSUPPORTED_DTYPE; \
} \
}
#define DEF_CAST_OP_TS(TO) \
gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) {\
switch ( input->dtype ) { \
case GDF_INT8: return gdf_cast_i8_to_##TO(input, output, time_unit); \
case GDF_INT32: return gdf_cast_i32_to_##TO(input, output, time_unit); \
case GDF_INT64: return gdf_cast_i64_to_##TO(input, output, time_unit); \
case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output, time_unit); \
case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output, time_unit); \
case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output, time_unit); \
case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output, time_unit); \
case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output, time_unit); \
default: return GDF_UNSUPPORTED_DTYPE; \
} \
}
// trig functions
// Element-wise trigonometric functors consumed by MathOp<...>::launch.
// apply() maps one input value to one output value; the std:: calls
// resolve to the CUDA device math overloads for float and double.

// sine
template<typename T>
struct DeviceSin {
__device__
T apply(T data) {
return std::sin(data);
}
};
// cosine
template<typename T>
struct DeviceCos {
__device__
T apply(T data) {
return std::cos(data);
}
};
// tangent
template<typename T>
struct DeviceTan {
__device__
T apply(T data) {
return std::tan(data);
}
};
// inverse sine (NaN for |data| > 1, per math-library semantics)
template<typename T>
struct DeviceArcSin {
__device__
T apply(T data) {
return std::asin(data);
}
};
// inverse cosine (NaN for |data| > 1, per math-library semantics)
template<typename T>
struct DeviceArcCos {
__device__
T apply(T data) {
return std::acos(data);
}
};
// inverse tangent
template<typename T>
struct DeviceArcTan {
__device__
T apply(T data) {
return std::atan(data);
}
};
// Public trig entry points. DEF_UNARY_OP_REAL (defined earlier in this
// file) emits a dtype-dispatching generic wrapper that forwards to the
// _f32/_f64 specializations below; each specialization launches the
// matching device functor elementwise via MathOp.
DEF_UNARY_OP_REAL(gdf_sin)
gdf_error gdf_sin_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceSin<float> >::launch(input, output);
}
gdf_error gdf_sin_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceSin<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_cos)
gdf_error gdf_cos_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceCos<float> >::launch(input, output);
}
gdf_error gdf_cos_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceCos<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_tan)
gdf_error gdf_tan_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceTan<float> >::launch(input, output);
}
gdf_error gdf_tan_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceTan<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_asin)
gdf_error gdf_asin_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceArcSin<float> >::launch(input, output);
}
gdf_error gdf_asin_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceArcSin<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_acos)
gdf_error gdf_acos_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceArcCos<float> >::launch(input, output);
}
gdf_error gdf_acos_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceArcCos<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_atan)
gdf_error gdf_atan_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceArcTan<float> >::launch(input, output);
}
gdf_error gdf_atan_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceArcTan<double> >::launch(input, output);
}
// exponential functions
// Element-wise exponential functor: e^data.
template<typename T>
struct DeviceExp {
__device__
T apply(T data) {
return std::exp(data);
}
};
// Element-wise natural-logarithm functor: ln(data).
template<typename T>
struct DeviceLog {
__device__
T apply(T data) {
return std::log(data);
}
};
// Public exp/log entry points; same dispatch pattern as the trig ops:
// the generic wrapper routes by dtype to the _f32/_f64 launches below.
DEF_UNARY_OP_REAL(gdf_exp)
gdf_error gdf_exp_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceExp<float> >::launch(input, output);
}
gdf_error gdf_exp_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceExp<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_log)
gdf_error gdf_log_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceLog<float> >::launch(input, output);
}
gdf_error gdf_log_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceLog<double> >::launch(input, output);
}
// power functions
// Element-wise square-root functor (NaN for negative input, per
// math-library semantics).
template<typename T>
struct DeviceSqrt {
__device__
T apply(T data) {
return std::sqrt(data);
}
};
// Public sqrt entry points; generic wrapper dispatches by dtype to the
// _f32/_f64 launches below.
DEF_UNARY_OP_REAL(gdf_sqrt)
gdf_error gdf_sqrt_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceSqrt<float> >::launch(input, output);
}
gdf_error gdf_sqrt_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceSqrt<double> >::launch(input, output);
}
// rounding functions
// Element-wise rounding functor: smallest integral value >= data.
template<typename T>
struct DeviceCeil {
__device__
T apply(T data) {
return std::ceil(data);
}
};
// Element-wise rounding functor: largest integral value <= data.
template<typename T>
struct DeviceFloor {
__device__
T apply(T data) {
return std::floor(data);
}
};
// Public ceil/floor entry points; generic wrapper dispatches by dtype
// to the _f32/_f64 launches below.
DEF_UNARY_OP_REAL(gdf_ceil)
gdf_error gdf_ceil_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceCeil<float> >::launch(input, output);
}
gdf_error gdf_ceil_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceCeil<double> >::launch(input, output);
}
DEF_UNARY_OP_REAL(gdf_floor)
gdf_error gdf_floor_f32(gdf_column *input, gdf_column *output) {
return MathOp<float, DeviceFloor<float> >::launch(input, output);
}
gdf_error gdf_floor_f64(gdf_column *input, gdf_column *output) {
return MathOp<double, DeviceFloor<double> >::launch(input, output);
}
// casting
// Plain value cast: only the physical representation changes.
template<typename From, typename To>
struct DeviceCast {
__device__
To apply(From data) {
return (To)data;
}
};
// Cast to a finer time resolution: multiply by the unit ratio
// (e.g. days -> ms uses units_factor = 86400000).
template<typename From, typename To, int64_t units_factor>
struct UpCasting {
__device__
To apply(From data) {
return (To)(data*units_factor);
}
};
// Cast to a coarser time resolution: divide by the unit ratio.
template<typename From, typename To, int64_t units_factor>
struct DownCasting {
__device__
To apply(From data) {
/* C integer division truncates toward zero; subtracting
   (units_factor-1) when data < 0 converts that truncation into
   floor division, so negative (pre-epoch) values round down to
   the earlier coarse unit instead of toward zero. */
return (To)((data-(units_factor-1)*(data<0))/units_factor);
}
};
// Castings are differentiate between physical and logical ones.
// In physical casting only change the physical representation, for example from GDF_FLOAT32 (float) to GDF_FLOAT64 (double)
// on the other hand, casting between date timestamps needs also perform some calculations according to the time unit:
// - when the source or destination datatype is GDF_DATE32, the value is multiplied or divided by the amount of timeunits by day
// - when datatypes are timestamps, the value is multiplied or divided according to the S.I. nano 10^-9, micro 10^-6, milli 10^-3
// No calculation is necessary when casting between GDF_DATE64 and GDF_TIMESTAMP (with ms as time unit), because are logically and physically the same thing
// DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO):
// expands to gdf_cast_<VFROM>_to_<VTO>(input, output), which
//   1. validates input->dtype against the logical source type LTFROM,
//   2. stamps output->dtype = LTO and copies the validity bitmask
//      (when both columns have one),
//   3. chooses Up/DownCasting with the right unit factor for
//      date/timestamp conversions, else a plain DeviceCast.
// LTFROM/LTO are compile-time constants in every expansion, so the
// compiler prunes all but the matching branch.
#define DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \
gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output) { \
GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \
\
\
output->dtype = LTO; \
if (input->valid && output->valid) { \
thrust::copy(rmm::exec_policy()->on(0), input->valid, input->valid + gdf_num_bitmask_elements(input->size), output->valid); \
} \
\
/* Handling datetime logical castings */ \
if( LTFROM == GDF_DATE64 && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ms ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE32 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE64 ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \
/* Handling only physical castings */ \
return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \
}
// Castings functions where Timestamp is the destination type
// DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO):
// like DEF_CAST_IMPL but for timestamp destinations; the caller-chosen
// gdf_time_unit is recorded in output->dtype_info and determines the
// Up/DownCasting unit factor. DATE64 and TIMESTAMP(ms) are the same
// physical representation, so that pair falls through to DeviceCast.
#define DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \
gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) { \
GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \
\
\
output->dtype = LTO; \
output->dtype_info.time_unit = time_unit; \
if (input->valid && output->valid) { \
thrust::copy(rmm::exec_policy()->on(0), input->valid, input->valid + gdf_num_bitmask_elements(input->size), output->valid); \
} \
\
/* Handling datetime logical castings */ \
if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ms ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000> >::launch(input, output); \
else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns ) ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \
else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( LTFROM == GDF_TIMESTAMP && LTO == GDF_TIMESTAMP ) \
{ \
/* timestamp -> timestamp: rescale between the source and target units */ \
if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ms ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_s ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_us ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_s ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ns ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_s ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ns ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_us ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_ns ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_ms ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ms ) \
return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \
else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_us ) \
return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \
} \
/* Handling only physical castings */ \
return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \
}
// DEF_CAST_IMPL_TEMPLATE: stamps out the generic dtype dispatcher for
// destination ABREV plus one cast implementation from every supported
// source type (i8/i32/i64/f32/f64/date32/date64/timestamp).
#define DEF_CAST_IMPL_TEMPLATE(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \
DEF_CAST_OP(ABREV) \
DEF_CAST_IMPL(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \
DEF_CAST_IMPL(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \
DEF_CAST_IMPL(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \
DEF_CAST_IMPL(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \
DEF_CAST_IMPL(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \
DEF_CAST_IMPL(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \
DEF_CAST_IMPL(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \
DEF_CAST_IMPL(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE)
// _TS variant: same fan-out but every generated function takes the
// destination gdf_time_unit (timestamp destinations only).
#define DEF_CAST_IMPL_TEMPLATE_TS(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \
DEF_CAST_OP_TS(ABREV) \
DEF_CAST_IMPL_TS(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \
DEF_CAST_IMPL_TS(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE)
// Instantiate the full cast matrix for every supported destination.
// GDF_TIMESTAMP uses the _TS variant so callers choose the time unit.
DEF_CAST_IMPL_TEMPLATE(f32, float, GDF_FLOAT32)
DEF_CAST_IMPL_TEMPLATE(f64, double, GDF_FLOAT64)
DEF_CAST_IMPL_TEMPLATE(i8, int8_t, GDF_INT8)
DEF_CAST_IMPL_TEMPLATE(i32, int32_t, GDF_INT32)
DEF_CAST_IMPL_TEMPLATE(i64, int64_t, GDF_INT64)
DEF_CAST_IMPL_TEMPLATE(date32, int32_t, GDF_DATE32)
DEF_CAST_IMPL_TEMPLATE(date64, int64_t, GDF_DATE64)
DEF_CAST_IMPL_TEMPLATE_TS(timestamp, int64_t, GDF_TIMESTAMP)
|
c9fc6c38f4394c09114d7800b372ec13b598bac6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetf2_nopiv_kernels.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
extern __shared__ float zdata[];
// Unblocked, non-pivoting LU factorization of an N-column panel held
// entirely in registers: thread tx owns row tx of the panel (the caller
// launches one thread per row, so the m argument is not read here).
// sx points to this matrix's slice of shared memory (at least N floats)
// used to broadcast the current pivot row to all threads.
// If a diagonal element is zero, linfo records the failing column
// (1-based, offset by gbstep), scaling is suppressed for the rest of
// the sweep, and thread 0 publishes linfo to *info.
// NOTE(review): __syncthreads() synchronizes the whole thread block, so
// when several matrices share a block (blockDim.y > 1) every y-slot
// must reach these barriers in lockstep — they do, because all slots
// run the same N iterations.
template<int N>
__device__ void
sgetf2_nopiv_device(int m, float* dA, int ldda, magma_int_t *info, const int tx, float* sx, int gbstep)
{
float rA[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
int linfo = 0;
float abs;
// check from previous calls if the panel factorization failed previously
// this is necessary to report the correct info value
if(gbstep > 0 && *info != 0) return;
// read: each thread loads its entire row of the panel into registers
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
#pragma unroll
for(int i = 0; i < N; i++){
// column i's owner broadcasts its (already updated) row via shared memory
if(tx == i){
#pragma unroll
for(int j = 0; j < N; j++)
sx[j] = rA[j];
}
__syncthreads();
// singularity test on the diagonal element (|Re| + |Im|)
abs = fabs(MAGMA_S_REAL( sx[i] )) + fabs(MAGMA_S_IMAG( sx[i] ));
linfo = ( abs == MAGMA_D_ZERO && linfo == 0) ? (gbstep+i+1) : linfo;
//linfo = ( abs == MAGMA_D_ZERO ) ? min(linfo,gbstep+i+1):0;
reg = (linfo == 0 ) ? MAGMA_S_DIV(MAGMA_S_ONE, sx[i] ) : MAGMA_S_ONE;
// scal and ger
if( tx > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
__syncthreads();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write: store L (below diagonal) and U (on/above) back over the panel
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + tx ] = rA[i];
}
}
/******************************************************************************/
extern __shared__ float zdata[];
// Batched driver: each (x-column, y-slot) of the block factorizes one
// matrix of the batch; blockDim.y packs several matrices per block.
// Dynamic shared memory holds one NPOW2-float broadcast buffer per slot.
template<int N, int NPOW2>
__global__ void
sgetf2_nopiv_batched_kernel( int m, float** dA_array, int ai, int aj, int ldda,
                             magma_int_t* info_array, int gbstep, int batchCount)
{
    // Which matrix of the batch this y-slot handles.
    const int matrix_id = blockIdx.x * blockDim.y + threadIdx.y;
    if (matrix_id >= batchCount) return;

    // Locate this matrix's (ai, aj) sub-panel and its info entry.
    float*       panel   = dA_array[matrix_id] + aj * ldda + ai;
    magma_int_t* my_info = &info_array[matrix_id];

    // This slot's private slice of the dynamic shared-memory workspace.
    float* workspace = ((float*)zdata) + threadIdx.y * NPOW2;

    sgetf2_nopiv_device<N>(m, panel, ldda, my_info, threadIdx.x, workspace, gbstep);
}
/***************************************************************************//**
Purpose
-------
sgetf2_nopiv computes the non-pivoting LU factorization of an M-by-N matrix A.
This routine can deal with matrices of limited widths, so it is for internal use.
The factorization has the form
A = L * U
where L is lower triangular with unit diagonal elements (lower
trapezoidal if m > n), and U is upper triangular (upper
trapezoidal if m < n).
This is a batched version that factors batchCount M-by-N matrices in parallel.
Arguments
---------
@param[in]
m INTEGER
The number of rows the matrix A. N >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = L*U; the unit diagonal elements of L are not stored.
@param[in]
ai INTEGER
Row offset for dA_array.
@param[in]
aj INTEGER
Column offset for dA_array.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
gbstep INTEGER
Internal use.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
// Host-side dispatcher: validates arguments, splits the rows into a
// kernel-factored top part and a TRSM-updated remainder, and launches
// the template instantiation matching the panel width n (1..32).
// See the doxygen block above for the full contract.
extern "C" magma_int_t
magma_sgetf2_nopiv_internal_batched(
magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
magma_int_t* info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue )
{
#define dAarray(i,j) dA_array, i, j
magma_int_t arginfo = 0;
if (m < 0) {
arginfo = -1;
} else if (n < 0 || n > 32 || (m > 512 && n > 16) ) {
arginfo = -2;
} else if (ai < 0) {
arginfo = -4;
} else if (aj < 0) {
arginfo = -5;
} else if (ldda < max(1,m)) {
arginfo = -6;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
// Row split: the kernel factorizes the first m1 (<= MAX_NTHREADS) rows
// with one thread per row; the remaining m2 rows are updated afterwards.
magma_int_t m1 = (m > MAX_NTHREADS) ? MAX_NTHREADS : m;
magma_int_t m2 = m - m1;
// Pack several small matrices per thread block (ntcol slots in y).
const magma_int_t ntcol = (m1 > 32) ? 1 : (2 * (32/m1));
// One ceilpow2(n)-float broadcast buffer per matrix slot.
magma_int_t shmem = ntcol * magma_ceilpow2(n) * sizeof(float);
magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 threads(m1, ntcol, 1);
dim3 grid(gridx, 1, 1);
// The panel width is a compile-time template parameter, so dispatch on n.
switch(n){
case 1:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 2:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 3:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 4:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 5:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 6:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 7:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 8:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 9:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 10:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 11:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 12:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 13:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 14:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 15:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 16:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 17:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 18:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 19:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 20:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 21:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 22:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 23:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 24:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 25:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 26:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 27:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 28:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 29:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 30:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 31:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 32:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
// Unreachable for validated inputs (n was range-checked above).
default: printf("error: panel width %lld is not supported\n", (long long) n);
}
// Trailing m2 rows: solve L2 * U1 = A2 for L2 with a right-sided TRSM.
if(m2 > 0){
magmablas_strsm_recursive_batched(
MagmaRight, MagmaUpper, MagmaNoTrans, MagmaNonUnit,
m2, n, MAGMA_S_ONE,
dAarray(ai ,aj), ldda,
dAarray(ai+m1,aj), ldda, batchCount, queue );
}
#undef dAarray
return arginfo;
}
| c9fc6c38f4394c09114d7800b372ec13b598bac6.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetf2_nopiv_kernels.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
extern __shared__ float zdata[];
// Unblocked, non-pivoting LU factorization of an N-column panel held
// entirely in registers: thread tx owns row tx of the panel (the caller
// launches one thread per row, so the m argument is not read here).
// sx points to this matrix's slice of shared memory (at least N floats)
// used to broadcast the current pivot row to all threads.
// If a diagonal element is zero, linfo records the failing column
// (1-based, offset by gbstep), scaling is suppressed for the rest of
// the sweep, and thread 0 publishes linfo to *info.
// NOTE(review): __syncthreads() synchronizes the whole thread block, so
// when several matrices share a block (blockDim.y > 1) every y-slot
// must reach these barriers in lockstep — they do, because all slots
// run the same N iterations.
template<int N>
__device__ void
sgetf2_nopiv_device(int m, float* dA, int ldda, magma_int_t *info, const int tx, float* sx, int gbstep)
{
float rA[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
int linfo = 0;
float abs;
// check from previous calls if the panel factorization failed previously
// this is necessary to report the correct info value
if(gbstep > 0 && *info != 0) return;
// read: each thread loads its entire row of the panel into registers
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
#pragma unroll
for(int i = 0; i < N; i++){
// column i's owner broadcasts its (already updated) row via shared memory
if(tx == i){
#pragma unroll
for(int j = 0; j < N; j++)
sx[j] = rA[j];
}
__syncthreads();
// singularity test on the diagonal element (|Re| + |Im|)
abs = fabs(MAGMA_S_REAL( sx[i] )) + fabs(MAGMA_S_IMAG( sx[i] ));
linfo = ( abs == MAGMA_D_ZERO && linfo == 0) ? (gbstep+i+1) : linfo;
//linfo = ( abs == MAGMA_D_ZERO ) ? min(linfo,gbstep+i+1):0;
reg = (linfo == 0 ) ? MAGMA_S_DIV(MAGMA_S_ONE, sx[i] ) : MAGMA_S_ONE;
// scal and ger
if( tx > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
__syncthreads();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write: store L (below diagonal) and U (on/above) back over the panel
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + tx ] = rA[i];
}
}
/******************************************************************************/
extern __shared__ float zdata[];
// Batched driver: each (x-column, y-slot) of the block factorizes one
// matrix of the batch; blockDim.y packs several matrices per block.
// Dynamic shared memory holds one NPOW2-float broadcast buffer per slot.
template<int N, int NPOW2>
__global__ void
sgetf2_nopiv_batched_kernel( int m, float** dA_array, int ai, int aj, int ldda,
                             magma_int_t* info_array, int gbstep, int batchCount)
{
    // Which matrix of the batch this y-slot handles.
    const int matrix_id = blockIdx.x * blockDim.y + threadIdx.y;
    if (matrix_id >= batchCount) return;

    // Locate this matrix's (ai, aj) sub-panel and its info entry.
    float*       panel   = dA_array[matrix_id] + aj * ldda + ai;
    magma_int_t* my_info = &info_array[matrix_id];

    // This slot's private slice of the dynamic shared-memory workspace.
    float* workspace = ((float*)zdata) + threadIdx.y * NPOW2;

    sgetf2_nopiv_device<N>(m, panel, ldda, my_info, threadIdx.x, workspace, gbstep);
}
/***************************************************************************//**
Purpose
-------
sgetf2_nopiv computes the non-pivoting LU factorization of an M-by-N matrix A.
This routine can deal with matrices of limited widths, so it is for internal use.
The factorization has the form
A = L * U
where L is lower triangular with unit diagonal elements (lower
trapezoidal if m > n), and U is upper triangular (upper
trapezoidal if m < n).
This is a batched version that factors batchCount M-by-N matrices in parallel.
Arguments
---------
@param[in]
m INTEGER
The number of rows the matrix A. N >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = L*U; the unit diagonal elements of L are not stored.
@param[in]
ai INTEGER
Row offset for dA_array.
@param[in]
aj INTEGER
Column offset for dA_array.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
gbstep INTEGER
Internal use.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
// Batched panel LU factorization without pivoting (single precision).
// Factors the leading m1 <= MAX_NTHREADS rows of each M-by-N panel with a
// kernel templated on the panel width n, then updates the remaining rows
// with a batched triangular solve (TRSM).
// Returns 0 on success or a negative MAGMA error code for bad arguments.
extern "C" magma_int_t
magma_sgetf2_nopiv_internal_batched(
    magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t gbstep,
    magma_int_t batchCount, magma_queue_t queue )
{
    // Convenience macro: expands to the pointer array plus (row, col) offsets.
    #define dAarray(i,j) dA_array, i, j
    magma_int_t arginfo = 0;
    // Argument checks. The panel width is capped at 32 (16 when m > 512),
    // matching the specializations instantiated in the switch below.
    if (m < 0) {
        arginfo = -1;
    } else if (n < 0 || n > 32 || (m > 512 && n > 16) ) {
        arginfo = -2;
    } else if (ai < 0) {
        arginfo = -4;
    } else if (aj < 0) {
        arginfo = -5;
    } else if (ldda < max(1,m)) {
        arginfo = -6;
    }
    if (arginfo != 0) {
        magma_xerbla( __func__, -(arginfo) );
        return arginfo;
    }
    // Quick return if possible
    if (m == 0 || n == 0) {
        return arginfo;
    }
    // The kernel handles at most MAX_NTHREADS rows (one row per thread);
    // the remaining m2 rows are handled by the TRSM update afterwards.
    magma_int_t m1 = (m > MAX_NTHREADS) ? MAX_NTHREADS : m;
    magma_int_t m2 = m - m1;
    // Pack several small problems per thread block (ntcol in the y-dim)
    // when the panel is short, to keep the SM occupied.
    const magma_int_t ntcol = (m1 > 32) ? 1 : (2 * (32/m1));
    // Shared memory: one padded row of the panel per problem in the block.
    magma_int_t shmem = ntcol * magma_ceilpow2(n) * sizeof(float);
    magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
    dim3 threads(m1, ntcol, 1);
    dim3 grid(gridx, 1, 1);
    // The kernel is templated on the compile-time panel width, so dispatch
    // over every supported value of n (1..32).
    switch(n){
        case 1: sgetf2_nopiv_batched_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 2: sgetf2_nopiv_batched_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 3: sgetf2_nopiv_batched_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 4: sgetf2_nopiv_batched_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 5: sgetf2_nopiv_batched_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 6: sgetf2_nopiv_batched_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 7: sgetf2_nopiv_batched_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 8: sgetf2_nopiv_batched_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 9: sgetf2_nopiv_batched_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 10: sgetf2_nopiv_batched_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 11: sgetf2_nopiv_batched_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 12: sgetf2_nopiv_batched_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 13: sgetf2_nopiv_batched_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 14: sgetf2_nopiv_batched_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 15: sgetf2_nopiv_batched_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 16: sgetf2_nopiv_batched_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 17: sgetf2_nopiv_batched_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 18: sgetf2_nopiv_batched_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 19: sgetf2_nopiv_batched_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 20: sgetf2_nopiv_batched_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 21: sgetf2_nopiv_batched_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 22: sgetf2_nopiv_batched_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 23: sgetf2_nopiv_batched_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 24: sgetf2_nopiv_batched_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 25: sgetf2_nopiv_batched_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 26: sgetf2_nopiv_batched_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 27: sgetf2_nopiv_batched_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 28: sgetf2_nopiv_batched_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 29: sgetf2_nopiv_batched_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 30: sgetf2_nopiv_batched_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 31: sgetf2_nopiv_batched_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        case 32: sgetf2_nopiv_batched_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
        // Unreachable in practice: n > 32 is rejected by the check above.
        default: printf("error: panel width %lld is not supported\n", (long long) n);
    }
    // Update the trailing m2 rows: solve against the upper-triangular factor
    // produced for the top m1 rows.
    if(m2 > 0){
        magmablas_strsm_recursive_batched(
            MagmaRight, MagmaUpper, MagmaNoTrans, MagmaNonUnit,
            m2, n, MAGMA_S_ONE,
            dAarray(ai ,aj), ldda,
            dAarray(ai+m1,aj), ldda, batchCount, queue );
    }
    #undef dAarray
    return arginfo;
}
|
ff9efc15b9c929e38135a88ded3906aa81d38d90.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime_api.h>
/*
 * Enumerates all HIP-visible devices and prints their key hardware
 * properties to stdout.
 *
 * Returns 0 on success; exits with status -1 on any runtime API error.
 */
int main()
{
    hipError_t error;
    hipDeviceProp_t dev;
    int dev_cnt = 0;

    hipProfilerStart();

    // return device numbers with compute capability >= 1.0
    error = hipGetDeviceCount (&dev_cnt);
    if(error != hipSuccess)
    {
        printf("Error: %s\n", hipGetErrorString(error));
        exit(-1);
    }
    printf("Number of devices: %d\n",dev_cnt);

    // Get properties of each device
    for(int i = 0; i < dev_cnt; i++)
    {
        error = hipGetDeviceProperties(&dev, i);
        if(error != hipSuccess)
        {
            printf("Error: %s\n", hipGetErrorString(error));
            exit(-1);
        }
        printf("\nDevice %d:\n", i);
        printf("name: %s\n",dev.name);
        printf("Compute capability %d.%d\n",dev.major, dev.minor);
        // size_t fields must be printed with %zu, not %d/%ld.
        printf("total global memory(KB): %zu\n", dev.totalGlobalMem/1024);
        printf("shared mem per block: %zu\n", dev.sharedMemPerBlock);
        printf("regs per block: %d\n", dev.regsPerBlock);
        printf("warp size: %d\n", dev.warpSize);
        printf("max threads per block: %d\n",dev.maxThreadsPerBlock);
        // maxThreadsDim/maxGridSize are ordered {x, y, z}; labels fixed to match.
        printf("max thread dim x:%d y:%d z:%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]);
        printf("max grid size x:%d y:%d z:%d\n", dev.maxGridSize[0], dev.maxGridSize[1], dev.maxGridSize[2]);
        // Bug fix: the original format string had no conversion for clockRate,
        // so the value was silently dropped.
        printf("clock rate(KHz): %d\n", dev.clockRate);
        printf("total constant memory (bytes): %zu\n", dev.totalConstMem);
        printf("multiprocessor count %d\n", dev.multiProcessorCount);
        printf("memory bus width: %d\n", dev.memoryBusWidth);
        printf("memory clock rate (KHz): %d\n", dev.memoryClockRate);
        printf("L2 cache size (bytes): %d\n", dev.l2CacheSize);
        printf("max threads per SM: %d\n", dev.maxThreadsPerMultiProcessor);
    }

    hipProfilerStop();
    return 0;
}
| ff9efc15b9c929e38135a88ded3906aa81d38d90.cu | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda_profiler_api.h>
/*
 * Enumerates all CUDA-capable devices and prints their key hardware
 * properties to stdout.
 *
 * Returns 0 on success; exits with status -1 on any runtime API error.
 */
int main()
{
    cudaError_t error;
    cudaDeviceProp dev;
    int dev_cnt = 0;

    cudaProfilerStart();

    // return device numbers with compute capability >= 1.0
    error = cudaGetDeviceCount (&dev_cnt);
    if(error != cudaSuccess)
    {
        printf("Error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    printf("Number of devices: %d\n",dev_cnt);

    // Get properties of each device
    for(int i = 0; i < dev_cnt; i++)
    {
        error = cudaGetDeviceProperties(&dev, i);
        if(error != cudaSuccess)
        {
            printf("Error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        printf("\nDevice %d:\n", i);
        printf("name: %s\n",dev.name);
        printf("Compute capability %d.%d\n",dev.major, dev.minor);
        // size_t fields must be printed with %zu, not %d/%ld.
        printf("total global memory(KB): %zu\n", dev.totalGlobalMem/1024);
        printf("shared mem per block: %zu\n", dev.sharedMemPerBlock);
        printf("regs per block: %d\n", dev.regsPerBlock);
        printf("warp size: %d\n", dev.warpSize);
        printf("max threads per block: %d\n",dev.maxThreadsPerBlock);
        // maxThreadsDim/maxGridSize are ordered {x, y, z}; labels fixed to match.
        printf("max thread dim x:%d y:%d z:%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]);
        printf("max grid size x:%d y:%d z:%d\n", dev.maxGridSize[0], dev.maxGridSize[1], dev.maxGridSize[2]);
        // Bug fix: the original format string had no conversion for clockRate,
        // so the value was silently dropped.
        printf("clock rate(KHz): %d\n", dev.clockRate);
        printf("total constant memory (bytes): %zu\n", dev.totalConstMem);
        printf("multiprocessor count %d\n", dev.multiProcessorCount);
        printf("memory bus width: %d\n", dev.memoryBusWidth);
        printf("memory clock rate (KHz): %d\n", dev.memoryClockRate);
        printf("L2 cache size (bytes): %d\n", dev.l2CacheSize);
        printf("max threads per SM: %d\n", dev.maxThreadsPerMultiProcessor);
    }

    cudaProfilerStop();
    return 0;
}
|
90c0a9c00c4abbc888691a22dce53854e2af25fa.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/hip/im2col.cuh>
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorMeta.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/ConvUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#endif
namespace at {
namespace native {
namespace {
// Validates the arguments of a 2-D transposed convolution (forward or
// backward) and raises a descriptive error on the first violation.
//
// Checks, in order:
//   - kernel/stride/dilation components are strictly positive;
//   - each output_padding component is smaller than the corresponding
//     stride or dilation (otherwise the output size is ambiguous);
//   - weight (if defined) is a non-empty 2-D or 4-D tensor, and bias
//     (if defined) is 1-D with length weight.size(1);
//   - input is a non-empty 3-D (unbatched) or 4-D (batched) tensor;
//   - the computed output spatial size is at least 1 x 1;
//   - input's channel dim matches weight.size(0), and grad_output (if
//     defined) matches the expected output channels and spatial size.
//
// When weight_nullable is true, an undefined weight is accepted (used on
// the gradient path where only grad_bias is requested).
static inline void slow_conv_transpose2d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
    const Tensor& bias,
    int kernel_height,
    int kernel_width,
    int stride_height,
    int stride_width,
    int pad_height,
    int pad_width,
    int output_padding_height,
    int output_padding_width,
    int dilation_height,
    int dilation_width,
    bool weight_nullable) {
  TORCH_CHECK(
      kernel_width > 0 && kernel_height > 0,
      "kernel size should be greater than zero, but got kernel_height: ",
      kernel_height,
      " kernel_width: ",
      kernel_width);
  TORCH_CHECK(
      stride_width > 0 && stride_height > 0,
      "stride should be greater than zero, but got stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width);
  TORCH_CHECK(
      dilation_width > 0 && dilation_height > 0,
      "dilation should be greater than zero, but got dilation_height: ",
      dilation_height,
      ", dilation_width: ",
      dilation_width);
  TORCH_CHECK(
      (output_padding_width < stride_width ||
       output_padding_width < dilation_width) &&
          (output_padding_height < stride_height ||
           output_padding_height < dilation_height),
      "output padding must be smaller than either stride or dilation, ",
      "but got output_padding_height: ",
      output_padding_height,
      " output_padding_width: ",
      output_padding_width,
      " stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width,
      " dilation_height: ",
      dilation_height,
      " dilation_width: ",
      dilation_width);

  if (weight.defined()) {
    TORCH_CHECK(
        weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
        "non-empty 2D or 4D weight tensor expected, but got: ",
        weight.sizes());
    if (bias.defined()) {
      // bias must be 1-D with one entry per output channel (weight dim 1).
      check_dim_size(bias, 1, 0, weight.size(1));
    }
  } else if (!weight_nullable) {
    AT_ERROR("weight tensor is expected to be non-nullable");
  }

  // Locate the channel/height/width dims (shifted by one for batched input).
  int ndim = input.dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  TORCH_CHECK(
      input.numel() != 0 && (ndim == 3 || ndim == 4),
      "non-empty 3D or 4D input tensor expected but got a tensor with size ",
      input.sizes());

  int64_t input_height = input.size(dimh);
  int64_t input_width = input.size(dimw);
  // Standard transposed-convolution output size formula.
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  if (output_width < 1 || output_height < 1) {
    AT_ERROR(
        "Given input size per channel: (",
        input_height,
        " x ",
        input_width,
        "). Calculated output spatial size per channel: (",
        output_height,
        " x ",
        output_width,
        "). Output size is too small");
  }

  if (weight.defined()) {
    int64_t n_input_plane = weight.size(0);
    check_dim_size(input, ndim, dimf, n_input_plane);
  }

  if (grad_output.defined()) {
    if (weight.defined()) {
      int64_t n_output_plane = weight.size(1);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    } else if (bias.defined()) {
      int64_t n_output_plane = bias.size(0);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    }
    check_dim_size(grad_output, ndim, dimh, output_height);
    check_dim_size(grad_output, ndim, dimw, output_width);
  }
}
// Forward pass of slow_conv_transpose2d on the GPU.
//
// Per batch element: computes columns = weight^T * input with one GEMM,
// then scatter-adds the column buffer into the (larger) output image via
// col2im.  The bias, when defined, is added afterwards with a rank-1 GEMM
// against a buffer of ones.  `output` must already be allocated with the
// correct batched shape by the structured-kernel meta function.
void slow_conv_transpose2d_out_cuda_template(
    const Tensor& output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2},
      weight_arg{weight, "weight", 3}, bias_arg{bias, "bias", 4};
  checkAllSameGPU(
      __func__,
      {input_arg, output_arg, weight_arg, bias_arg});

  // weight is indexed as (n_input_plane, n_output_plane, kH, kW) below.
  int n_input_plane = weight.size(0);
  int n_output_plane = weight.size(1);

  int64_t kernel_height = kernel_size[0];
  int64_t kernel_width = kernel_size[1];
  int64_t dilation_height = dilation[0];
  int64_t dilation_width = dilation[1];
  int64_t pad_height = padding[0];
  int64_t pad_width = padding[1];
  int64_t stride_height = stride[0];
  int64_t stride_width = stride[1];
  int64_t output_padding_height = output_padding[0];
  int64_t output_padding_width = output_padding[1];

  Tensor input_ = input.contiguous();
  Tensor weight_ = weight.contiguous();
  Tensor bias_ = Tensor();

  if (bias.defined()) {
    bias_ = bias.contiguous();
  }

  bool is_batch = false;
  if (input_.dim() == 3) {
    // Force batch
    is_batch = true;
    input_.resize_({1, input_.size(0), input_.size(1), input_.size(2)});
  }

  int64_t input_height = input_.size(2);
  int64_t input_width = input_.size(3);
  // Transposed-convolution output size formula.
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  // Batch size + input planes
  int64_t batch_size = input_.size(0);

  // Create temporary columns
  Tensor columns_ = at::empty({n_output_plane * kernel_width * kernel_height,
      input_height * input_width}, input_.options());

  // Define a buffer of ones, for bias accumulation
  Tensor ones_ = bias.defined() ? at::ones({output_height, output_width}, input_.options()) : Tensor();

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input_.scalar_type(), "slow_conv_transpose2d_out_cuda", [&] {
    // Accumulate col2im in a wider type (e.g. float for half inputs).
    using accscalar_t = at::acc_type<scalar_t, true>;

    // Helpers
    Tensor input_n;
    Tensor output_n;

    // For each elt in batch, do:
    for (int elt = 0; elt < batch_size; elt++) {
      // Matrix mulitply per output:
      input_n = input_.select(0, elt);
      output_n = output.select(0, elt);

      // M,N,K are dims of matrix A and B
      // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
      int64_t m = weight_.size(1) * weight_.size(2) * weight_.size(3);
      int64_t n = input_height * input_width;
      int64_t k = weight_.size(0);

      // Do GEMM (note: this is a bit confusing because gemm assumes
      // column-major matrices)
      at::cuda::blas::gemm<scalar_t>(
          'n',
          't',
          n,
          m,
          k,
          1,
          input_n.data_ptr<scalar_t>(),
          n,
          weight_.data_ptr<scalar_t>(),
          m,
          0,
          columns_.data_ptr<scalar_t>(),
          n);

      // Unpack columns back into input:
      col2im<scalar_t, accscalar_t>(
          at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
          columns_.data_ptr<scalar_t>(),
          n_output_plane,
          output_height,
          output_width,
          input_height,
          input_width,
          kernel_height,
          kernel_width,
          pad_height,
          pad_width,
          stride_height,
          stride_width,
          dilation_height,
          dilation_width,
          output_n.data_ptr<scalar_t>());

      // Do Bias after:
      // M,N,K are dims of matrix A and B
      // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
      int64_t m_ = n_output_plane;
      int64_t n_ = output_height * output_width;
      int64_t k_ = 1;

      // Do GEMM (note: this is a bit confusing because gemm assumes
      // column-major matrices)
      if (bias.defined()) {
        at::cuda::blas::gemm<scalar_t>(
            't',
            'n',
            n_,
            m_,
            k_,
            1,
            ones_.data_ptr<scalar_t>(),
            k_,
            bias_.data_ptr<scalar_t>(),
            k_,
            1,
            output_n.data_ptr<scalar_t>(),
            n_);
      }
    }

    // Resize output
    if (is_batch) {
      output.resize_({n_output_plane, output_height, output_width});
      input_.resize_({n_input_plane, input_height, input_width});
    }
  }); // end of dispatch
}
// Gradient w.r.t. the input of slow_conv_transpose2d.
//
// The backward-input of a transposed convolution is an ordinary (forward)
// convolution of grad_output with the same weight: grad_output is lowered
// with im2col (only when kernel/stride/padding/dilation make it necessary)
// and multiplied against the weight with one GEMM per batch element.
// grad_input is resized here to the batched input shape.
static void slow_conv_transpose2d_backward_out_cuda_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_input,
    const Tensor& weight_,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  TORCH_CHECK(
      kernel_size.size() == 2,
      "It is expected kernel_size equals to 2, but got size ",
      kernel_size.size());

  TORCH_CHECK(
      dilation.size() == 2,
      "It is expected dilation equals to 2, but got size ",
      dilation.size());

  TORCH_CHECK(
      padding.size() == 2,
      "It is expected padding equals to 2, but got size ",
      padding.size());

  TORCH_CHECK(
      stride.size() == 2,
      "It is expected stride equals to 2, but got size ",
      stride.size());

  // Bug fix: this message previously said "stride" while checking
  // output_padding, producing a misleading error.
  TORCH_CHECK(
      output_padding.size() == 2,
      "It is expected output_padding equals to 2, but got size ",
      output_padding.size());

  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      weight_arg{weight_, "weight", 3},
      grad_input_arg{grad_input, "grad_input", 4};
  checkAllSameGPU(
      __func__,
      {input_arg,
       grad_output_arg,
       weight_arg,
       grad_input_arg});

  int n_input_plane = weight_.size(0);
  int n_output_plane = weight_.size(1);

  int64_t kernel_height = kernel_size[0];
  int64_t kernel_width = kernel_size[1];
  int64_t dilation_height = dilation[0];
  int64_t dilation_width = dilation[1];
  int64_t pad_height = padding[0];
  int64_t pad_width = padding[1];
  int64_t stride_height = stride[0];
  int64_t stride_width = stride[1];
  int64_t output_padding_height = output_padding[0];
  int64_t output_padding_width = output_padding[1];

  slow_conv_transpose2d_shape_check(
      input_,
      grad_output_,
      weight_,
      Tensor(),
      kernel_height,
      kernel_width,
      stride_height,
      stride_width,
      pad_height,
      pad_width,
      output_padding_height,
      output_padding_width,
      dilation_height,
      dilation_width,
      false);

  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();
  Tensor weight = weight_.contiguous();

  bool is_batch = false;
  if (input.dim() == 3) {
    // Force batch
    is_batch = true;
    input.resize_({1, input.size(0), input.size(1), input.size(2)});
    grad_output.resize_(
        {1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
  }

  int64_t input_width = input.size(3);
  int64_t input_height = input.size(2);
  // Transposed-convolution output size formula.
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  // Batch size + input planes
  int64_t batch_size = input.size(0);

  // Resize output
  grad_input.resize_({batch_size, n_input_plane, input_height, input_width});

  // Create temporary columns; for a pure 1x1/stride-1/no-pad/no-dilation
  // convolution, grad_output can feed the GEMM directly.
  bool need_columns = (kernel_height != 1 || kernel_width != 1 || stride_height != 1 ||
      stride_width != 1 || pad_height != 0 || pad_width != 0 ||
      dilation_height != 1 || dilation_width != 1);
  Tensor grad_columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height,
      input_height * input_width}, input.options()) : Tensor();

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      grad_output.scalar_type(), "slow_conv_transpose2d_backward_out_cuda", [&] {
    // Helpers
    Tensor grad_input_n = Tensor();
    Tensor grad_output_n = Tensor();

    // For each elt in batch, do:
    for (int elt = 0; elt < batch_size; elt++) {
      // Matrix mulitply per sample:
      grad_input_n = grad_input.select(0, elt);
      grad_output_n = grad_output.select(0, elt);

      if (need_columns) {
        // Lower grad_output into column form for the GEMM below.
        im2col<scalar_t>(
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            grad_output_n.data_ptr<scalar_t>(),
            n_output_plane,
            output_height,
            output_width,
            input_height,
            input_width,
            kernel_height,
            kernel_width,
            pad_height,
            pad_width,
            stride_height,
            stride_width,
            dilation_height,
            dilation_width,
            grad_columns.data_ptr<scalar_t>());
      }

      // M,N,K are dims of matrix A and B
      // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
      int64_t m = weight.size(0);
      int64_t n = input_height * input_width;
      int64_t k = weight.size(1) * weight.size(2) * weight.size(3);

      // Do GEMM (note: this is a bit confusing because gemm assumes
      // column-major matrices)
      auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
          : grad_output_n.data_ptr<scalar_t>();
      at::cuda::blas::gemm<scalar_t>(
          'n',
          'n',
          n,
          m,
          k,
          1,
          gemm_in_ptr,
          n,
          weight.data_ptr<scalar_t>(),
          k,
          0,
          grad_input_n.data_ptr<scalar_t>(),
          n);
    }

    // Resize output
    if (is_batch) {
      grad_output.resize_({n_output_plane, output_height, output_width});
      input.resize_({n_input_plane, input_height, input_width});
      grad_input.resize_({n_input_plane, input_height, input_width});
    }
  }); // end of dispatch
}
// Accumulates the weight and/or bias gradients of slow_conv_transpose2d.
//
// grad_weight (when defined) receives scale_ * (grad_output-columns^T x
// input) accumulated per batch element via GEMM; grad_bias (when defined)
// is the sum of grad_output over the batch and spatial dims.  Both output
// tensors must be contiguous and pre-sized/zeroed by the caller; this
// function accumulates into them (beta = 1 in the GEMM).
void slow_conv_transpose2d_acc_grad_parameters_cuda_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_weight,
    Tensor& grad_bias,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    int scale_) {
  TORCH_CHECK(
      kernel_size.size() == 2,
      "It is expected kernel_size equals to 2, but got size ",
      kernel_size.size());

  TORCH_CHECK(
      dilation.size() == 2,
      "It is expected dilation equals to 2, but got size ",
      dilation.size());

  TORCH_CHECK(
      padding.size() == 2,
      "It is expected padding equals to 2, but got size ",
      padding.size());

  TORCH_CHECK(
      stride.size() == 2,
      "It is expected stride equals to 2, but got size ",
      stride.size());

  // Bug fix: this message previously said "stride" while checking
  // output_padding, producing a misleading error.
  TORCH_CHECK(
      output_padding.size() == 2,
      "It is expected output_padding equals to 2, but got size ",
      output_padding.size());

  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      grad_weight_arg{grad_weight, "grad_weight", 3},
      grad_bias_arg{grad_bias, "grad_bias", 4};
  checkAllSameGPU(
      __func__,
      {input_arg,
       grad_output_arg,
       grad_weight_arg,
       grad_bias_arg});

  int64_t kernel_height = kernel_size[0];
  int64_t kernel_width = kernel_size[1];
  int64_t dilation_height = dilation[0];
  int64_t dilation_width = dilation[1];
  int64_t pad_height = padding[0];
  int64_t pad_width = padding[1];
  int64_t stride_height = stride[0];
  int64_t stride_width = stride[1];
  int64_t output_padding_height = output_padding[0];
  int64_t output_padding_width = output_padding[1];

  // weight may be undefined here (bias-only path), hence weight_nullable.
  slow_conv_transpose2d_shape_check(
      input_,
      grad_output_,
      grad_weight,
      grad_bias,
      kernel_height,
      kernel_width,
      stride_height,
      stride_width,
      pad_height,
      pad_width,
      output_padding_height,
      output_padding_width,
      dilation_height,
      dilation_width,
      true);

  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();

  int64_t n_output_plane;
  if (grad_weight.defined()) {
    n_output_plane = grad_weight.size(1);
  } else if (grad_bias.defined()) {
    n_output_plane = grad_bias.size(0);
  } else {
    // Neither gradient requested: nothing to do.
    return;
  }

  if (grad_weight.defined()) {
    TORCH_CHECK(
        grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
  }
  if (grad_bias.defined()) {
    TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
  }

  bool is_batch = false;
  if (input.dim() == 3) {
    // Force batch
    is_batch = true;
    input.resize_({1, input.size(0), input.size(1), input.size(2)});
    grad_output.resize_(
        {1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
  }

  int64_t input_width = input.size(3);
  int64_t input_height = input.size(2);
  // Transposed-convolution output size formula.
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  // Batch size + input planes
  int64_t batch_size = input.size(0);

  // Create temporary columns; for a pure 1x1/stride-1/no-pad/no-dilation
  // convolution, grad_output can feed the GEMM directly.
  bool need_columns = (kernel_height != 1 || kernel_width != 1 || stride_height != 1 ||
      stride_width != 1 || pad_height != 0 || pad_width != 0 ||
      dilation_height != 1 || dilation_width != 1);
  Tensor columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height,
      input_height * input_width}, input.options()) : Tensor();

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_transpose2d_acc_grad_parameters_cuda", [&] {
    // Helpers
    Tensor input_n = Tensor();
    Tensor grad_output_n = Tensor();

    scalar_t scale = static_cast<scalar_t>(scale_);

    // For each elt in batch, do:
    for (int elt = 0; elt < batch_size; elt++) {
      // Matrix mulitply per output:
      grad_output_n = grad_output.select(0, elt);

      // Do Weight:
      if (grad_weight.defined()) {
        // Matrix mulitply per output:
        input_n = input.select(0, elt);

        if (need_columns) {
          // Extract columns:
          im2col<scalar_t>(
              at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              grad_output_n.data_ptr<scalar_t>(),
              n_output_plane,
              output_height,
              output_width,
              input_height,
              input_width,
              kernel_height,
              kernel_width,
              pad_height,
              pad_width,
              stride_height,
              stride_width,
              dilation_height,
              dilation_width,
              columns.data_ptr<scalar_t>());
        }

        // M,N,K are dims of matrix A and B
        // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
        int64_t n = n_output_plane * kernel_height * kernel_width;
        int64_t m = input_n.size(0); // n_input_plane
        int64_t k = input_height * input_width;

        // Do GEMM (note: this is a bit confusing because gemm assumes
        // column-major matrices)
        auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>()
            : grad_output_n.data_ptr<scalar_t>();
        at::cuda::blas::gemm<scalar_t>(
            't',
            'n',
            n,
            m,
            k,
            scale,
            gemm_in_ptr,
            k,
            input_n.data_ptr<scalar_t>(),
            k,
            1,
            grad_weight.data_ptr<scalar_t>(),
            n);
      }
    }

    if (grad_bias.defined()) {
      // Bias gradient: reduce grad_output over batch and spatial dims.
      at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3});
    }

    // Resize
    if (is_batch) {
      grad_output.resize_({n_output_plane, output_height, output_width});
      input.resize_({input.size(1), input_height, input_width});
    }
  }); // end of dispatch
}
} // namespace
// Structured-kernel entry point for slow_conv_transpose2d on CUDA.
// The output tensor has already been allocated and shape-checked by the
// corresponding meta function; this simply forwards to the GPU
// implementation above.
TORCH_IMPL_FUNC(slow_conv_transpose2d_structured_cuda)
(const Tensor& input,
 const Tensor& weight,
 IntArrayRef kernel_size,
 OptionalTensorRef bias_opt,
 IntArrayRef stride,
 IntArrayRef padding,
 IntArrayRef output_padding,
 IntArrayRef dilation,
 const Tensor& output) {
  // May be an undefined tensor when no bias was supplied.
  const Tensor& bias = bias_opt.getTensorRef();

  slow_conv_transpose2d_out_cuda_template(
      output,
      input,
      weight,
      kernel_size,
      bias,
      stride,
      padding,
      output_padding,
      dilation);
}
// out= variant of the slow_conv_transpose2d backward: fills whichever of
// grad_input / grad_weight / grad_bias is a defined tensor.  grad_weight
// and grad_bias are resized and zeroed here before the parameter-gradient
// template accumulates into them (with scale 1).  Returns the three
// gradient references for chaining.
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cuda(const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias) {
  if (grad_input.defined()) {
    slow_conv_transpose2d_backward_out_cuda_template(
        input,
        grad_output,
        grad_input,
        weight,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation);
  }

  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }

  if (grad_bias.defined()) {
    // One bias-gradient entry per output channel (weight dim 1).
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }

  if (grad_weight.defined() || grad_bias.defined()) {
    slow_conv_transpose2d_acc_grad_parameters_cuda_template(
        input,
        grad_output,
        grad_weight,
        grad_bias,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation,
        1);
  }

  return std::tuple<Tensor&, Tensor&, Tensor&>(
      grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
REGISTER_CUDA_DISPATCH(slow_conv_transpose2d_backward_stub, &slow_conv_transpose2d_backward_cuda);
} // namespace native
} // namespace at
| 90c0a9c00c4abbc888691a22dce53854e2af25fa.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/cuda/im2col.cuh>
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorMeta.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/ConvUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#endif
namespace at {
namespace native {
namespace {
// Validates the arguments of a 2-D transposed convolution (forward or
// backward) and raises a descriptive error on the first violation.
//
// Checks, in order:
//   - kernel/stride/dilation components are strictly positive;
//   - each output_padding component is smaller than the corresponding
//     stride or dilation (otherwise the output size is ambiguous);
//   - weight (if defined) is a non-empty 2-D or 4-D tensor, and bias
//     (if defined) is 1-D with length weight.size(1);
//   - input is a non-empty 3-D (unbatched) or 4-D (batched) tensor;
//   - the computed output spatial size is at least 1 x 1;
//   - input's channel dim matches weight.size(0), and grad_output (if
//     defined) matches the expected output channels and spatial size.
//
// When weight_nullable is true, an undefined weight is accepted (used on
// the gradient path where only grad_bias is requested).
static inline void slow_conv_transpose2d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
    const Tensor& bias,
    int kernel_height,
    int kernel_width,
    int stride_height,
    int stride_width,
    int pad_height,
    int pad_width,
    int output_padding_height,
    int output_padding_width,
    int dilation_height,
    int dilation_width,
    bool weight_nullable) {
  TORCH_CHECK(
      kernel_width > 0 && kernel_height > 0,
      "kernel size should be greater than zero, but got kernel_height: ",
      kernel_height,
      " kernel_width: ",
      kernel_width);
  TORCH_CHECK(
      stride_width > 0 && stride_height > 0,
      "stride should be greater than zero, but got stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width);
  TORCH_CHECK(
      dilation_width > 0 && dilation_height > 0,
      "dilation should be greater than zero, but got dilation_height: ",
      dilation_height,
      ", dilation_width: ",
      dilation_width);
  TORCH_CHECK(
      (output_padding_width < stride_width ||
       output_padding_width < dilation_width) &&
          (output_padding_height < stride_height ||
           output_padding_height < dilation_height),
      "output padding must be smaller than either stride or dilation, ",
      "but got output_padding_height: ",
      output_padding_height,
      " output_padding_width: ",
      output_padding_width,
      " stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width,
      " dilation_height: ",
      dilation_height,
      " dilation_width: ",
      dilation_width);

  if (weight.defined()) {
    TORCH_CHECK(
        weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
        "non-empty 2D or 4D weight tensor expected, but got: ",
        weight.sizes());
    if (bias.defined()) {
      // bias must be 1-D with one entry per output channel (weight dim 1).
      check_dim_size(bias, 1, 0, weight.size(1));
    }
  } else if (!weight_nullable) {
    AT_ERROR("weight tensor is expected to be non-nullable");
  }

  // Locate the channel/height/width dims (shifted by one for batched input).
  int ndim = input.dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  TORCH_CHECK(
      input.numel() != 0 && (ndim == 3 || ndim == 4),
      "non-empty 3D or 4D input tensor expected but got a tensor with size ",
      input.sizes());

  int64_t input_height = input.size(dimh);
  int64_t input_width = input.size(dimw);
  // Standard transposed-convolution output size formula.
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  if (output_width < 1 || output_height < 1) {
    AT_ERROR(
        "Given input size per channel: (",
        input_height,
        " x ",
        input_width,
        "). Calculated output spatial size per channel: (",
        output_height,
        " x ",
        output_width,
        "). Output size is too small");
  }

  if (weight.defined()) {
    int64_t n_input_plane = weight.size(0);
    check_dim_size(input, ndim, dimf, n_input_plane);
  }

  if (grad_output.defined()) {
    if (weight.defined()) {
      int64_t n_output_plane = weight.size(1);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    } else if (bias.defined()) {
      int64_t n_output_plane = bias.size(0);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    }
    check_dim_size(grad_output, ndim, dimh, output_height);
    check_dim_size(grad_output, ndim, dimw, output_width);
  }
}
// Forward pass of slow_conv_transpose2d on CUDA.
//
// Implemented per batch element as GEMM (weight^T x input) into a temporary
// "columns" buffer, followed by col2im to scatter-accumulate the columns into
// the output image; bias is added with a second rank-1 GEMM against a buffer
// of ones. `output` is assumed pre-sized by the structured-kernel meta
// function (argument checking happens there, not here).
void slow_conv_transpose2d_out_cuda_template(
    const Tensor& output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2},
      weight_arg{weight, "weight", 3}, bias_arg{bias, "bias", 4};
  checkAllSameGPU(
      __func__,
      {input_arg, output_arg, weight_arg, bias_arg});
  // Weight layout is (in_planes, out_planes, kH, kW).
  int n_input_plane = weight.size(0);
  int n_output_plane = weight.size(1);
  int64_t kernel_height = kernel_size[0];
  int64_t kernel_width = kernel_size[1];
  int64_t dilation_height = dilation[0];
  int64_t dilation_width = dilation[1];
  int64_t pad_height = padding[0];
  int64_t pad_width = padding[1];
  int64_t stride_height = stride[0];
  int64_t stride_width = stride[1];
  int64_t output_padding_height = output_padding[0];
  int64_t output_padding_width = output_padding[1];
  // GEMM and col2im require contiguous storage.
  Tensor input_ = input.contiguous();
  Tensor weight_ = weight.contiguous();
  Tensor bias_ = Tensor();
  if (bias.defined()) {
    bias_ = bias.contiguous();
  }
  bool is_batch = false;
  if (input_.dim() == 3) {
    // Force batch: view a single (C,H,W) sample as a batch of one.
    is_batch = true;
    input_.resize_({1, input_.size(0), input_.size(1), input_.size(2)});
  }
  int64_t input_height = input_.size(2);
  int64_t input_width = input_.size(3);
  // Transposed-convolution output-size formula (matches shape_check above).
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  // Batch size + input planes
  int64_t batch_size = input_.size(0);
  // Create temporary columns buffer, reused for every batch element.
  Tensor columns_ = at::empty({n_output_plane * kernel_width * kernel_height,
      input_height * input_width}, input_.options());
  // Define a buffer of ones, for bias accumulation via GEMM.
  Tensor ones_ = bias.defined() ? at::ones({output_height, output_width}, input_.options()) : Tensor();
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input_.scalar_type(), "slow_conv_transpose2d_out_cuda", [&] {
        using accscalar_t = at::acc_type<scalar_t, true>;
        // Helpers
        Tensor input_n;
        Tensor output_n;
        // For each elt in batch, do:
        for (int elt = 0; elt < batch_size; elt++) {
          // Matrix multiply per output:
          input_n = input_.select(0, elt);
          output_n = output.select(0, elt);
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m = weight_.size(1) * weight_.size(2) * weight_.size(3);
          int64_t n = input_height * input_width;
          int64_t k = weight_.size(0);
          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): columns = weight^T x input_n.
          at::cuda::blas::gemm<scalar_t>(
              'n',
              't',
              n,
              m,
              k,
              1,
              input_n.data_ptr<scalar_t>(),
              n,
              weight_.data_ptr<scalar_t>(),
              m,
              0,
              columns_.data_ptr<scalar_t>(),
              n);
          // Unpack columns back into input (scatter-accumulate into output_n):
          col2im<scalar_t, accscalar_t>(
              at::cuda::getCurrentCUDAStream(),
              columns_.data_ptr<scalar_t>(),
              n_output_plane,
              output_height,
              output_width,
              input_height,
              input_width,
              kernel_height,
              kernel_width,
              pad_height,
              pad_width,
              stride_height,
              stride_width,
              dilation_height,
              dilation_width,
              output_n.data_ptr<scalar_t>());
          // Do Bias after:
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m_ = n_output_plane;
          int64_t n_ = output_height * output_width;
          int64_t k_ = 1;
          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): output_n += bias x ones^T (rank-1 update).
          if (bias.defined()) {
            at::cuda::blas::gemm<scalar_t>(
                't',
                'n',
                n_,
                m_,
                k_,
                1,
                ones_.data_ptr<scalar_t>(),
                k_,
                bias_.data_ptr<scalar_t>(),
                k_,
                1,
                output_n.data_ptr<scalar_t>(),
                n_);
          }
        }
        // Resize output back to the unbatched (C,H,W) shape if we forced a
        // batch dimension above.
        if (is_batch) {
          output.resize_({n_output_plane, output_height, output_width});
          input_.resize_({n_input_plane, input_height, input_width});
        }
      }); // end of dispatch
}
// Computes grad_input for slow_conv_transpose2d on CUDA.
//
// The backward of a transposed convolution w.r.t. its input is an ordinary
// convolution of grad_output with the same weight: per batch element, im2col
// unpacks grad_output into columns (skipped when the convolution is 1x1,
// stride 1, no padding/dilation — then grad_output is already in column
// form), followed by GEMM weight x columns -> grad_input.
//
// Fix: the size check for `output_padding` previously printed
// "It is expected stride equals to 2" — corrected to name output_padding.
static void slow_conv_transpose2d_backward_out_cuda_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_input,
    const Tensor& weight_,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  TORCH_CHECK(
      kernel_size.size() == 2,
      "It is expected kernel_size equals to 2, but got size ",
      kernel_size.size());
  TORCH_CHECK(
      dilation.size() == 2,
      "It is expected dilation equals to 2, but got size ",
      dilation.size());
  TORCH_CHECK(
      padding.size() == 2,
      "It is expected padding equals to 2, but got size ",
      padding.size());
  TORCH_CHECK(
      stride.size() == 2,
      "It is expected stride equals to 2, but got size ",
      stride.size());
  TORCH_CHECK(
      output_padding.size() == 2,
      "It is expected output_padding equals to 2, but got size ",
      output_padding.size());
  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      weight_arg{weight_, "weight", 3},
      grad_input_arg{grad_input, "grad_input", 4};
  checkAllSameGPU(
      __func__,
      {input_arg,
       grad_output_arg,
       weight_arg,
       grad_input_arg});
  // Weight layout is (in_planes, out_planes, kH, kW).
  int n_input_plane = weight_.size(0);
  int n_output_plane = weight_.size(1);
  int64_t kernel_height = kernel_size[0];
  int64_t kernel_width = kernel_size[1];
  int64_t dilation_height = dilation[0];
  int64_t dilation_width = dilation[1];
  int64_t pad_height = padding[0];
  int64_t pad_width = padding[1];
  int64_t stride_height = stride[0];
  int64_t stride_width = stride[1];
  int64_t output_padding_height = output_padding[0];
  int64_t output_padding_width = output_padding[1];
  // Full argument/shape validation (weight is required here).
  slow_conv_transpose2d_shape_check(
      input_,
      grad_output_,
      weight_,
      Tensor(),
      kernel_height,
      kernel_width,
      stride_height,
      stride_width,
      pad_height,
      pad_width,
      output_padding_height,
      output_padding_width,
      dilation_height,
      dilation_width,
      false);
  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();
  Tensor weight = weight_.contiguous();
  bool is_batch = false;
  if (input.dim() == 3) {
    // Force batch: treat a single (C,H,W) sample as a batch of one.
    is_batch = true;
    input.resize_({1, input.size(0), input.size(1), input.size(2)});
    grad_output.resize_(
        {1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
  }
  int64_t input_width = input.size(3);
  int64_t input_height = input.size(2);
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  // Batch size + input planes
  int64_t batch_size = input.size(0);
  // Resize output
  grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
  // The im2col step is the identity for a 1x1/stride-1/no-pad/no-dilation
  // convolution, so the columns buffer can be skipped entirely.
  bool need_columns = (kernel_height != 1 || kernel_width != 1 || stride_height != 1 ||
      stride_width != 1 || pad_height != 0 || pad_width != 0 ||
      dilation_height != 1 || dilation_width != 1);
  Tensor grad_columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height,
      input_height * input_width}, input.options()) : Tensor();
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      grad_output.scalar_type(), "slow_conv_transpose2d_backward_out_cuda", [&] {
        // Helpers
        Tensor grad_input_n = Tensor();
        Tensor grad_output_n = Tensor();
        // For each elt in batch, do:
        for (int elt = 0; elt < batch_size; elt++) {
          // Matrix multiply per sample:
          grad_input_n = grad_input.select(0, elt);
          grad_output_n = grad_output.select(0, elt);
          if (need_columns) {
            // Extract columns from grad_output:
            im2col<scalar_t>(
                at::cuda::getCurrentCUDAStream(),
                grad_output_n.data_ptr<scalar_t>(),
                n_output_plane,
                output_height,
                output_width,
                input_height,
                input_width,
                kernel_height,
                kernel_width,
                pad_height,
                pad_width,
                stride_height,
                stride_width,
                dilation_height,
                dilation_width,
                grad_columns.data_ptr<scalar_t>());
          }
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m = weight.size(0);
          int64_t n = input_height * input_width;
          int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): grad_input_n = weight x columns.
          auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
              : grad_output_n.data_ptr<scalar_t>();
          at::cuda::blas::gemm<scalar_t>(
              'n',
              'n',
              n,
              m,
              k,
              1,
              gemm_in_ptr,
              n,
              weight.data_ptr<scalar_t>(),
              k,
              0,
              grad_input_n.data_ptr<scalar_t>(),
              n);
        }
        // Undo the forced batch dimension.
        if (is_batch) {
          grad_output.resize_({n_output_plane, output_height, output_width});
          input.resize_({n_input_plane, input_height, input_width});
          grad_input.resize_({n_input_plane, input_height, input_width});
        }
      }); // end of dispatch
}
// Accumulates grad_weight and/or grad_bias for slow_conv_transpose2d on CUDA.
//
// Per batch element: im2col unpacks grad_output into columns (skipped for the
// trivial 1x1/stride-1/no-pad/no-dilation case), then GEMM accumulates
// input_n x columns^T into grad_weight, scaled by `scale_`. grad_bias is a
// plain sum of grad_output over (batch, H, W). Either gradient tensor may be
// undefined; if both are, the function is a no-op.
//
// Fix: the size check for `output_padding` previously printed
// "It is expected stride equals to 2" — corrected to name output_padding.
void slow_conv_transpose2d_acc_grad_parameters_cuda_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_weight,
    Tensor& grad_bias,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    int scale_) {
  TORCH_CHECK(
      kernel_size.size() == 2,
      "It is expected kernel_size equals to 2, but got size ",
      kernel_size.size());
  TORCH_CHECK(
      dilation.size() == 2,
      "It is expected dilation equals to 2, but got size ",
      dilation.size());
  TORCH_CHECK(
      padding.size() == 2,
      "It is expected padding equals to 2, but got size ",
      padding.size());
  TORCH_CHECK(
      stride.size() == 2,
      "It is expected stride equals to 2, but got size ",
      stride.size());
  TORCH_CHECK(
      output_padding.size() == 2,
      "It is expected output_padding equals to 2, but got size ",
      output_padding.size());
  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      grad_weight_arg{grad_weight, "grad_weight", 3},
      grad_bias_arg{grad_bias, "grad_bias", 4};
  checkAllSameGPU(
      __func__,
      {input_arg,
       grad_output_arg,
       grad_weight_arg,
       grad_bias_arg});
  int64_t kernel_height = kernel_size[0];
  int64_t kernel_width = kernel_size[1];
  int64_t dilation_height = dilation[0];
  int64_t dilation_width = dilation[1];
  int64_t pad_height = padding[0];
  int64_t pad_width = padding[1];
  int64_t stride_height = stride[0];
  int64_t stride_width = stride[1];
  int64_t output_padding_height = output_padding[0];
  int64_t output_padding_width = output_padding[1];
  // weight_nullable=true: grad_weight may be undefined (bias-only path).
  slow_conv_transpose2d_shape_check(
      input_,
      grad_output_,
      grad_weight,
      grad_bias,
      kernel_height,
      kernel_width,
      stride_height,
      stride_width,
      pad_height,
      pad_width,
      output_padding_height,
      output_padding_width,
      dilation_height,
      dilation_width,
      true);
  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();
  // Derive the output plane count from whichever gradient is requested;
  // nothing to do when neither is.
  int64_t n_output_plane;
  if (grad_weight.defined()) {
    n_output_plane = grad_weight.size(1);
  } else if (grad_bias.defined()) {
    n_output_plane = grad_bias.size(0);
  } else {
    return;
  }
  if (grad_weight.defined()) {
    TORCH_CHECK(
        grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
  }
  if (grad_bias.defined()) {
    TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
  }
  bool is_batch = false;
  if (input.dim() == 3) {
    // Force batch: treat a single (C,H,W) sample as a batch of one.
    is_batch = true;
    input.resize_({1, input.size(0), input.size(1), input.size(2)});
    grad_output.resize_(
        {1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
  }
  int64_t input_width = input.size(3);
  int64_t input_height = input.size(2);
  int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
      (dilation_height * (kernel_height - 1) + 1) + output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  // Batch size + input planes
  int64_t batch_size = input.size(0);
  // im2col is the identity for a 1x1/stride-1/no-pad/no-dilation conv, so
  // the columns buffer can be skipped entirely.
  bool need_columns = (kernel_height != 1 || kernel_width != 1 || stride_height != 1 ||
      stride_width != 1 || pad_height != 0 || pad_width != 0 ||
      dilation_height != 1 || dilation_width != 1);
  Tensor columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height,
      input_height * input_width}, input.options()) : Tensor();
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_transpose2d_acc_grad_parameters_cuda", [&] {
        // Helpers
        Tensor input_n = Tensor();
        Tensor grad_output_n = Tensor();
        scalar_t scale = static_cast<scalar_t>(scale_);
        // For each elt in batch, do:
        for (int elt = 0; elt < batch_size; elt++) {
          // Matrix multiply per output:
          grad_output_n = grad_output.select(0, elt);
          // Do Weight:
          if (grad_weight.defined()) {
            // Matrix multiply per output:
            input_n = input.select(0, elt);
            if (need_columns) {
              // Extract columns:
              im2col<scalar_t>(
                  at::cuda::getCurrentCUDAStream(),
                  grad_output_n.data_ptr<scalar_t>(),
                  n_output_plane,
                  output_height,
                  output_width,
                  input_height,
                  input_width,
                  kernel_height,
                  kernel_width,
                  pad_height,
                  pad_width,
                  stride_height,
                  stride_width,
                  dilation_height,
                  dilation_width,
                  columns.data_ptr<scalar_t>());
            }
            // M,N,K are dims of matrix A and B
            // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
            int64_t n = n_output_plane * kernel_height * kernel_width;
            int64_t m = input_n.size(0); // n_input_plane
            int64_t k = input_height * input_width;
            // Do GEMM (note: this is a bit confusing because gemm assumes
            // column-major matrices): grad_weight += scale * input_n x cols^T.
            auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>()
                : grad_output_n.data_ptr<scalar_t>();
            at::cuda::blas::gemm<scalar_t>(
                't',
                'n',
                n,
                m,
                k,
                scale,
                gemm_in_ptr,
                k,
                input_n.data_ptr<scalar_t>(),
                k,
                1,
                grad_weight.data_ptr<scalar_t>(),
                n);
          }
        }
        // grad_bias is the reduction of grad_output over batch and spatial
        // dims; done once for the whole batch.
        if (grad_bias.defined()) {
          at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3});
        }
        // Undo the forced batch dimension.
        if (is_batch) {
          grad_output.resize_({n_output_plane, output_height, output_width});
          input.resize_({input.size(1), input_height, input_width});
        }
      }); // end of dispatch
}
} // namespace
// Structured-kernel entry point for slow_conv_transpose2d on CUDA.
// Shape checking and output allocation are handled by the shared meta
// function; this impl only unwraps the optional bias and delegates to the
// template above.
TORCH_IMPL_FUNC(slow_conv_transpose2d_structured_cuda)
(const Tensor& input,
 const Tensor& weight,
 IntArrayRef kernel_size,
 OptionalTensorRef bias_opt,
 IntArrayRef stride,
 IntArrayRef padding,
 IntArrayRef output_padding,
 IntArrayRef dilation,
 const Tensor& output) {
  // getTensorRef() yields an undefined Tensor when no bias was supplied.
  const Tensor& bias = bias_opt.getTensorRef();
  slow_conv_transpose2d_out_cuda_template(
      output,
      input,
      weight,
      kernel_size,
      bias,
      stride,
      padding,
      output_padding,
      dilation);
}
// Out-variant backward for slow_conv_transpose2d on CUDA.
// Each gradient is computed only if the caller passed a defined tensor for
// it; undefined outputs are left untouched. grad_weight/grad_bias are
// resized and zeroed here because the parameter template *accumulates*
// into them.
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cuda(const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias) {
  if (grad_input.defined()) {
    slow_conv_transpose2d_backward_out_cuda_template(
        input,
        grad_output,
        grad_input,
        weight,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation);
  }
  // Zero-fill the parameter gradients before accumulation.
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }
  if (grad_weight.defined() || grad_bias.defined()) {
    // scale = 1: plain accumulation into the freshly zeroed tensors.
    slow_conv_transpose2d_acc_grad_parameters_cuda_template(
        input,
        grad_output,
        grad_weight,
        grad_bias,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation,
        1);
  }
  return std::tuple<Tensor&, Tensor&, Tensor&>(
      grad_input, grad_weight, grad_bias);
}
// Functional backward for slow_conv_transpose2d on CUDA.
// `output_mask` selects which of (grad_input, grad_weight, grad_bias) to
// compute; unrequested gradients come back as undefined tensors.
// grad_weight/grad_bias are zero-initialized because the parameter template
// accumulates into them.
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    std::array<bool, 3> output_mask) {
  // A defined (initially empty) tensor marks a requested gradient.
  Tensor grad_input =
      output_mask[0] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_weight =
      output_mask[1] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_bias =
      output_mask[2] ? at::empty({0}, grad_output.options()) : Tensor();
  if (grad_input.defined()) {
    slow_conv_transpose2d_backward_out_cuda_template(
        input,
        grad_output,
        grad_input,
        weight,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation);
  }
  // Zero-fill the parameter gradients before accumulation.
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }
  if (grad_weight.defined() || grad_bias.defined()) {
    // scale = 1: plain accumulation into the freshly zeroed tensors.
    slow_conv_transpose2d_acc_grad_parameters_cuda_template(
        input,
        grad_output,
        grad_weight,
        grad_bias,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation,
        1);
  }
  return std::make_tuple(grad_input, grad_weight, grad_bias);
}
REGISTER_CUDA_DISPATCH(slow_conv_transpose2d_backward_stub, &slow_conv_transpose2d_backward_cuda);
} // namespace native
} // namespace at
|
963c1a44b5877e3979dde76e96bb895472633aa6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.h"
// Target function for the Newton solve: f(x) = exp(cos(x)) - 2.
__device__ __host__
double f(double x) {
    double c = cos(x);
    return exp(c) - 2.0;
};
// Analytic derivative of f: f'(x) = -sin(x) * exp(cos(x)).
__device__ __host__
double fp(double x) {
    return -exp(cos(x)) * sin(x);
};
// implements newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
// Host reference: runs 5 Newton-Raphson iterations on each of the n entries
// of x, using x[i] as the initial guess, to solve f(x) = 0 where
// f(x) = exp(cos(x)) - 2.
void newton_host(int n, double *x) {
    for (int i = 0; i < n; ++i) {
        double xi = x[i];
        int iter = 0;
        while (iter < 5) {
            xi = xi - f(xi) / fp(xi);
            ++iter;
        }
        x[i] = xi;
    }
}
// TODO : implement newton_device() kernel that performs the work in newton_host
// in parallel on the GPU
// Device kernel: same 5-iteration Newton solve as newton_host, one element
// per thread. Rewritten as a grid-stride loop so the kernel is correct for
// any launch configuration (including grids smaller than n); with the
// current launch (grid covers n exactly) each thread executes at most one
// iteration of the outer loop, identical to the original guarded version.
__global__
void newton_device(int n, double *x) {
    auto stride = blockDim.x * gridDim.x;
    for (auto i = threadIdx.x + blockIdx.x*blockDim.x; i < n; i += stride) {
        auto x0 = x[i];
        for (int iter = 0; iter < 5; ++iter) {
            x0 -= f(x0)/fp(x0);
        }
        x[i] = x0;
    }
}
// Driver: reads a size exponent from argv[1] (via util.h's read_arg —
// presumably (argc, argv, index, default); TODO confirm), runs the Newton
// kernel on 2^pow doubles initialized to 1.5, times H2D / kernel / D2H
// separately, and verifies every result is a root of f.
// NOTE(review): device selection uses argv[1] as a device id here while
// read_arg treats the same argument as a problem-size exponent — looks
// intentional for this exercise, but worth confirming.
int main(int argc, char** argv) {
    size_t pow = read_arg(argc, argv, 1, 20);
    size_t n = 1 << pow;
    auto size_in_bytes = n * sizeof(double);
    std::cout << "memory copy overlap test of length n = " << n
              << " : " << size_in_bytes/(1024.*1024.) << "MB"
              << std::endl;
    hipInit(0);
    // malloc_device / malloc_host come from util.h; xh is initialized to 1.5.
    double* xd = malloc_device<double>(n);
    double* xh = malloc_host<double>(n, 1.5);
    double* x = malloc_host<double>(n);
    // compute kernel launch configuration (ceil-div so the grid covers n)
    auto block_dim = 128;
    auto grid_dim = (n+block_dim-1)/block_dim;
    // Time host-to-device copy.
    auto time_h2d = -get_time();
    copy_to_device(xh, xd, n);
    time_h2d += get_time();
    hipDeviceSynchronize();
    // Time the kernel (synchronize so the measurement covers execution,
    // not just the asynchronous launch).
    auto time_kernel = -get_time();
    hipLaunchKernelGGL(( newton_device), dim3(grid_dim), dim3(block_dim), 0, 0, n, xd);
    hipDeviceSynchronize();
    time_kernel += get_time();
    // Time device-to-host copy of the results.
    auto time_d2h = -get_time();
    copy_to_host(xd, x, n);
    time_d2h += get_time();
    std::cout << "-------\ntimings\n-------" << std::endl;
    std::cout << "H2D : " << time_h2d << std::endl;
    std::cout << "D2H : " << time_d2h << std::endl;
    std::cout << "kernel : " << time_kernel << std::endl;
    // check for errors: every entry should now satisfy f(x) ~= 0
    auto errors = 0;
    for(auto i=0; i<n; ++i) {
        if(::fabs(f(x[i]))>1e-10) {
            errors++;
        }
    }
    if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
    else std::cout << "\n============ PASSED" << std::endl;
    hipFree(xd);
    free(xh);
    free(x);
    return 0;
}
| 963c1a44b5877e3979dde76e96bb895472633aa6.cu | #include <iostream>
#include <cuda.h>
#include "util.h"
// Target function for the Newton solve: f(x) = exp(cos(x)) - 2.
__device__ __host__
double f(double x) {
    double c = cos(x);
    return exp(c) - 2.0;
};
// Analytic derivative of f: f'(x) = -sin(x) * exp(cos(x)).
__device__ __host__
double fp(double x) {
    return -exp(cos(x)) * sin(x);
};
// implements newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
// Host reference: runs 5 Newton-Raphson iterations on each of the n entries
// of x, using x[i] as the initial guess, to solve f(x) = 0 where
// f(x) = exp(cos(x)) - 2.
void newton_host(int n, double *x) {
    for (int i = 0; i < n; ++i) {
        double xi = x[i];
        int iter = 0;
        while (iter < 5) {
            xi = xi - f(xi) / fp(xi);
            ++iter;
        }
        x[i] = xi;
    }
}
// TODO : implement newton_device() kernel that performs the work in newton_host
// in parallel on the GPU
// Device kernel: same 5-iteration Newton solve as newton_host, one element
// per thread. Rewritten as a grid-stride loop so the kernel is correct for
// any launch configuration (including grids smaller than n); with the
// current launch (grid covers n exactly) each thread executes at most one
// iteration of the outer loop, identical to the original guarded version.
__global__
void newton_device(int n, double *x) {
    auto stride = blockDim.x * gridDim.x;
    for (auto i = threadIdx.x + blockIdx.x*blockDim.x; i < n; i += stride) {
        auto x0 = x[i];
        for (int iter = 0; iter < 5; ++iter) {
            x0 -= f(x0)/fp(x0);
        }
        x[i] = x0;
    }
}
// Driver: reads a size exponent from argv[1] (via util.h's read_arg —
// presumably (argc, argv, index, default); TODO confirm), runs the Newton
// kernel on 2^pow doubles initialized to 1.5, times H2D / kernel / D2H
// separately, and verifies every result is a root of f.
//
// Fix: cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
// replaced with the equivalent cudaDeviceSynchronize().
int main(int argc, char** argv) {
    size_t pow = read_arg(argc, argv, 1, 20);
    size_t n = 1 << pow;
    auto size_in_bytes = n * sizeof(double);
    std::cout << "memory copy overlap test of length n = " << n
              << " : " << size_in_bytes/(1024.*1024.) << "MB"
              << std::endl;
    cuInit(0);
    // malloc_device / malloc_host come from util.h; xh is initialized to 1.5.
    double* xd = malloc_device<double>(n);
    double* xh = malloc_host<double>(n, 1.5);
    double* x = malloc_host<double>(n);
    // compute kernel launch configuration (ceil-div so the grid covers n)
    auto block_dim = 128;
    auto grid_dim = (n+block_dim-1)/block_dim;
    // Time host-to-device copy.
    auto time_h2d = -get_time();
    copy_to_device(xh, xd, n);
    time_h2d += get_time();
    cudaDeviceSynchronize();
    // Time the kernel (synchronize so the measurement covers execution,
    // not just the asynchronous launch).
    auto time_kernel = -get_time();
    newton_device<<<grid_dim, block_dim>>>(n, xd);
    cudaDeviceSynchronize();
    time_kernel += get_time();
    // Time device-to-host copy of the results.
    auto time_d2h = -get_time();
    copy_to_host(xd, x, n);
    time_d2h += get_time();
    std::cout << "-------\ntimings\n-------" << std::endl;
    std::cout << "H2D : " << time_h2d << std::endl;
    std::cout << "D2H : " << time_d2h << std::endl;
    std::cout << "kernel : " << time_kernel << std::endl;
    // check for errors: every entry should now satisfy f(x) ~= 0
    auto errors = 0;
    for(auto i=0; i<n; ++i) {
        if(std::fabs(f(x[i]))>1e-10) {
            errors++;
        }
    }
    if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
    else std::cout << "\n============ PASSED" << std::endl;
    cudaFree(xd);
    free(xh);
    free(x);
    return 0;
}
|
aacb5631d37d19f073b31e66d360c6197d82b759.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
// CUDA kernel function (auto-generated OP2 wrapper for the bres_calc user
// kernel). Each thread block executes one plan "block": indirectly accessed
// data is staged into dynamic shared memory, the user kernel runs once per
// set element, and OP_INC contributions (ind_arg3 / arg4) are accumulated
// through thread colors so that no two threads update the same shared entry
// concurrently, then flushed back to global memory.
__global__ void op_cuda_bres_calc(
    double *ind_arg0, int *ind_arg0_maps,
    double *ind_arg1, int *ind_arg1_maps,
    double *ind_arg2, int *ind_arg2_maps,
    double *ind_arg3, int *ind_arg3_maps,
    short *arg0_maps,
    short *arg1_maps,
    short *arg2_maps,
    short *arg3_maps,
    short *arg4_maps,
    int *arg5,
    int *ind_arg_sizes,
    int *ind_arg_offs,
    int block_offset,
    int *blkmap,
    int *offset,
    int *nelems,
    int *ncolors,
    int *colors,
    int nblocks) {
  // Per-thread local accumulator for the OP_INC argument (4 doubles).
  double arg4_l[4];
  __shared__ int *ind_arg0_map, ind_arg0_size;
  __shared__ int *ind_arg1_map, ind_arg1_size;
  __shared__ int *ind_arg2_map, ind_arg2_size;
  __shared__ int *ind_arg3_map, ind_arg3_size;
  __shared__ double *ind_arg0_s;
  __shared__ double *ind_arg1_s;
  __shared__ double *ind_arg2_s;
  __shared__ double *ind_arg3_s;
  __shared__ int nelems2, ncolor;
  __shared__ int nelem, offset_b;
  extern __shared__ char shared[];
  // 2D grid may overshoot the number of plan blocks for this color.
  if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
  if (threadIdx.x==0) {
    // get sizes and shift pointers and direct-mapped data
    int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
    nelem = nelems[blockId];
    offset_b = offset[blockId];
    // Round nelem up to a multiple of blockDim.x so every thread reaches
    // the color-loop barriers below.
    nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
    ncolor = ncolors[blockId];
    ind_arg0_size = ind_arg_sizes[0+blockId*4];
    ind_arg1_size = ind_arg_sizes[1+blockId*4];
    ind_arg2_size = ind_arg_sizes[2+blockId*4];
    ind_arg3_size = ind_arg_sizes[3+blockId*4];
    ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
    ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
    ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
    ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
    // set shared memory pointers (dims: 2, 4, 1, 4 doubles per element)
    int nbytes = 0;
    ind_arg0_s = (double *) &shared[nbytes];
    nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
    ind_arg1_s = (double *) &shared[nbytes];
    nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
    ind_arg2_s = (double *) &shared[nbytes];
    nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
    ind_arg3_s = (double *) &shared[nbytes];
  }
  __syncthreads(); // make sure all of above completed
  // copy indirect datasets into shared memory or zero increment
  for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
    ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
  for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
    ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
  for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
    ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
  for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
    ind_arg3_s[n] = ZERO_double;
  __syncthreads();
  // process set elements
  for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
    int col2 = -1;
    if (n<nelem) {
      // initialise local variables
      for (int d=0; d<4; d++)
        arg4_l[d] = ZERO_double;
      // user-supplied kernel call
      bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
                 ind_arg0_s+arg1_maps[n+offset_b]*2,
                 ind_arg1_s+arg2_maps[n+offset_b]*4,
                 ind_arg2_s+arg3_maps[n+offset_b]*1,
                 arg4_l,
                 arg5+(n+offset_b)*1 );
      col2 = colors[n+offset_b];
    }
    // store local variables: one color at a time so threads sharing an
    // ind_arg3 entry never write it in the same phase. The barrier inside
    // this loop is safe because every thread iterates all ncolor phases.
    int arg4_map;
    if (col2>=0) {
      arg4_map = arg4_maps[n+offset_b];
    }
    for (int col=0; col<ncolor; col++) {
      if (col2==col) {
        for (int d=0; d<4; d++)
          ind_arg3_s[d+arg4_map*4] += arg4_l[d];
      }
      __syncthreads();
    }
  }
  // apply pointered write/increment: flush shared increments to global
  for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
    ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
// Host stub (auto-generated OP2 wrapper) for the bres_calc parallel loop.
// Hides MPI halo-exchange latency by splitting the set into a "core" part
// (elements touching no halo data, executed while halo exchanges are in
// flight) and a "noncore" part (executed after wait_all_cuda). Each part
// gets its own execution plan and is launched color by color.
void op_par_loop_bres_calc(char const *name, op_set set,
    op_arg arg0,
    op_arg arg1,
    op_arg arg2,
    op_arg arg3,
    op_arg arg4,
    op_arg arg5 ){
  int nargs = 6;
  op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
  int ninds = 4;
  // Indirect-argument index per arg; -1 marks a directly accessed arg.
  int inds[6] = {0,0,1,2,3,-1};
  int sent[6] = {0,0,0,0,0,0}; //array to set if halo is exchanged
  if(ninds > 0) //indirect loop
  {
    // Kick off (non-blocking) halo exchanges for all dat arguments.
    for(int i = 0; i<nargs; i++)
    {
      if(args[i].argtype == OP_ARG_DAT)
      {
        if (OP_diags==1) reset_halo(args[i]);
        sent[i] = exchange_halo_cuda(args[i]);
        //if(sent[i] == 1)wait_all_cuda(args[i]);
      }
    }
  }
  double cpu_t1, cpu_t2, wall_t1, wall_t2;
  op_plan *Plan;
  int block_offset;
  if (OP_diags>2) {
    printf(" kernel routine with indirection: bres_calc \n");
  }
  // get plan
  #ifdef OP_PART_SIZE_3
    int part_size = OP_PART_SIZE_3;
  #else
    int part_size = OP_part_size;
  #endif
  //get offsets: core elements first, then remaining (noncore + exec halo)
  int core_len = core_num[set->index];
  int noncore_len = set->size + OP_import_exec_list[set->index]->size - core_len;
  if(core_len >0){
    //process core set while halo exchanges are still in flight
    if (OP_latency_sets[set->index].core_set == NULL) {
      // Lazily build a pseudo-set describing just the core elements.
      op_set core_set = ( op_set ) malloc ( sizeof ( op_set_core ) );
      core_set->index = set->index;
      core_set->name = set->name;
      core_set->size = core_len;
      core_set->exec_size = 0;
      core_set->nonexec_size = 0;
      OP_latency_sets[set->index].core_set = core_set;
    }
    Plan = op_plan_get_offset(name,OP_latency_sets[set->index].core_set,0,part_size,nargs,args,ninds,inds);
    // initialise timers
    op_timers_core(&cpu_t1, &wall_t1);
    // execute plan, one kernel launch per block color
    block_offset = 0;
    for (int col=0; col < Plan->ncolors; col++) {
      #ifdef OP_BLOCK_SIZE_3
        int nthread = OP_BLOCK_SIZE_3;
      #else
        int nthread = OP_block_size;
      #endif
      // Split into a 2D grid when the color has >= 2^16 blocks (1D grid
      // dimension limit).
      dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
      int nshared = Plan->nshared;
      hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks), dim3(nthread), nshared, 0,
          (double *)arg0.data_d, Plan->ind_maps[0],
          (double *)arg2.data_d, Plan->ind_maps[1],
          (double *)arg3.data_d, Plan->ind_maps[2],
          (double *)arg4.data_d, Plan->ind_maps[3],
          Plan->loc_maps[0],
          Plan->loc_maps[1],
          Plan->loc_maps[2],
          Plan->loc_maps[3],
          Plan->loc_maps[4],
          (int *)arg5.data_d,
          Plan->ind_sizes,
          Plan->ind_offs,
          block_offset,
          Plan->blkmap,
          Plan->offset,
          Plan->nelems,
          Plan->nthrcol,
          Plan->thrcol,
          Plan->ncolblk[col]);
      cutilSafeCall(hipDeviceSynchronize());
      cutilCheckMsg("op_cuda_bres_calc execution failed\n");
      block_offset += Plan->ncolblk[col];
    }
    op_timers_core(&cpu_t2, &wall_t2);
    OP_kernels[3].time += wall_t2 - wall_t1;
    OP_kernels[3].transfer += Plan->transfer;
    OP_kernels[3].transfer2 += Plan->transfer2;
  }
  if(ninds > 0) //indirect loop
  {
    // Block until all outstanding halo exchanges have completed before
    // touching noncore elements.
    for(int i = 0; i<nargs; i++)
    {
      if(args[i].argtype == OP_ARG_DAT)
      {
        if(sent[i] == 1)wait_all_cuda(args[i]);
      }
    }
  }
  if (noncore_len>0) {
    if (OP_latency_sets[set->index].noncore_set == NULL) {
      // Lazily build a pseudo-set describing the noncore elements.
      op_set noncore_set = ( op_set ) malloc ( sizeof ( op_set_core ) );
      noncore_set->size = noncore_len;
      noncore_set->name = set->name;
      noncore_set->index = set->index;
      noncore_set->exec_size = 0;
      noncore_set->nonexec_size = 0;
      OP_latency_sets[set->index].noncore_set = noncore_set;
    }
    // Plan starts at offset core_len into the set.
    Plan = op_plan_get_offset(name,OP_latency_sets[set->index].noncore_set,core_len,part_size,nargs,args,ninds,inds);
    // initialise timers
    double cpu_t1, cpu_t2, wall_t1, wall_t2;
    op_timers_core(&cpu_t1, &wall_t1);
    // execute plan, one kernel launch per block color
    block_offset = 0;
    for (int col=0; col < Plan->ncolors; col++) {
      /*#ifdef OP_BLOCK_SIZE_3
        int nthread = OP_BLOCK_SIZE_3;
      #else
        int nthread = OP_block_size;
      #endif*/
      int nthread = 128;
      dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
      int nshared = Plan->nshared;
      // NOTE(review): only the direct arg5 pointer is shifted by core_len;
      // the indirect args are addressed through the plan's maps — presumably
      // those already account for the offset. Verify against op_plan_get_offset.
      hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks), dim3(nthread), nshared, 0,
          (double *)arg0.data_d, Plan->ind_maps[0],
          (double *)arg2.data_d, Plan->ind_maps[1],
          (double *)arg3.data_d, Plan->ind_maps[2],
          (double *)arg4.data_d, Plan->ind_maps[3],
          Plan->loc_maps[0],
          Plan->loc_maps[1],
          Plan->loc_maps[2],
          Plan->loc_maps[3],
          Plan->loc_maps[4],
          (int *)arg5.data_d + core_len*arg5.dim,
          Plan->ind_sizes,
          Plan->ind_offs,
          block_offset,
          Plan->blkmap,
          Plan->offset,
          Plan->nelems,
          Plan->nthrcol,
          Plan->thrcol,
          Plan->ncolblk[col]);
      cutilSafeCall(hipDeviceSynchronize());
      cutilCheckMsg("op_cuda_bres_calc execution failed\n");
      block_offset += Plan->ncolblk[col];
    }
    op_timers_core(&cpu_t2, &wall_t2);
    OP_kernels[3].time += wall_t2 - wall_t1;
    OP_kernels[3].transfer += Plan->transfer;
    OP_kernels[3].transfer2 += Plan->transfer2;
  }
  //set dirty bit on direct/indirect datasets with access OP_INC,OP_WRITE, OP_RW
  for(int i = 0; i<nargs; i++)
    if(args[i].argtype == OP_ARG_DAT)
      set_dirtybit(args[i]);
  //perform any global operations
  // - NONE
  // update kernel record
  op_timing_realloc(3);
  OP_kernels[3].name = name;
  OP_kernels[3].count += 1;
}
| aacb5631d37d19f073b31e66d360c6197d82b759.cu | //
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
// CUDA kernel function (auto-generated OP2 wrapper for the bres_calc user
// kernel). Each thread block executes one plan "block": indirectly accessed
// data is staged into dynamic shared memory, the user kernel runs once per
// set element, and OP_INC contributions (ind_arg3 / arg4) are accumulated
// through thread colors so that no two threads update the same shared entry
// concurrently, then flushed back to global memory.
__global__ void op_cuda_bres_calc(
    double *ind_arg0, int *ind_arg0_maps,
    double *ind_arg1, int *ind_arg1_maps,
    double *ind_arg2, int *ind_arg2_maps,
    double *ind_arg3, int *ind_arg3_maps,
    short *arg0_maps,
    short *arg1_maps,
    short *arg2_maps,
    short *arg3_maps,
    short *arg4_maps,
    int *arg5,
    int *ind_arg_sizes,
    int *ind_arg_offs,
    int block_offset,
    int *blkmap,
    int *offset,
    int *nelems,
    int *ncolors,
    int *colors,
    int nblocks) {
  // Per-thread local accumulator for the OP_INC argument (4 doubles).
  double arg4_l[4];
  __shared__ int *ind_arg0_map, ind_arg0_size;
  __shared__ int *ind_arg1_map, ind_arg1_size;
  __shared__ int *ind_arg2_map, ind_arg2_size;
  __shared__ int *ind_arg3_map, ind_arg3_size;
  __shared__ double *ind_arg0_s;
  __shared__ double *ind_arg1_s;
  __shared__ double *ind_arg2_s;
  __shared__ double *ind_arg3_s;
  __shared__ int nelems2, ncolor;
  __shared__ int nelem, offset_b;
  extern __shared__ char shared[];
  // 2D grid may overshoot the number of plan blocks for this color.
  if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
  if (threadIdx.x==0) {
    // get sizes and shift pointers and direct-mapped data
    int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
    nelem = nelems[blockId];
    offset_b = offset[blockId];
    // Round nelem up to a multiple of blockDim.x so every thread reaches
    // the color-loop barriers below.
    nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
    ncolor = ncolors[blockId];
    ind_arg0_size = ind_arg_sizes[0+blockId*4];
    ind_arg1_size = ind_arg_sizes[1+blockId*4];
    ind_arg2_size = ind_arg_sizes[2+blockId*4];
    ind_arg3_size = ind_arg_sizes[3+blockId*4];
    ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
    ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
    ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
    ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
    // set shared memory pointers (dims: 2, 4, 1, 4 doubles per element)
    int nbytes = 0;
    ind_arg0_s = (double *) &shared[nbytes];
    nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
    ind_arg1_s = (double *) &shared[nbytes];
    nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
    ind_arg2_s = (double *) &shared[nbytes];
    nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
    ind_arg3_s = (double *) &shared[nbytes];
  }
  __syncthreads(); // make sure all of above completed
  // copy indirect datasets into shared memory or zero increment
  for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
    ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
  for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
    ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
  for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
    ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
  for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
    ind_arg3_s[n] = ZERO_double;
  __syncthreads();
  // process set elements
  for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
    int col2 = -1;
    if (n<nelem) {
      // initialise local variables
      for (int d=0; d<4; d++)
        arg4_l[d] = ZERO_double;
      // user-supplied kernel call
      bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
                 ind_arg0_s+arg1_maps[n+offset_b]*2,
                 ind_arg1_s+arg2_maps[n+offset_b]*4,
                 ind_arg2_s+arg3_maps[n+offset_b]*1,
                 arg4_l,
                 arg5+(n+offset_b)*1 );
      col2 = colors[n+offset_b];
    }
    // store local variables: one color at a time so threads sharing an
    // ind_arg3 entry never write it in the same phase. The barrier inside
    // this loop is safe because every thread iterates all ncolor phases.
    int arg4_map;
    if (col2>=0) {
      arg4_map = arg4_maps[n+offset_b];
    }
    for (int col=0; col<ncolor; col++) {
      if (col2==col) {
        for (int d=0; d<4; d++)
          ind_arg3_s[d+arg4_map*4] += arg4_l[d];
      }
      __syncthreads();
    }
  }
  // apply pointered write/increment: flush shared increments to global
  for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
    ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
// host stub function
//
// Executes the bres_calc kernel over every element of `set` using OP2
// execution plans.  The set is processed in two sections so that halo
// communication overlaps with computation:
//   1. start halo exchanges for all op_dat arguments;
//   2. run the "core" elements (the first core_num[set->index] elements);
//   3. wait for the exchanges started in (1) to complete;
//   4. run the remaining "non-core" elements (rest of the owned set plus
//      the imported execute list), offset by core_len.
// Each section gets its own plan, executed one colour at a time with a
// device synchronisation between colours.
//
// Fixes vs. the original generated code:
//  - cudaThreadSynchronize() (deprecated, removed in recent CUDA releases)
//    replaced by the equivalent cudaDeviceSynchronize();
//  - removed the redundant local redeclaration of the timer variables in
//    the non-core section, which shadowed the function-scope ones.
//
// Timing and transfer statistics are accumulated in OP_kernels[3].
void op_par_loop_bres_calc(char const *name, op_set set,
  op_arg arg0,
  op_arg arg1,
  op_arg arg2,
  op_arg arg3,
  op_arg arg4,
  op_arg arg5 ){

  int nargs = 6;
  op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};

  // args 0-4 are indirect through 4 distinct datasets (arg0/arg1 share
  // dataset 0); arg5 (-1) is accessed directly
  int ninds = 4;
  int inds[6] = {0,0,1,2,3,-1};

  int sent[6] = {0,0,0,0,0,0}; // 1 where a halo exchange was started

  // kick off halo exchanges now; completion is awaited only after the core
  // section has been computed, hiding the communication latency
  if (ninds > 0) {
    for (int i = 0; i < nargs; i++) {
      if (args[i].argtype == OP_ARG_DAT) {
        if (OP_diags == 1) reset_halo(args[i]);
        sent[i] = exchange_halo_cuda(args[i]);
      }
    }
  }

  double cpu_t1, cpu_t2, wall_t1, wall_t2;
  op_plan *Plan;
  int block_offset;

  if (OP_diags > 2) {
    printf(" kernel routine with indirection: bres_calc \n");
  }

  // partition size for the execution plan (compile-time override wins)
#ifdef OP_PART_SIZE_3
  int part_size = OP_PART_SIZE_3;
#else
  int part_size = OP_part_size;
#endif

  // split point between the core and non-core sections of the set
  int core_len = core_num[set->index];
  int noncore_len = set->size + OP_import_exec_list[set->index]->size - core_len;

  if (core_len > 0) {
    // lazily build a pseudo-set covering only the core elements
    if (OP_latency_sets[set->index].core_set == NULL) {
      op_set core_set = (op_set) malloc(sizeof(op_set_core));
      core_set->index = set->index;
      core_set->name = set->name;
      core_set->size = core_len;
      core_set->exec_size = 0;
      core_set->nonexec_size = 0;
      OP_latency_sets[set->index].core_set = core_set;
    }

    Plan = op_plan_get_offset(name, OP_latency_sets[set->index].core_set, 0,
                              part_size, nargs, args, ninds, inds);

    // initialise timers
    op_timers_core(&cpu_t1, &wall_t1);

    // execute the plan, one colour at a time
    block_offset = 0;
    for (int col = 0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
      int nthread = OP_BLOCK_SIZE_3;
#else
      int nthread = OP_block_size;
#endif
      // a grid dimension is capped at 65535 blocks, so fold large block
      // counts into a 2D grid; the kernel discards surplus blocks
      dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
                          Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1 : 1, 1);
      int nshared = Plan->nshared;

      op_cuda_bres_calc<<<nblocks,nthread,nshared>>>(
          (double *)arg0.data_d, Plan->ind_maps[0],
          (double *)arg2.data_d, Plan->ind_maps[1],
          (double *)arg3.data_d, Plan->ind_maps[2],
          (double *)arg4.data_d, Plan->ind_maps[3],
          Plan->loc_maps[0],
          Plan->loc_maps[1],
          Plan->loc_maps[2],
          Plan->loc_maps[3],
          Plan->loc_maps[4],
          (int *)arg5.data_d,
          Plan->ind_sizes,
          Plan->ind_offs,
          block_offset,
          Plan->blkmap,
          Plan->offset,
          Plan->nelems,
          Plan->nthrcol,
          Plan->thrcol,
          Plan->ncolblk[col]);

      cutilSafeCall(cudaDeviceSynchronize());
      cutilCheckMsg("op_cuda_bres_calc execution failed\n");

      block_offset += Plan->ncolblk[col];
    }

    op_timers_core(&cpu_t2, &wall_t2);
    OP_kernels[3].time += wall_t2 - wall_t1;
    OP_kernels[3].transfer += Plan->transfer;
    OP_kernels[3].transfer2 += Plan->transfer2;
  }

  // core section done: wait for the halo exchanges before processing the
  // non-core elements
  if (ninds > 0) {
    for (int i = 0; i < nargs; i++) {
      if (args[i].argtype == OP_ARG_DAT) {
        if (sent[i] == 1) wait_all_cuda(args[i]);
      }
    }
  }

  if (noncore_len > 0) {
    // lazily build a pseudo-set covering the non-core elements
    if (OP_latency_sets[set->index].noncore_set == NULL) {
      op_set noncore_set = (op_set) malloc(sizeof(op_set_core));
      noncore_set->size = noncore_len;
      noncore_set->name = set->name;
      noncore_set->index = set->index;
      noncore_set->exec_size = 0;
      noncore_set->nonexec_size = 0;
      OP_latency_sets[set->index].noncore_set = noncore_set;
    }

    // this plan starts core_len elements into the set
    Plan = op_plan_get_offset(name, OP_latency_sets[set->index].noncore_set,
                              core_len, part_size, nargs, args, ninds, inds);

    // initialise timers (reusing the function-scope variables)
    op_timers_core(&cpu_t1, &wall_t1);

    // execute the plan, one colour at a time
    block_offset = 0;
    for (int col = 0; col < Plan->ncolors; col++) {
      // fixed block size for the (typically much smaller) non-core section
      int nthread = 128;
      dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
                          Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1 : 1, 1);
      int nshared = Plan->nshared;

      op_cuda_bres_calc<<<nblocks,nthread,nshared>>>(
          (double *)arg0.data_d, Plan->ind_maps[0],
          (double *)arg2.data_d, Plan->ind_maps[1],
          (double *)arg3.data_d, Plan->ind_maps[2],
          (double *)arg4.data_d, Plan->ind_maps[3],
          Plan->loc_maps[0],
          Plan->loc_maps[1],
          Plan->loc_maps[2],
          Plan->loc_maps[3],
          Plan->loc_maps[4],
          // direct argument: skip the elements handled by the core section
          (int *)arg5.data_d + core_len*arg5.dim,
          Plan->ind_sizes,
          Plan->ind_offs,
          block_offset,
          Plan->blkmap,
          Plan->offset,
          Plan->nelems,
          Plan->nthrcol,
          Plan->thrcol,
          Plan->ncolblk[col]);

      cutilSafeCall(cudaDeviceSynchronize());
      cutilCheckMsg("op_cuda_bres_calc execution failed\n");

      block_offset += Plan->ncolblk[col];
    }

    op_timers_core(&cpu_t2, &wall_t2);
    OP_kernels[3].time += wall_t2 - wall_t1;
    OP_kernels[3].transfer += Plan->transfer;
    OP_kernels[3].transfer2 += Plan->transfer2;
  }

  // set dirty bit on direct/indirect datasets with access OP_INC,OP_WRITE,OP_RW
  for (int i = 0; i < nargs; i++)
    if (args[i].argtype == OP_ARG_DAT)
      set_dirtybit(args[i]);

  // perform any global operations
  // - NONE

  // update kernel record
  op_timing_realloc(3);
  OP_kernels[3].name = name;
  OP_kernels[3].count += 1;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.