hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
27bd22da1d62e25357d2216dee76fd8fac91e57a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <float.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "common.h"
#include <hip/hip_runtime.h>
double size;
//
// tuned constants
//
//
// timer
//
// Wall-clock timer. The first call latches an origin timestamp; every call
// (including the first) returns seconds elapsed since that origin as a double.
double read_timer( )
{
    static struct timeval t_origin;
    static bool origin_set = false;
    if( !origin_set )
    {
        gettimeofday( &t_origin, NULL );
        origin_set = true;
    }
    struct timeval now;
    gettimeofday( &now, NULL );
    return (double)( now.tv_sec - t_origin.tv_sec )
         + 1.0e-6 * (double)( now.tv_usec - t_origin.tv_usec );
}
//
// keep density constant
//
void set_size( int n )
{
// Choose the simulation box edge so that the mean particle density stays
// constant: area = size^2 = density * n (density presumably a constant from
// common.h -- not visible here).
size = sqrt( density * n );
}
//
// Initialize the particle positions and velocities
//
// Place n particles on a jittered grid with random velocities.
// A random permutation assigns each particle a distinct grid slot so the
// array order is not spatially sorted; positions are evenly spaced inside
// the [0,size]^2 box, velocities uniform in [-1,1).
// Re-seeds drand48/lrand48 from the wall clock on every call.
void init_particles( int n, particle_t *p )
{
srand48( time( NULL ) );
// Grid dimensions: sx columns, sy rows, sx * sy >= n
int sx = (int)ceil(sqrt((double)n));
int sy = (n+sx-1)/sx;
int *shuffle = (int*)malloc( n * sizeof(int) );
// NOTE(review): malloc result is unchecked; a failed allocation crashes below.
for( int i = 0; i < n; i++ )
shuffle[i] = i;
for( int i = 0; i < n; i++ )
{
//
// make sure particles are not spatially sorted:
// draw a random remaining slot (Fisher-Yates-style selection without
// replacement from the first n-i entries)
//
int j = lrand48()%(n-i);
int k = shuffle[j];
shuffle[j] = shuffle[n-i-1];
//
// distribute particles evenly to ensure proper spacing:
// slot k maps to grid cell (k%sx, k/sx), scaled into the box interior
//
p[i].x = size*(1.+(k%sx))/(1+sx);
p[i].y = size*(1.+(k/sx))/(1+sy);
//
// assign random velocities within a bound [-1, 1)
//
p[i].vx = drand48()*2-1;
p[i].vy = drand48()*2-1;
}
free( shuffle );
}
//
// interact two particles
//
// Accumulate into particle.ax/ay the short-range repulsive force exerted by
// neighbor. Only `particle` is mutated; `neighbor` is only read (it is a
// non-const reference to mirror the GPU variant's signature).
// cutoff, min_r, mass are constants -- presumably from common.h; verify.
void apply_force( particle_t &particle, particle_t &neighbor )
{
double dx = neighbor.x - particle.x;
double dy = neighbor.y - particle.y;
double r2 = dx * dx + dy * dy;
// Outside the interaction radius: no contribution.
if( r2 > cutoff*cutoff )
return;
// Clamp r2 away from zero so the 1/r2 term below cannot blow up when two
// particles (nearly) overlap.
r2 = fmax( r2, min_r*min_r );
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle.ax += coef * dx;
particle.ay += coef * dy;
}
//
// integrate the ODE
//
// Advance one particle by a single time step dt and reflect it off the walls
// of the [0,size]^2 box.
void move( particle_t &p )
{
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
p.vx += p.ax * dt;
p.vy += p.ay * dt;
p.x += p.vx * dt;
p.y += p.vy * dt;
//
// bounce from walls: mirror the position about the violated wall and flip
// the corresponding velocity component.  The while-loops handle the rare
// case of overshooting by more than one box length in a single step.
//
while( p.x < 0 || p.x > size )
{
p.x = p.x < 0 ? -p.x : 2*size-p.x;
p.vx = -p.vx;
}
while( p.y < 0 || p.y > size )
{
p.y = p.y < 0 ? -p.y : 2*size-p.y;
p.vy = -p.vy;
}
}
//
// CUDA Routines (GPU)
//
// Taking the location of the particle x,y, we transform it into IDs
// Map a position (x, y) to a flattened mesh-cell index, where meshSize is the
// cell edge length.  The flat index is xid * xmesh + yid, i.e. xmesh is the
// stride of the x coordinate; callers (compute_forces_gpu) use the same
// convention for neighbor offsets (k +- xmesh moves in x, k +- 1 in y).
// NOTE(review): no clamping -- a coordinate exactly at the upper domain edge
// maps one cell past the last row; verify callers keep x,y strictly inside
// the mesh extent.
__device__ int IDparticle(double x, double y, double meshSize, int xmesh) {
// Coordinates (indexes)
int xid = x / meshSize;
int yid = y / meshSize;
// Return ID location
return xid * xmesh + yid;
}
// Taking the location of the particle x,y, we transform it into IDs
// Overload of IDparticle taking a particle instead of raw coordinates;
// same flattening convention (xid * xmesh + yid) and the same absence of
// bounds clamping as the (x, y) overload.
__device__ int IDparticle(particle_t &particle, double meshSize, int xmesh) {
// Coordinates (indexes)
int xid = particle.x / meshSize;
int yid = particle.y / meshSize;
// Return ID location
return xid * xmesh + yid;
}
// Push particles into the mesh
// Insert every particle into its mesh cell by building per-cell singly linked
// lists: submesh[cell] holds the head (index of the most recently inserted
// particle) and adj[i] the successor of particle i.  Lists terminate at -1,
// which the clear() kernel writes into submesh beforehand.
// Launch with one thread per particle (>= n threads total).
__global__ void push2mesh_gpu(particle_t * particles, int n, int* adj,
int* submesh, double meshSize, int xmesh) {
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// If thread is n, stop
if(tid >= n) return;
// Otherwise, get next particle ID
int k = IDparticle(particles[tid], meshSize, xmesh);
// From NVIDIA: atomicExch(int *address, int val)
// reads the 32-bit or 64-bit word old located at the address "address" in global or shared memory
// and stores val back to memory at the same address. These two operations are performed in one
// atomic transaction.
// Atomically push tid onto cell k's list: the old head becomes tid's
// successor.  Atomicity makes concurrent insertions into one cell safe.
adj[tid] = atomicExch(&submesh[k], tid);
}
// Clear the mesh
// Reset every mesh cell's list head to the empty-list sentinel (-1) so the
// next push2mesh_gpu pass starts from empty lists.
// Launch with one thread per cell (>= Nmeshs threads total).
__global__ void clear(int Nmeshs, int* submesh) {
    const int cell = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell < Nmeshs)
        submesh[cell] = -1;
}
// Apply force gpu function for particle's local neighborhood (same as Vanilla)
// Device-side pairwise repulsion: accumulate into particle.ax/ay the force
// exerted by adjCell.  Same maths as the host apply_force(), with the fmax
// clamp rewritten as a branch (see below).
__device__ void apply_force_gpu(particle_t &particle, particle_t &adjCell)
{
double dx = adjCell.x - particle.x;
double dy = adjCell.y - particle.y;
double r2 = dx * dx + dy * dy;
// Outside the interaction radius: no contribution.
if( r2 > cutoff*cutoff )
return;
//r2 = fmax( r2, min_r*min_r );
// Ternary form of the clamp that keeps r2 away from zero.
r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r;
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle.ax += coef * dx;
particle.ay += coef * dy;
}
// Apply forces to own thread's mesh
// Accumulate onto particles[tid] the forces from every particle in the cell
// whose list head is `submesh`, skipping the particle itself.  Used for the
// particle's OWN cell (self-interaction must be excluded).
__device__ void submeshForce(particle_t * particles, int tid, int * adj, int submesh) {
// Pointer to thread's particles
particle_t* p = &particles[tid];
// Walk the cell's linked list (adj = successor indices, -1 terminates),
// skipping i == tid.
for(int i = submesh; i != -1; i = adj[i]) {
if(i != tid)
apply_force_gpu(*p, particles[i]);
}
}
// Apply forces to thread's mesh
// Accumulate onto particles[tid] the forces from EVERY particle in the cell
// whose list head is `submesh` (no self-skip).  Used for neighboring cells,
// where tid cannot appear in the list.
__device__ void submeshForceAll(particle_t * particles, int tid, int * adj, int submesh) {
// Pointer to thread's particles
particle_t* p = &particles[tid];
// Walk the cell's linked list (adj = successor indices, -1 terminates).
for(int i = submesh; i != -1; i = adj[i]) {
apply_force_gpu(*p, particles[i]);
}
}
// Compute forces gpu: main mod to deal with own particles and adjacent ones
// One thread per particle: zero its acceleration, then accumulate forces from
// its own mesh cell and the up-to-8 adjacent cells.  Index math mirrors
// IDparticle(): cell k = xid * xmesh + yid, so k +- xmesh steps in x and
// k +- 1 steps in y.
// NOTE(review): the bounds tests compare both xid and yid against xmesh - 1,
// which assumes a square mesh (equal cell counts in x and y) -- confirm.
__global__ void compute_forces_gpu(particle_t * particles, int n, int * adj, int * submesh, double meshSize, int xmesh)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
// Calculate ID and location
particle_t * p = &particles[tid];
int xid = p -> x / meshSize;
int yid = p -> y / meshSize;
int k = xid * xmesh + yid;
// Set ax and ay = 0
p->ax = p->ay = 0;
// Forces are computed (if needed) inside the submesh
submeshForce(particles, tid, adj, submesh[k]);
// Forces are computed (if needed) w.r.t. other submeshs
// Cases: check corresponding submesh
// Cells in the xid - 1 strip (original label said "Right", but the index
// math walks toward smaller xid)
if(xid > 0) {
submeshForceAll(particles, tid, adj, submesh[k - xmesh]);
if(yid > 0)
submeshForceAll(particles, tid, adj, submesh[k - xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, tid, adj, submesh[k - xmesh + 1]);
}
// Cells in the xid + 1 strip
if(xid < xmesh - 1) {
submeshForceAll( particles, tid, adj, submesh[k + xmesh]);
if(yid > 0)
submeshForceAll(particles, tid, adj, submesh[k + xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, tid, adj, submesh[k + xmesh + 1]);
}
// Cell at yid - 1
if(yid > 0) submeshForceAll(particles, tid, adj, submesh[k - 1]);
// Cell at yid + 1
if(yid < xmesh - 1) submeshForceAll(particles, tid, adj, submesh[k + 1]);
}
// Compute forces for meshs gpu: main mod to deal with own particles and adjacent ones
// Cell-parallel variant of compute_forces_gpu: one thread per mesh CELL
// (tid = flat cell index, Nmeshs threads total); each thread iterates the
// particles on its cell's linked list and accumulates the same own-cell plus
// up-to-8-neighbor force sums for each of them.
// NOTE(review): as above, the bounds tests assume a square mesh.
__global__ void compute_forces_mesh_gpu(particle_t * particles, int * adj,int Nmeshs, int * submesh, double meshSize, int xmesh)
{
// Get thread (cell) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= Nmeshs) return;
// Recover 2D cell coordinates from the flat index (stride xmesh in x)
int xid = tid / xmesh;
int yid = tid % xmesh;
int k = tid;
// Walk every particle on this cell's list
for(int i = submesh[tid]; i != -1; i = adj[i]) {
particle_t * p = &particles[i];
// Set ax and ay = 0
p->ax = p->ay = 0;
// Forces are computed (if needed) inside the submesh
submeshForce(particles, i, adj, submesh[k]);
// Forces are computed (if needed) w.r.t. other submeshs
// Cases: check corresponding submesh
// Cells in the xid - 1 strip (original label said "Right")
if(xid > 0) {
submeshForceAll(particles, i, adj, submesh[k - xmesh]);
if(yid > 0)
submeshForceAll(particles, i, adj, submesh[k - xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, i, adj, submesh[k - xmesh + 1]);
}
// Cells in the xid + 1 strip
if(xid < xmesh - 1) {
submeshForceAll(particles, i, adj, submesh[k + xmesh]);
if(yid > 0)
submeshForceAll(particles, i, adj, submesh[k + xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, i, adj, submesh[k + xmesh + 1]);
}
// Cell at yid - 1
if(yid > 0) submeshForceAll(particles, i, adj, submesh[k - 1]);
// Cell at yid + 1
if(yid < xmesh - 1) submeshForceAll(particles, i, adj, submesh[k + 1]);
}
}
// Move particles function (same as Vanilla)
// GPU twin of the host move(): one thread per particle, advance by dt and
// reflect off the [0,size]^2 box walls.
__global__ void move_gpu (particle_t * particles, int n, double size)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
particle_t * p = &particles[tid];
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
p->vx += p->ax * dt;
p->vy += p->ay * dt;
p->x += p->vx * dt;
p->y += p->vy * dt;
//
// bounce from walls: mirror the position, flip the velocity component;
// the loop covers multi-box-length overshoot.
//
while( p->x < 0 || p->x > size )
{
p->x = p->x < 0 ? -(p->x) : 2*size-p->x;
p->vx = -(p->vx);
}
while( p->y < 0 || p->y > size )
{
p->y = p->y < 0 ? -(p->y) : 2*size-p->y;
p->vy = -(p->vy);
}
}
//
// I/O routines
//
// Append one frame of particle positions to f.  The first call in the
// process also writes a "n size" header line (tracked by a function-local
// static, so this is neither thread-safe nor reusable for a second file).
void save( FILE *f, int n, particle_t *p )
{
static bool first = true;
if( first )
{
fprintf( f, "%d %g\n", n, size );
first = false;
}
for( int i = 0; i < n; i++ )
fprintf( f, "%12.10f %12.10f\n", p[i].x, p[i].y );
}
//
// command line option processing
//
// Return the argv index of the given option flag, or -1 when it is absent.
// argv[0] (the program name) is never matched.
int find_option( int argc, char **argv, const char *option )
{
    int i = 1;
    while( i < argc )
    {
        if( strcmp( argv[i], option ) == 0 )
            return i;
        ++i;
    }
    return -1;
}
// Parse the integer argument that follows the given option flag; fall back to
// default_value when the flag is absent or has no following argument.
int read_int( int argc, char **argv, const char *option, int default_value )
{
    const int pos = find_option( argc, argv, option );
    if( pos < 0 || pos + 1 >= argc )
        return default_value;
    return atoi( argv[pos + 1] );
}
// Return the string argument that follows the given option flag; fall back to
// default_value when the flag is absent or has no following argument.
// The returned pointer aliases argv (or default_value); nothing is copied.
char *read_string( int argc, char **argv, const char *option, char *default_value )
{
    const int pos = find_option( argc, argv, option );
    if( pos < 0 || pos + 1 >= argc )
        return default_value;
    return argv[pos + 1];
}
| 27bd22da1d62e25357d2216dee76fd8fac91e57a.cu | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <float.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "common.h"
#include <cuda.h>
double size;
//
// tuned constants
//
//
// timer
//
double read_timer( )
{
static bool initialized = false;
static struct timeval start;
struct timeval end;
if( !initialized )
{
gettimeofday( &start, NULL );
initialized = true;
}
gettimeofday( &end, NULL );
return (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
}
//
// keep density constant
//
void set_size( int n )
{
size = sqrt( density * n );
}
//
// Initialize the particle positions and velocities
//
void init_particles( int n, particle_t *p )
{
srand48( time( NULL ) );
int sx = (int)ceil(sqrt((double)n));
int sy = (n+sx-1)/sx;
int *shuffle = (int*)malloc( n * sizeof(int) );
for( int i = 0; i < n; i++ )
shuffle[i] = i;
for( int i = 0; i < n; i++ )
{
//
// make sure particles are not spatially sorted
//
int j = lrand48()%(n-i);
int k = shuffle[j];
shuffle[j] = shuffle[n-i-1];
//
// distribute particles evenly to ensure proper spacing
//
p[i].x = size*(1.+(k%sx))/(1+sx);
p[i].y = size*(1.+(k/sx))/(1+sy);
//
// assign random velocities within a bound
//
p[i].vx = drand48()*2-1;
p[i].vy = drand48()*2-1;
}
free( shuffle );
}
//
// interact two particles
//
void apply_force( particle_t &particle, particle_t &neighbor )
{
double dx = neighbor.x - particle.x;
double dy = neighbor.y - particle.y;
double r2 = dx * dx + dy * dy;
if( r2 > cutoff*cutoff )
return;
r2 = fmax( r2, min_r*min_r );
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle.ax += coef * dx;
particle.ay += coef * dy;
}
//
// integrate the ODE
//
void move( particle_t &p )
{
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
p.vx += p.ax * dt;
p.vy += p.ay * dt;
p.x += p.vx * dt;
p.y += p.vy * dt;
//
// bounce from walls
//
while( p.x < 0 || p.x > size )
{
p.x = p.x < 0 ? -p.x : 2*size-p.x;
p.vx = -p.vx;
}
while( p.y < 0 || p.y > size )
{
p.y = p.y < 0 ? -p.y : 2*size-p.y;
p.vy = -p.vy;
}
}
//
// CUDA Routines (GPU)
//
// Taking the location of the particle x,y, we transform it into IDs
__device__ int IDparticle(double x, double y, double meshSize, int xmesh) {
// Coordinates (indexes)
int xid = x / meshSize;
int yid = y / meshSize;
// Return ID location
return xid * xmesh + yid;
}
// Taking the location of the particle x,y, we transform it into IDs
__device__ int IDparticle(particle_t &particle, double meshSize, int xmesh) {
// Coordinates (indexes)
int xid = particle.x / meshSize;
int yid = particle.y / meshSize;
// Return ID location
return xid * xmesh + yid;
}
// Push particles into the mesh
__global__ void push2mesh_gpu(particle_t * particles, int n, int* adj,
int* submesh, double meshSize, int xmesh) {
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// If thread is n, stop
if(tid >= n) return;
// Otherwise, get next particle ID
int k = IDparticle(particles[tid], meshSize, xmesh);
// From NVIDIA: atomicExch(int *address, int val)
// reads the 32-bit or 64-bit word old located at the address "address" in global or shared memory
// and stores val back to memory at the same address. These two operations are performed in one
// atomic transaction.
// Thread ID is associated with kth particle's sub mesh
adj[tid] = atomicExch(&submesh[k], tid);
}
// Clear the mesh
__global__ void clear(int Nmeshs, int* submesh) {
// Get thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// If thread is greater or equal than the total number of existing meshs, stop
if(tid >= Nmeshs) return;
// Otherwise, set sub mesh to thread -1
submesh[tid] = -1;
}
// Apply force gpu function for particle's local neighborhood (same as Vanilla)
__device__ void apply_force_gpu(particle_t &particle, particle_t &adjCell)
{
double dx = adjCell.x - particle.x;
double dy = adjCell.y - particle.y;
double r2 = dx * dx + dy * dy;
if( r2 > cutoff*cutoff )
return;
//r2 = fmax( r2, min_r*min_r );
r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r;
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle.ax += coef * dx;
particle.ay += coef * dy;
}
// Apply forces to own thread's mesh
__device__ void submeshForce(particle_t * particles, int tid, int * adj, int submesh) {
// Pointer to thread's particles
particle_t* p = &particles[tid];
// Apply force to particles with i != current thread
for(int i = submesh; i != -1; i = adj[i]) {
if(i != tid)
apply_force_gpu(*p, particles[i]);
}
}
// Apply forces to thread's mesh
__device__ void submeshForceAll(particle_t * particles, int tid, int * adj, int submesh) {
// Pointer to thread's particles
particle_t* p = &particles[tid];
// Apply force of particles in the mesh
for(int i = submesh; i != -1; i = adj[i]) {
apply_force_gpu(*p, particles[i]);
}
}
// Compute forces gpu: main mod to deal with own particles and adjacent ones
__global__ void compute_forces_gpu(particle_t * particles, int n, int * adj, int * submesh, double meshSize, int xmesh)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
// Calculate ID and location
particle_t * p = &particles[tid];
int xid = p -> x / meshSize;
int yid = p -> y / meshSize;
int k = xid * xmesh + yid;
// Set ax and ay = 0
p->ax = p->ay = 0;
// Forces are computed (if needed) inside the submesh
submeshForce(particles, tid, adj, submesh[k]);
// Forces are computed (if needed) w.r.t. other submeshs
// Cases: check corresponding submesh
// Right
if(xid > 0) {
submeshForceAll(particles, tid, adj, submesh[k - xmesh]);
if(yid > 0)
submeshForceAll(particles, tid, adj, submesh[k - xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, tid, adj, submesh[k - xmesh + 1]);
}
// Left
if(xid < xmesh - 1) {
submeshForceAll( particles, tid, adj, submesh[k + xmesh]);
if(yid > 0)
submeshForceAll(particles, tid, adj, submesh[k + xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, tid, adj, submesh[k + xmesh + 1]);
}
// Up
if(yid > 0) submeshForceAll(particles, tid, adj, submesh[k - 1]);
// Down
if(yid < xmesh - 1) submeshForceAll(particles, tid, adj, submesh[k + 1]);
}
// Compute forces for meshs gpu: main mod to deal with own particles and adjacent ones
__global__ void compute_forces_mesh_gpu(particle_t * particles, int * adj,int Nmeshs, int * submesh, double meshSize, int xmesh)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= Nmeshs) return;
// Calculate ID and location
int xid = tid / xmesh;
int yid = tid % xmesh;
int k = tid;
// Get particles
for(int i = submesh[tid]; i != -1; i = adj[i]) {
particle_t * p = &particles[i];
// Set ax and ay = 0
p->ax = p->ay = 0;
// Forces are computed (if needed) inside the submesh
submeshForce(particles, i, adj, submesh[k]);
// Forces are computed (if needed) w.r.t. other submeshs
// Cases: check corresponding submesh
// Right
if(xid > 0) {
submeshForceAll(particles, i, adj, submesh[k - xmesh]);
if(yid > 0)
submeshForceAll(particles, i, adj, submesh[k - xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, i, adj, submesh[k - xmesh + 1]);
}
// Left
if(xid < xmesh - 1) {
submeshForceAll(particles, i, adj, submesh[k + xmesh]);
if(yid > 0)
submeshForceAll(particles, i, adj, submesh[k + xmesh - 1]);
if(yid < xmesh - 1)
submeshForceAll(particles, i, adj, submesh[k + xmesh + 1]);
}
// Up
if(yid > 0) submeshForceAll(particles, i, adj, submesh[k - 1]);
// Down
if(yid < xmesh - 1) submeshForceAll(particles, i, adj, submesh[k + 1]);
}
}
// Move particles function (same as Vanilla)
__global__ void move_gpu (particle_t * particles, int n, double size)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
particle_t * p = &particles[tid];
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
p->vx += p->ax * dt;
p->vy += p->ay * dt;
p->x += p->vx * dt;
p->y += p->vy * dt;
//
// bounce from walls
//
while( p->x < 0 || p->x > size )
{
p->x = p->x < 0 ? -(p->x) : 2*size-p->x;
p->vx = -(p->vx);
}
while( p->y < 0 || p->y > size )
{
p->y = p->y < 0 ? -(p->y) : 2*size-p->y;
p->vy = -(p->vy);
}
}
//
// I/O routines
//
void save( FILE *f, int n, particle_t *p )
{
static bool first = true;
if( first )
{
fprintf( f, "%d %g\n", n, size );
first = false;
}
for( int i = 0; i < n; i++ )
fprintf( f, "%12.10f %12.10f\n", p[i].x, p[i].y );
}
//
// command line option processing
//
int find_option( int argc, char **argv, const char *option )
{
for( int i = 1; i < argc; i++ )
if( strcmp( argv[i], option ) == 0 )
return i;
return -1;
}
int read_int( int argc, char **argv, const char *option, int default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return atoi( argv[iplace+1] );
return default_value;
}
char *read_string( int argc, char **argv, const char *option, char *default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return argv[iplace+1];
return default_value;
}
|
45b05e694f1d2a40402796675d35136a6326a684.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./kern.cuh"
#include "megdnn/internal/defs.h"
#include "src/cuda/query_blocksize.cuh"
using namespace megdnn;
using namespace cuda;
using namespace indexing_multi_axis_vec;
namespace {
// One thread per output element: for each of the nidx advanced indexers,
// decode the element's coordinate within the indexer tensor, fetch the user
// supplied index value, wrap negatives, bounds-check it, and accumulate
// index * data_stride into a single flat offset written to param.output.
template <int nidx, int idx_ndim>
__global__ void kgen_offset_base(GenOffsetBaseParam<nidx, idx_ndim> param) {
int oidx = threadIdx.x + blockDim.x * blockIdx.x;
if (oidx < param.size) {
int offset = 0;
#pragma unroll
for (int i = 0; i < nidx; ++i) {
auto& indexer = param.indexer[i];
// index in index: decode oidx into per-axis coordinates of the indexer
// tensor, innermost axis first, and flatten with the indexer's strides.
// shape[j-1] presumably wraps a fast-division helper (operator/ plus
// .divisor()) -- division and remainder computed without hardware idiv.
int idx_flat = 0, coidx = oidx;
#pragma unroll
for (int j = idx_ndim - 1; j >= 0; --j) {
int ax_idx;
if (j) {
int next_coidx = coidx / indexer.shape[j - 1];
ax_idx = coidx - (next_coidx * indexer.shape[j - 1].divisor());
coidx = next_coidx;
} else {
ax_idx = coidx;
}
idx_flat += indexer.stride[j] * ax_idx;
}
int data_idx = indexer.ptr[idx_flat];
// Python-style negative indexing: -1 refers to the last element.
data_idx += (data_idx < 0 ? param.data_shape[i] : 0);
if (static_cast<uint32_t>(data_idx) >= param.data_shape[i]) {
// cast to uint32 to handle both negative and overflow
// Report asynchronously and clamp to 0 so the kernel can continue.
set_async_error_info(
param.error_info, param.error_tracker,
"invalid advanced indexing: "
"indexer=%d idx=%d shape=%d",
i, data_idx, param.data_shape[i]);
data_idx = 0;
}
// calculate offset from current index
offset += data_idx * param.data_stride[i];
}
// sum offsets and store at offset table
param.output[oidx] = offset;
}
}
} // namespace
// Host-side launcher for kgen_offset_base: picks a block size via the
// occupancy query and runs one thread per output element on `stream`.
template <int nidx, int idx_ndim>
void indexing_multi_axis_vec::gen_offset_base(
        const GenOffsetBaseParam<nidx, idx_ndim>& param, hipStream_t stream) {
    void (*kptr)(GenOffsetBaseParam<nidx, idx_ndim>) = kgen_offset_base<nidx, idx_ndim>;
    int bsize = query_blocksize_for_kernel(kptr);
    // BUG FIX: the hipify output read
    //   (hipLaunchKernelGGL((*kptr)), dim3(...), dim3(bsize), 0, stream, param);
    // which invokes the launch macro with a single argument and demotes the
    // grid/block/shared-mem/stream/kernel-argument list to a discarded comma
    // expression, so the kernel was never launched with its configuration
    // (and the macro call itself is malformed).  hipLaunchKernelGGL takes
    // (kernel, gridDim, blockDim, sharedMemBytes, stream, args...).
    hipLaunchKernelGGL((*kptr), dim3(DIVUP(param.size, bsize)), dim3(bsize), 0,
                       stream, param);
}
namespace megdnn {
namespace cuda {
namespace indexing_multi_axis_vec {
#define INST(_m, _n) \
template void gen_offset_base(const GenOffsetBaseParam<_m, _n>&, hipStream_t);
MEGDNN_FOREACH_TENSOR_NDIM(INST, 1)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 2)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 3)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 4)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 5)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 6)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 7)
#undef INST
} // namespace indexing_multi_axis_vec
} // namespace cuda
} // namespace megdnn
// vim: ft=cuda syntax=cpp.doxygen
| 45b05e694f1d2a40402796675d35136a6326a684.cu | #include "./kern.cuh"
#include "megdnn/internal/defs.h"
#include "src/cuda/query_blocksize.cuh"
using namespace megdnn;
using namespace cuda;
using namespace indexing_multi_axis_vec;
namespace {
template <int nidx, int idx_ndim>
__global__ void kgen_offset_base(GenOffsetBaseParam<nidx, idx_ndim> param) {
int oidx = threadIdx.x + blockDim.x * blockIdx.x;
if (oidx < param.size) {
int offset = 0;
#pragma unroll
for (int i = 0; i < nidx; ++i) {
auto& indexer = param.indexer[i];
// index in index
int idx_flat = 0, coidx = oidx;
#pragma unroll
for (int j = idx_ndim - 1; j >= 0; --j) {
int ax_idx;
if (j) {
int next_coidx = coidx / indexer.shape[j - 1];
ax_idx = coidx - (next_coidx * indexer.shape[j - 1].divisor());
coidx = next_coidx;
} else {
ax_idx = coidx;
}
idx_flat += indexer.stride[j] * ax_idx;
}
int data_idx = indexer.ptr[idx_flat];
data_idx += (data_idx < 0 ? param.data_shape[i] : 0);
if (static_cast<uint32_t>(data_idx) >= param.data_shape[i]) {
// cast to uint32 to handle both negative and overflow
set_async_error_info(
param.error_info, param.error_tracker,
"invalid advanced indexing: "
"indexer=%d idx=%d shape=%d",
i, data_idx, param.data_shape[i]);
data_idx = 0;
}
// calculate offset from current index
offset += data_idx * param.data_stride[i];
}
// sum offsets and store at offset table
param.output[oidx] = offset;
}
}
} // namespace
// Host-side launcher for kgen_offset_base: picks a block size via the
// occupancy query, then runs one thread per output element on `stream`.
template <int nidx, int idx_ndim>
void indexing_multi_axis_vec::gen_offset_base(
const GenOffsetBaseParam<nidx, idx_ndim>& param, cudaStream_t stream) {
void (*kptr)(GenOffsetBaseParam<nidx, idx_ndim>) = kgen_offset_base<nidx, idx_ndim>;
int bsize = query_blocksize_for_kernel(kptr);
// Ceil-divide so the grid covers all param.size elements.
(*kptr)<<<DIVUP(param.size, bsize), bsize, 0, stream>>>(param);
}
namespace megdnn {
namespace cuda {
namespace indexing_multi_axis_vec {
#define INST(_m, _n) \
template void gen_offset_base(const GenOffsetBaseParam<_m, _n>&, cudaStream_t);
MEGDNN_FOREACH_TENSOR_NDIM(INST, 1)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 2)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 3)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 4)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 5)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 6)
MEGDNN_FOREACH_TENSOR_NDIM(INST, 7)
#undef INST
} // namespace indexing_multi_axis_vec
} // namespace cuda
} // namespace megdnn
// vim: ft=cuda syntax=cpp.doxygen
|
b411d69f00124c8434b94726cc707d965e35ba30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Naive dense matrix multiply c = a * b for square m x m matrices stored
// row-major: one thread computes one output element via a full dot product
// of row `row` of a with column `col` of b.  No shared-memory tiling.
// Launch with a 2D grid covering at least m x m threads.
__global__ void kernelMultMat(int *a, int *b, int *c,int m){
int i,add;
// 2D global coordinates of the output element this thread owns.
int col=blockDim.x*blockIdx.x + threadIdx.x;
int row=blockDim.y*blockIdx.y + threadIdx.y;
if(col<m && row<m) {
add=0;
for(i=0; i< m ;i++){
add += a[i+m*row]*b[col+m*i];
}
c[row*m+col] = add;
}
} | b411d69f00124c8434b94726cc707d965e35ba30.cu | #include "includes.h"
__global__ void kernelMultMat(int *a, int *b, int *c,int m){
int i,add;
int col=blockDim.x*blockIdx.x + threadIdx.x;
int row=blockDim.y*blockIdx.y + threadIdx.y;
if(col<m && row<m) {
add=0;
for(i=0; i< m ;i++){
add += a[i+m*row]*b[col+m*i];
}
c[row*m+col] = add;
}
} |
0f40b38a04b634ec05523ce272c3dece7effb167.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Elementwise res[i] = a[i] - b[i] * x, staged through dynamically allocated
// shared memory.  The launch must supply at least 3 * n * sizeof(int) bytes
// of dynamic shared memory per block (three n-int arrays are carved out of
// `arrays` below).
// NOTE(review): each thread only touches its own slot (tid) of the shared
// arrays, so there is no inter-thread reuse and no __syncthreads() is needed
// -- the shared staging adds no benefit over operating on registers.  Also,
// every thread stores x into the shared scalar s_x without synchronization;
// all threads write the same value, but this is technically a data race --
// confirm intent.  Shared slots are indexed by the GLOBAL tid, which stays
// in-bounds only because the guard limits tid < n; verify launches keep the
// per-block allocation at 3*n ints.
__global__ void callOperationSharedDynamic(int *a, int *b, int *res, int x, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n)
{
return;
}
extern __shared__ int arrays[];
__shared__ int s_x;
// Partition the dynamic allocation into three n-int arrays.
int *s_a = arrays;
int *s_b = &s_a[n];
int *s_res = &s_b[n];
s_x = x;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = s_a[tid] - (s_b[tid] * s_x);
res[tid] = s_res[tid];
} | 0f40b38a04b634ec05523ce272c3dece7effb167.cu | #include "includes.h"
__global__ void callOperationSharedDynamic(int *a, int *b, int *res, int x, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n)
{
return;
}
extern __shared__ int arrays[];
__shared__ int s_x;
int *s_a = arrays;
int *s_b = &s_a[n];
int *s_res = &s_b[n];
s_x = x;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = s_a[tid] - (s_b[tid] * s_x);
res[tid] = s_res[tid];
} |
b9fe3941bee66e07a7882b8df797cef448456c7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/features2d/features2d.hpp>
#include "cuda_akaze.h"
#include "cudautils.h"
#define CONVROW_W 160
#define CONVCOL_W 32
#define CONVCOL_H 40
#define CONVCOL_S 8
#define SCHARR_W 32
#define SCHARR_H 16
#define NLDSTEP_W 32
#define NLDSTEP_H 13
#define ORIENT_S (13 * 16)
#define EXTRACT_S 64
#define MAX_BLOCK 512
__device__ __constant__ float d_Kernel[21];
__device__ unsigned int d_PointCounter[1];
__device__ unsigned int d_ExtremaIdx[16];
__device__ __constant__ int comp_idx_1[61 * 8];
__device__ __constant__ int comp_idx_2[61 * 8];
hipStream_t copyStream;
//__device__ __constant__ float norm_factors[29];
#if 1
#define CHK
#else
#define CHK hipDeviceSynchronize(); \
{ \
hipError_t cuerr = hipGetLastError(); \
if (cuerr) { \
std::cout << "Cuda error " << hipGetErrorString(cuerr) << ". at " << __FILE__ << ":" << __LINE__ << std::endl; \
} \
}
#endif
// Block the host until all work previously enqueued on the dedicated copy
// stream (copyStream, defined above) has completed.
void WaitCuda() {
hipStreamSynchronize(copyStream);
}
struct Conv_t {
float *d_Result;
float *d_Data;
int width;
int pitch;
int height;
};
// Horizontal 1D convolution with the (2*RADIUS+1)-tap kernel in constant
// memory d_Kernel, clamping reads to the row edges (replicate border).
// Launch contract (see SeparableFilter): grid (ceil(width/CONVROW_W), height),
// block of CONVROW_W + 2*RADIUS threads -- the extra threads only load the
// halo and are excluded from the store by the tx < CONVROW_W test.
template <int RADIUS>
__global__ void ConvRowGPU(struct Conv_t s) {
//__global__ void ConvRowGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVROW_W + 2 * RADIUS];
const int tx = threadIdx.x;
const int minx = blockIdx.x * CONVROW_W;
const int maxx = min(minx + CONVROW_W, s.width);
const int yptr = blockIdx.y * s.pitch;
const int loadPos = minx + tx - RADIUS;
const int writePos = minx + tx;
// Stage one tile plus halo into shared memory, replicating edge pixels.
if (loadPos < 0)
data[tx] = s.d_Data[yptr];
else if (loadPos >= s.width)
data[tx] = s.d_Data[yptr + s.width - 1];
else
data[tx] = s.d_Data[yptr + loadPos];
__syncthreads();
if (writePos < maxx && tx < CONVROW_W) {
float sum = 0.0f;
for (int i = 0; i <= (2 * RADIUS); i++) sum += data[tx + i] * d_Kernel[i];
s.d_Result[yptr + writePos] = sum;
}
}
///////////////////////////////////////////////////////////////////////////////
// Column convolution filter
///////////////////////////////////////////////////////////////////////////////
// Vertical 1D convolution with the same constant-memory kernel, replicating
// the top/bottom rows at the borders.  Each block stages a CONVCOL_W-wide,
// (CONVCOL_H + 2*RADIUS)-tall column tile into shared memory, striding by
// CONVCOL_S rows per iteration (block is CONVCOL_W x CONVCOL_S threads).
template <int RADIUS>
__global__ void ConvColGPU(struct Conv_t s) {
//__global__ void ConvColGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVCOL_W * (CONVCOL_H + 2 * RADIUS)];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int miny = blockIdx.y * CONVCOL_H;
const int maxy = min(miny + CONVCOL_H, s.height) - 1;
const int totStart = miny - RADIUS;
const int totEnd = maxy + RADIUS;
const int colStart = blockIdx.x * CONVCOL_W + tx;
const int colEnd = colStart + (s.height - 1) * s.pitch;
const int smemStep = CONVCOL_W * CONVCOL_S;
const int gmemStep = s.pitch * CONVCOL_S;
// Load phase: tile plus vertical halo, clamped to first/last row.
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (totStart + ty) * s.pitch;
for (int y = totStart + ty; y <= totEnd; y += blockDim.y) {
if (y < 0)
data[smemPos] = s.d_Data[colStart];
else if (y >= s.height)
data[smemPos] = s.d_Data[colEnd];
else
data[smemPos] = s.d_Data[gmemPos];
smemPos += smemStep;
gmemPos += gmemStep;
}
}
__syncthreads();
// Convolution phase: each thread produces several rows of one column.
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (miny + ty) * s.pitch;
for (int y = miny + ty; y <= maxy; y += blockDim.y) {
float sum = 0.0f;
for (int i = 0; i <= 2 * RADIUS; i++)
sum += data[smemPos + i * CONVCOL_W] * d_Kernel[i];
s.d_Result[gmemPos] = sum;
smemPos += smemStep;
gmemPos += gmemStep;
}
}
}
// Apply a separable (2*RADIUS+1)-tap filter: rows into `temp`, then columns
// into `outimg`.  Uploads h_Kernel to the constant-memory d_Kernel first.
// Returns an elapsed-time value (currently hard-wired to 0; the TimerGPU
// calls are commented out).
// NOTE(review): hipMemcpyToSymbolAsync is called with the bare symbol and no
// stream argument; hipify normally emits HIP_SYMBOL(d_Kernel) here -- verify
// this compiles and targets the right symbol on HIP.  Error checks and
// device syncs after the kernel launches are also commented out, so launch
// failures would go unnoticed.
template <int RADIUS>
double SeparableFilter(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
float *h_Kernel) {
int width = inimg.width;
int pitch = inimg.pitch;
int height = inimg.height;
float *d_DataA = inimg.d_data;
float *d_DataB = outimg.d_data;
float *d_Temp = temp.d_data;
if (d_DataA == NULL || d_DataB == NULL || d_Temp == NULL) {
printf("SeparableFilter: missing data\n");
return 0.0;
}
// TimerGPU timer0(0);
const unsigned int kernelSize = (2 * RADIUS + 1) * sizeof(float);
safeCall(hipMemcpyToSymbolAsync(d_Kernel, h_Kernel, kernelSize));
// Row pass: one block per CONVROW_W-wide strip per row; block size includes
// the 2*RADIUS halo threads (see ConvRowGPU's launch contract).
dim3 blockGridRows(iDivUp(width, CONVROW_W), height);
dim3 threadBlockRows(CONVROW_W + 2 * RADIUS);
struct Conv_t s;
s.d_Result = d_Temp;
s.d_Data = d_DataA;
s.width = width;
s.pitch = pitch;
s.height = height;
ConvRowGPU<RADIUS> << <blockGridRows, threadBlockRows>>> (s);
// checkMsg("ConvRowGPU() execution failed\n");
// safeCall(hipDeviceSynchronize());
// Column pass over the row-filtered temp image.
dim3 blockGridColumns(iDivUp(width, CONVCOL_W), iDivUp(height, CONVCOL_H));
dim3 threadBlockColumns(CONVCOL_W, CONVCOL_S);
s.d_Result = d_DataB;
s.d_Data = d_Temp;
ConvColGPU<RADIUS> << <blockGridColumns, threadBlockColumns>>> (s);
// checkMsg("ConvColGPU() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("SeparableFilter time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
// Gaussian low-pass with compile-time radius: build a normalized
// (2*RADIUS+1)-tap Gaussian of variance `var` and run it through
// SeparableFilter.
template <int RADIUS>
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
double var) {
float kernel[2 * RADIUS + 1];
float kernelSum = 0.0f;
for (int j = -RADIUS; j <= RADIUS; j++) {
kernel[j + RADIUS] = (float)expf(-(double)j * j / 2.0 / var);
kernelSum += kernel[j + RADIUS];
}
// Normalize so the taps sum to 1 (unit DC gain).
for (int j = -RADIUS; j <= RADIUS; j++) kernel[j + RADIUS] /= kernelSum;
return SeparableFilter<RADIUS>(inimg, outimg, temp, kernel);
}
// Runtime dispatch from requested kernel size to the compiled radius
// (radius = kernsize/2, capped at 5).  Sizes below 5 still use radius 2,
// and sizes above 11 are clamped to radius 5 with a warning.
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp, double var,
int kernsize) {
if (kernsize <= 5)
return LowPass<2>(inimg, outimg, temp, var);
else if (kernsize <= 7)
return LowPass<3>(inimg, outimg, temp, var);
else if (kernsize <= 9)
return LowPass<4>(inimg, outimg, temp, var);
else {
if (kernsize > 11)
std::cerr << "Kernels larger than 11 not implemented" << std::endl;
return LowPass<5>(inimg, outimg, temp, var);
}
}
// Software emulation of the warp shfl_down intrinsic through a block-shared
// staging buffer: returns the value held by the thread `offset` lanes higher
// in the same warp, or 0 when lane + offset >= width.
// NOTE(review): unlike a real shuffle this uses __syncthreads(), so EVERY
// thread of the block must reach the call (calling from divergent code is
// undefined).  The static __shared__ buffer caps blockDim at MAX_BLOCK (512)
// threads, and back-to-back calls reuse the same buffer -- the barriers
// order them, but confirm all call sites satisfy the full-block requirement.
__inline__ __device__
int fake_shfl_down(int val, int offset, int width = 32) {
static __shared__ int shared[MAX_BLOCK];
int lane = threadIdx.x % 32;
shared[threadIdx.x] = val;
__syncthreads();
val = (lane + offset<width) ? shared[threadIdx.x + offset] : 0;
__syncthreads();
return val;
}
// Scharr gradient kernel: stage a (SCHARR_W+2) x (SCHARR_H+2) tile (including
// a one-pixel halo, mirrored at image borders) into shared memory, then emit
// the x/y derivatives with the 3/10/3 Scharr weights.
// Launch contract (see the host Scharr below): block (SCHARR_W+2, SCHARR_H+2),
// the border threads load halo only.
__global__ void Scharr(float *imgd, float *lxd, float *lyd, int width,
int pitch, int height) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
// Source coordinates shifted by -1 for the halo, mirrored one pixel past
// each border.
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
lxd[y * pitch + x] = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
lyd[y * pitch + x] = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
}
}
// Launch the Scharr kernel: x/y derivatives of img written to lx/ly.
double Scharr(CudaImage &img, CudaImage &lx, CudaImage &ly) {
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  // One extra thread on each side of the tile loads the halo.
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);
  Scharr << <grid, block>>>
      (img.d_data, lx.d_data, ly.d_data, img.width, img.pitch, img.height);
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("Scharr time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Compute the diffusivity ("flow") image from imgd.  Scharr gradients are
// taken from a shared tile (mirrored borders); invk = 1/k^2 with k the
// contrast parameter, and `type` selects the conductivity function.
__global__ void Flow(float *imgd, float *flowd, int width, int pitch,
                     int height, DIFFUSIVITY_TYPE type, float invk) {
#define BW (SCHARR_W + 2)
  __shared__ float buffer[BW * (SCHARR_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * SCHARR_W + tx;
  int y = blockIdx.y * SCHARR_H + ty;
  // Source coordinates shifted by -1, mirrored at the image borders.
  int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
  int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
  buffer[ty * BW + tx] = imgd[yp * pitch + xp];
  __syncthreads();
  if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
    float *b = buffer + (ty + 1) * BW + (tx + 1);
    float ul = b[-BW - 1];
    float ur = b[-BW + 1];
    float ll = b[+BW - 1];
    float lr = b[+BW + 1];
    // Scharr derivatives (same stencil as the Scharr kernel above).
    float lx = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
    float ly = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
    float dif2 = invk * (lx * lx + ly * ly);
    // Perona-Malik g1/g2, Weickert, or Charbonnier conductivity.
    if (type == PM_G1)
      flowd[y * pitch + x] = exp(-dif2);
    else if (type == PM_G2)
      flowd[y * pitch + x] = 1.0f / (1.0f + dif2);
    else if (type == WEICKERT)
      flowd[y * pitch + x] = 1.0f - exp(-3.315 / (dif2 * dif2 * dif2 * dif2));
    else
      flowd[y * pitch + x] = 1.0f / sqrt(1.0f + dif2);
  }
}
// Launch the Flow kernel: diffusivity image of img with contrast kcontrast.
double Flow(CudaImage &img, CudaImage &flow, DIFFUSIVITY_TYPE type,
            float kcontrast) {
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);
  float invk = 1.0f / (kcontrast * kcontrast);
  Flow << <grid, block>>> (img.d_data, flow.d_data, img.width, img.pitch,
                           img.height, type, invk);
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("Flow time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Argument bundle for the NLDStep kernel (passed by value to the launch).
struct NLDStep_t {
  float *imgd;     // input image (device)
  float *flod;     // diffusivity/flow image (device)
  float *temd;     // output step image (device)
  int width;
  int pitch;       // row stride in floats
  int height;
  float stepsize;  // already halved by the host wrapper
};
//__global__ void NLDStep(float *imgd, float *flod, float *temd, int width, int
// pitch, int height, float stepsize)
// One explicit nonlinear-diffusion step: writes
// stepsize * div(flow * grad(img)) into s.temd, using shared tiles of the
// image and the flow with a 1-pixel clamped halo.
__global__ void NLDStep(NLDStep_t s) {
#undef BW
#define BW (NLDSTEP_W + 2)
  __shared__ float ibuff[BW * (NLDSTEP_H + 2)];
  __shared__ float fbuff[BW * (NLDSTEP_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * NLDSTEP_W + tx;
  int y = blockIdx.y * NLDSTEP_H + ty;
  // Source coordinates shifted by -1, clamped at the image borders.
  int xp = (x == 0 ? 0 : (x > s.width ? s.width - 1 : x - 1));
  int yp = (y == 0 ? 0 : (y > s.height ? s.height - 1 : y - 1));
  ibuff[ty * BW + tx] = s.imgd[yp * s.pitch + xp];
  fbuff[ty * BW + tx] = s.flod[yp * s.pitch + xp];
  __syncthreads();
  if (tx < NLDSTEP_W && ty < NLDSTEP_H && x < s.width && y < s.height) {
    float *ib = ibuff + (ty + 1) * BW + (tx + 1);
    float *fb = fbuff + (ty + 1) * BW + (tx + 1);
    float ib0 = ib[0];
    float fb0 = fb[0];
    // Flux through each face: (flow_center + flow_neighbor) * gradient.
    float xpos = (fb0 + fb[+1]) * (ib[+1] - ib0);
    float xneg = (fb0 + fb[-1]) * (ib0 - ib[-1]);
    float ypos = (fb0 + fb[+BW]) * (ib[+BW] - ib0);
    float yneg = (fb0 + fb[-BW]) * (ib0 - ib[-BW]);
    s.temd[y * s.pitch + x] = s.stepsize * (xpos - xneg + ypos - yneg);
  }
}
// Argument bundle for the NLDUpdate kernel (passed by value to the launch).
struct NLDUpdate_t {
  float *imgd;  // image updated in place (device)
  float *temd;  // step image to add (device)
  int width;
  int pitch;    // row stride in floats
  int height;
};
//__global__ void NLDUpdate(float *imgd, float *temd, int width, int pitch, int
// height)
// Add the diffusion step image temd onto imgd in place, one thread per pixel.
__global__ void NLDUpdate(NLDUpdate_t s) {
  int col = blockIdx.x * 32 + threadIdx.x;
  int row = blockIdx.y * 16 + threadIdx.y;
  if (col >= s.width || row >= s.height) return;
  int idx = row * s.pitch + col;
  s.imgd[idx] = s.imgd[idx] + s.temd[idx];
}
// Perform one explicit nonlinear-diffusion step on img:
// temp = (stepsize/2) * div(flow * grad(img)), then img += temp.
double NLDStep(CudaImage &img, CudaImage &flow, CudaImage &temp,
               float stepsize) {
  // First kernel: compute the step image into temp.
  NLDStep_t stepArgs;
  stepArgs.imgd = img.d_data;
  stepArgs.flod = flow.d_data;
  stepArgs.temd = temp.d_data;
  stepArgs.width = img.width;
  stepArgs.height = img.height;
  stepArgs.pitch = img.pitch;
  stepArgs.stepsize = 0.5 * stepsize;
  dim3 stepGrid(iDivUp(img.width, NLDSTEP_W), iDivUp(img.height, NLDSTEP_H));
  dim3 stepBlock(NLDSTEP_W + 2, NLDSTEP_H + 2);
  NLDStep << <stepGrid, stepBlock>>> (stepArgs);
  // Second kernel: accumulate the step into the image.
  NLDUpdate_t updArgs;
  updArgs.imgd = img.d_data;
  updArgs.temd = temp.d_data;
  updArgs.width = img.width;
  updArgs.height = img.height;
  updArgs.pitch = img.pitch;
  dim3 updGrid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 updBlock(32, 16);
  NLDUpdate << <updGrid, updBlock>>> (updArgs);
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("NLDStep time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// General downsampling of iimd (iwidth x iheight) to oimd (owidth x oheight)
// where the output is roughly — but not exactly — half the input size.
// Each 16x16 output tile stages weighted input rows in shared memory, with
// columns weighted (f0, owidth, f2) when the width does not divide exactly.
// NOTE(review): threads outside the output return before __syncthreads(),
// so barrier participation is divergent in edge blocks — confirm safe.
__global__ void HalfSample(float *iimd, float *oimd, int iwidth, int iheight,
                           int ipitch, int owidth, int oheight, int opitch) {
  __shared__ float buffer[16 * 33];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * 16 + tx;
  int y = blockIdx.y * 16 + ty;
  if (x >= owidth || y >= oheight) return;
  float *ptri = iimd + (2 * y) * ipitch + (2 * x);
  if (2 * owidth == iwidth) {
    // Width divides exactly: two-tap horizontal sums.
    buffer[ty * 32 + tx] = owidth * (ptri[0] + ptri[1]);
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = owidth * (ptri[0] + ptri[1]);
    if (ty == 15) {
      ptri += ipitch;
      buffer[tx + 32 * 16] = owidth * (ptri[0] + ptri[1]);
    } else if (y * 2 + 3 == iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] = owidth * (ptri[0] + ptri[1]);
    }
  } else {
    // Width does not divide exactly: three-tap horizontal weighting.
    float f0 = owidth - x;
    float f2 = 1 + x;
    buffer[ty * 32 + tx] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    if (ty == 15 && 2 * oheight != iheight) {
      ptri += ipitch;
      // BUG FIX: the last tap previously read ptri[1]; the three-tap stencil
      // reads ptri[0..2] exactly as in the other three branches.
      buffer[tx + 32 * 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    } else if (y * 2 + 3 == iheight && 2 * oheight != iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] =
          f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    }
  }
  __syncthreads();
  float *buff = buffer + 32 * ty + tx;
  if (2 * oheight == iheight)
    // Height divides exactly: two-tap vertical sum, normalized.
    oimd[y * opitch + x] = oheight * (buff[0] + buff[16]) / (iwidth * iheight);
  else {
    // Three-tap vertical weighting, normalized.
    float f0 = oheight - y;
    float f2 = 1 + y;
    oimd[y * opitch + x] = (f0 * buff[0] + oheight * buff[16] + f2 * buff[32]) /
                           (iwidth * iheight);
  }
}
// Exact 2x downsampling: each output pixel is the mean of a 2x2 input block.
__global__ void HalfSample2(float *iimd, float *oimd, int ipitch, int owidth,
                            int oheight, int opitch) {
  int ox = blockIdx.x * 32 + threadIdx.x;
  int oy = blockIdx.y * 16 + threadIdx.y;
  if (ox >= owidth || oy >= oheight) return;
  float *src = iimd + (2 * oy) * ipitch + (2 * ox);
  float sum = src[0] + src[1] + src[ipitch + 0] + src[ipitch + 1];
  oimd[oy * opitch + ox] = 0.25f * sum;
}
// Downsample inimg into outimg.  Uses the fast 2x2-average kernel when the
// sizes divide exactly, otherwise the general weighted kernel.
double HalfSample(CudaImage &inimg, CudaImage &outimg) {
  bool exactHalf = (inimg.width == 2 * outimg.width) &&
                   (inimg.height == 2 * outimg.height);
  if (exactHalf) {
    dim3 grid(iDivUp(outimg.width, 32), iDivUp(outimg.height, 16));
    dim3 block(32, 16);
    HalfSample2 << <grid, block>>> (inimg.d_data, outimg.d_data,
                                    inimg.pitch, outimg.width,
                                    outimg.height, outimg.pitch);
  } else {
    dim3 grid(iDivUp(outimg.width, 16), iDivUp(outimg.height, 16));
    dim3 block(16, 16);
    HalfSample << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.width,
                                   inimg.height, inimg.pitch, outimg.width,
                                   outimg.height, outimg.pitch);
  }
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("HalfSample time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Device-to-device copy of inimg into outimg (inimg.width floats per row).
// BUG FIX: hipMemcpy2DAsync takes (dst, dpitch, src, spitch, ...); the
// source pitch was previously passed as outimg.pitch, which reads the wrong
// rows whenever the two images have different pitches.
double Copy(CudaImage &inimg, CudaImage &outimg) {
  double gpuTime = 0;  // timing currently disabled
  safeCall(hipMemcpy2DAsync(outimg.d_data, sizeof(float) * outimg.pitch,
                            inimg.d_data, sizeof(float) * inimg.pitch,
                            sizeof(float) * inimg.width, inimg.height,
                            hipMemcpyDeviceToDevice));
#ifdef VERBOSE
  printf("Copy time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Allocate one contiguous GPU slab holding the scale-space pyramid (num
// images per octave, omax octaves), keypoint arrays, descriptor buffers,
// neighbor-index scratch and CudaImage records.  Offsets (in float units)
// are first accumulated with buf.d_data temporarily holding the offset,
// then rebased onto the real allocation.  Also creates copyStream.
// Returns the slab base pointer (free with FreeBuffers()).
// BUG FIX: the stream-create call contained mojibake "©Stream" (an HTML
// entity corruption of "&copyStream") and could not compile.
float *AllocBuffers(int width, int height, int num, int omax, int &maxpts,
                    std::vector<CudaImage> &buffers, cv::KeyPoint *&pts,
                    cv::KeyPoint *&ptsbuffer, int *&ptindices, unsigned char *&desc, float *&descbuffer, CudaImage *&ims) {
  // Round the point capacity up to a multiple of 4.
  maxpts = 4 * ((maxpts+3)/4);
  buffers.resize(omax * num);
  int w = width;
  int h = height;
  int p = iAlignUp(w, 128);  // rows aligned to 128 floats
  int size = 0;              // running offset in floats
  for (int i = 0; i < omax; i++) {
    for (int j = 0; j < num; j++) {
      CudaImage &buf = buffers[i * num + j];
      buf.width = w;
      buf.height = h;
      buf.pitch = p;
      // Temporarily stash the offset; rebased after allocation below.
      buf.d_data = (float *)((long)size);
      size += h * p;
    }
    // Each successive octave is half resolution.
    w /= 2;
    h /= 2;
    p = iAlignUp(w, 128);
  }
  // Offsets (in floats) of the non-image regions of the slab.
  int ptsstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int ptsbufferstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int descstart = size;
  size += sizeof(unsigned char)*maxpts*61/sizeof(float);
  int descbufferstart = size;
  size += sizeof(float)*3*29*maxpts / sizeof(float);
  int indicesstart = size;
  size += 21*21*sizeof(int)*maxpts/sizeof(float);
  int imgstart = size;
  size += sizeof(CudaImage) * (num * omax + sizeof(float) - 1) / sizeof(float);
  float *memory = NULL;
  size_t pitch;
  // NOTE(review): `size` counts floats, so the printed Mbyte figure is a
  // quarter of the true byte size — confirm the intended units.
  std::cout << "allocating " << size/1024./1024. << " Mbytes of gpu memory\n";
  safeCall(hipMallocPitch((void **)&memory, &pitch, (size_t)4096,
                          (size + 4095) / 4096 * sizeof(float)));
  // Rebase the pyramid image pointers onto the real allocation.
  for (int i = 0; i < omax * num; i++) {
    CudaImage &buf = buffers[i];
    buf.d_data = memory + (long)buf.d_data;
  }
  pts = (cv::KeyPoint *)(memory + ptsstart);
  ptsbuffer = (cv::KeyPoint *)(memory + ptsbufferstart);
  desc = (unsigned char *)(memory + descstart);
  descbuffer = (float*)(memory + descbufferstart);
  ptindices = (int*)(memory + indicesstart);
  ims = (CudaImage *)(memory + imgstart);
  InitCompareIndices();
  hipStreamCreate(&copyStream);
  return memory;
}
// Release the single slab allocated by AllocBuffers().
void FreeBuffers(float *buffers) {
  safeCall(hipFree(buffers));
}
// Device-global scratch: maximum gradient (float bits stored in an unsigned
// int, see MaxContrast) and the contrast histogram.
__device__ unsigned int d_Maxval[1];
__device__ int d_Histogram[512];
// Tile dimensions for the MaxContrast and HistContrast kernels.
#define CONTRAST_W 64
#define CONTRAST_H 7
#define HISTCONT_W 64
#define HISTCONT_H 8
#define HISTCONT_R 4
// Compute the Scharr gradient magnitude of imgd into cond and track the
// global maximum in d_Maxval.  Float bits are stored in an unsigned int so
// atomicMax orders correctly for non-negative values.
// NOTE(review): threads outside the image return before the __syncthreads()
// calls below, so barrier participation is divergent in edge blocks —
// confirm this is safe on the targeted hardware.
__global__ void MaxContrast(float *imgd, float *cond, int width, int pitch,
                            int height) {
#define WID (CONTRAST_W + 2)
  __shared__ float buffer[WID * (CONTRAST_H + 2)];
  __shared__ unsigned int maxval[32];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  if (tx < 32 && !ty) maxval[tx] = 0.0f;
  __syncthreads();
  int x = blockIdx.x * CONTRAST_W + tx;
  int y = blockIdx.y * CONTRAST_H + ty;
  if (x >= width || y >= height) return;
  float *b = buffer + ty * WID + tx;
  b[0] = imgd[y * pitch + x];
  __syncthreads();
  if (tx < CONTRAST_W && ty < CONTRAST_H && x < width - 2 && y < height - 2) {
    // Scharr responses from the 3x3 neighborhood staged above.
    float dx = 3.0f * (b[0] - b[2] + b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[WID] - b[WID + 2]);
    float dy = 3.0f * (b[0] + b[2] - b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[1] - b[2 * WID + 1]);
    float grad = sqrt(dx * dx + dy * dy);
    cond[(y + 1) * pitch + (x + 1)] = grad;
    // Per-bank shared max on the raw float bits, merged below.
    unsigned int *gradi = (unsigned int *)&grad;
    atomicMax(maxval + (tx & 31), *gradi);
  }
  __syncthreads();
  if (tx < 32 && !ty) atomicMax(d_Maxval, maxval[tx]);
}
// Histogram the normalized gradient magnitudes in cond (skipping the
// 1-pixel border).  Each block accumulates into a shared histogram, then
// merges into d_Histogram.  imaxval = 1 / max gradient; nbins <= 512.
__global__ void HistContrast(float *cond, int width, int pitch, int height,
                             float imaxval, int nbins) {
  __shared__ int hist[512];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int i = ty * HISTCONT_W + tx;
  if (i < nbins) hist[i] = 0;
  __syncthreads();
  int x = blockIdx.x * HISTCONT_W + tx;
  int y = blockIdx.y * HISTCONT_H * HISTCONT_R + ty;
  // Each thread visits HISTCONT_R rows spaced HISTCONT_H apart.
  if (x > 0 && x < width - 1) {
    for (int i = 0; i < HISTCONT_R; i++) {
      if (y > 0 && y < height - 1) {
        int idx = min((int)(nbins * cond[y * pitch + x] * imaxval), nbins - 1);
        atomicAdd(hist + idx, 1);
      }
      y += HISTCONT_H;
    }
  }
  __syncthreads();
  // Merge the block-local histogram into the device-global one.
  if (i < nbins && hist[i] > 0) atomicAdd(d_Histogram + i, hist[i]);
}
// Estimate the contrast parameter as the perc-percentile of gradient
// magnitudes of a slightly smoothed copy of img.  Result goes to `contrast`.
// BUG FIX: the two device-to-host symbol reads were asynchronous
// (hipMemcpyFromSymbolAsync) but the host reads h_Maxval / h_Histogram
// immediately afterwards with no synchronization — use the synchronous
// hipMemcpyFromSymbol so the values are valid when consumed.
double ContrastPercentile(CudaImage &img, CudaImage &temp, CudaImage &blur,
                          float perc, int nbins, float &contrast) {
  LowPass(img, blur, temp, 1.0f, 5);
  // d_Maxval holds float bits in an unsigned int (see MaxContrast).
  float h_Maxval = 0.0f;
  safeCall(hipMemcpyToSymbolAsync(d_Maxval, &h_Maxval, sizeof(float)));
  dim3 blocks1(iDivUp(img.width, CONTRAST_W), iDivUp(img.height, CONTRAST_H));
  dim3 threads1(CONTRAST_W + 2, CONTRAST_H + 2);
  MaxContrast << <blocks1, threads1>>>
      (blur.d_data, temp.d_data, blur.width, blur.pitch, blur.height);
  safeCall(hipMemcpyFromSymbol(&h_Maxval, d_Maxval, sizeof(float)));
  if (nbins > 512) {
    printf(
        "Warning: Largest number of possible bins in ContrastPercentile() is "
        "512\n");
    nbins = 512;
  }
  int h_Histogram[512];
  memset(h_Histogram, 0, nbins * sizeof(int));
  safeCall(
      hipMemcpyToSymbolAsync(d_Histogram, h_Histogram, nbins * sizeof(int)));
  dim3 blocks2(iDivUp(temp.width, HISTCONT_W),
               iDivUp(temp.height, HISTCONT_H * HISTCONT_R));
  dim3 threads2(HISTCONT_W, HISTCONT_H);
  HistContrast << <blocks2, threads2>>> (temp.d_data, temp.width, temp.pitch,
                                         temp.height, 1.0f / h_Maxval, nbins);
  safeCall(
      hipMemcpyFromSymbol(h_Histogram, d_Histogram, nbins * sizeof(int)));
  // Walk the histogram until the percentile threshold is reached.
  int npoints = (temp.width - 2) * (temp.height - 2);
  int nthreshold = (int)(npoints * perc);
  int k = 0, nelements = 0;
  for (k = 0; nelements < nthreshold && k < nbins; k++)
    nelements += h_Histogram[k];
  // Fall back to 0.03 if the percentile was never reached.
  contrast = (nelements < nthreshold ? 0.03f : h_Maxval * ((float)k / nbins));
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("ContrastPercentile time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// First derivatives of imd at distance `step` with mirrored borders.
// lxd/lyd receive fac1-weighted diagonal differences plus fac2-weighted
// axis differences (a scaled Scharr-style stencil).
__global__ void Derivate(float *imd, float *lxd, float *lyd, int width,
                         int pitch, int height, int step, float fac1,
                         float fac2) {
  int gx = blockIdx.x * 32 + threadIdx.x;
  int gy = blockIdx.y * 16 + threadIdx.y;
  if (gx >= width || gy >= height) return;
  // Sample coordinates +-step away, mirrored at the borders.
  int xl = (gx < step ? step - gx : gx - step);
  int xh = (gx >= width - step ? 2 * width - gx - step - 2 : gx + step);
  int yl = (gy < step ? step - gy : gy - step);
  int yh = (gy >= height - step ? 2 * height - gy - step - 2 : gy + step);
  float ul = imd[yl * pitch + xl];
  float ur = imd[yl * pitch + xh];
  float ll = imd[yh * pitch + xl];
  float lr = imd[yh * pitch + xh];
  float cl = imd[gy * pitch + xl];
  float cr = imd[gy * pitch + xh];
  lxd[gy * pitch + gx] = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
  float uc = imd[yl * pitch + gx];
  float lc = imd[yh * pitch + gx];
  lyd[gy * pitch + gx] = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
}
// Second derivatives from the first-derivative images: lxx and lyx from
// lxd, lyy from lyd (same mirrored stencil as Derivate), then
// detd = lxx*lyy - lyx^2.
__global__ void HessianDeterminant(float *lxd, float *lyd, float *detd,
                                   int width, int pitch, int height, int step,
                                   float fac1, float fac2) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Sample coordinates +-step away, mirrored at the borders.
  int xl = (x < step ? step - x : x - step);
  int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  int yl = (y < step ? step - y : y - step);
  int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  float ul = lxd[yl * pitch + xl];
  float ur = lxd[yl * pitch + xh];
  float ll = lxd[yh * pitch + xl];
  float lr = lxd[yh * pitch + xh];
  float cl = lxd[y * pitch + xl];
  float cr = lxd[y * pitch + xh];
  float lxx = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
  float uc = lxd[yl * pitch + x];
  float lc = lxd[yh * pitch + x];
  float lyx = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  // Reuse the corner/edge registers for the lyd samples.
  ul = lyd[yl * pitch + xl];
  ur = lyd[yl * pitch + xh];
  ll = lyd[yh * pitch + xl];
  lr = lyd[yh * pitch + xh];
  uc = lyd[yl * pitch + x];
  lc = lyd[yh * pitch + x];
  float lyy = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  detd[y * pitch + x] = lxx * lyy - lyx * lyx;
}
// Compute first derivatives of img into lx/ly, then overwrite img with the
// determinant of its Hessian (both kernels share the same launch geometry).
double HessianDeterminant(CudaImage &img, CudaImage &lx, CudaImage &ly,
                          int step) {
  // Stencil weights: 10/3 on the axis taps (Scharr-style normalization).
  const float w = 10.0 / 3.0;
  const float fac1 = 1.0 / (2.0 * (w + 2.0));
  const float fac2 = w * fac1;
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 block(32, 16);
  Derivate << <grid, block>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                               img.pitch, img.height, step, fac1, fac2);
  HessianDeterminant << <grid, block>>> (lx.d_data, ly.d_data, img.d_data,
                                         img.width, img.pitch, img.height,
                                         step, fac1, fac2);
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("HessianDeterminant time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Detect local maxima of the determinant image imd that exceed dthreshold
// and all 8 in-scale neighbors, then refine sub-pixel position with a 2D
// quadratic fit.  Accepted points are appended to pts via d_PointCounter.
// The octave and the sub-pixel x offset are packed into point.octave as a
// float (sign carries the offset's sign); the sub-pixel y offset is stashed
// temporarily in point.angle — both are unpacked in sortFiltered_kernel.
// NOTE(review): imp/imn (adjacent scales) are accepted but never read here.
__global__ void FindExtrema(float *imd, float *imp, float *imn, int maxx,
                            int pitch, int maxy, float border, float dthreshold,
                            int scale, int octave, float size,
                            cv::KeyPoint *pts, int maxpts) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  // Reject points whose border-sized neighborhood leaves the image.
  int left_x = (int)(x - border + 0.5f) - 1;
  int right_x = (int)(x + border + 0.5f) + 1;
  int up_y = (int)(y - border + 0.5f) - 1;
  int down_y = (int)(y + border + 0.5f) + 1;
  if (left_x < 0 || right_x >= maxx || up_y < 0 || down_y >= maxy) return;
  int p = y * pitch + x;
  float v = imd[p];
  if (v > dthreshold && v > imd[p - pitch - 1] && v > imd[p + pitch + 1] &&
      v > imd[p + pitch - 1] && v > imd[p - pitch + 1] && v > imd[p - 1] &&
      v > imd[p + 1] && v > imd[p + pitch] && v > imd[p - pitch]) {
    // Quadratic sub-pixel refinement from first/second differences.
    float dx = 0.5f * (imd[p + 1] - imd[p - 1]);
    float dy = 0.5f * (imd[p + pitch] - imd[p - pitch]);
    float dxx = imd[p + 1] + imd[p - 1] - 2.0f * v;
    float dyy = imd[p + pitch] + imd[p - pitch] - 2.0f * v;
    float dxy = 0.25f * (imd[p + pitch + 1] + imd[p - pitch - 1] -
                         imd[p + pitch - 1] - imd[p - pitch + 1]);
    float det = dxx * dyy - dxy * dxy;
    float idet = (det != 0.0f ? 1.0f / det : 0.0f);
    float dst0 = idet * (dxy * dy - dyy * dx);
    float dst1 = idet * (dxy * dx - dxx * dy);
    // "weak" = refinement moved more than one pixel; marked by negative size.
    bool weak = true;
    if (dst0 >= -1.0f && dst0 <= 1.0f && dst1 >= -1.0f && dst1 <= 1.0f) {
      weak = 0;
    }
    unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
    if (idx < maxpts) {
      cv::KeyPoint &point = pts[idx];
      point.response = v;
      point.size = (weak ? -1 : 1) * 2.0 * size;
      // Pack octave + |dst0| into a float; the sign carries dst0's sign.
      float octsub = (dst0 < 0 ? -1 : 1) * (octave + fabs(dst0));
      *(float *)(&point.octave) = (weak ? octave : octsub);
      point.class_id = scale;
      int ratio = (1 << octave);
      point.pt.x = ratio * (x);
      point.pt.y = ratio * (y);
      point.angle = dst1;  // temporary: sub-pixel y offset
    } else {
      // Over capacity: undo the increment.
      atomicAdd(d_PointCounter,-1);
    }
  }
}
// Record the current keypoint count as the end index for this scale.
__global__ void CopyIdxArray(int scale) {
  d_ExtremaIdx[scale] = d_PointCounter[0];
}
// Launch extrema detection over img and record the per-scale end index in
// d_ExtremaIdx[scale].
double FindExtrema(CudaImage &img, CudaImage &imgp, CudaImage &imgn,
                   float border, float dthreshold, int scale, int octave,
                   float size, cv::KeyPoint *pts, int maxpts) {
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 block(32, 16);
  FindExtrema << <grid, block>>>
      (img.d_data, imgp.d_data, imgn.d_data, img.width, img.pitch, img.height,
       border, dthreshold, scale, octave, size, pts, maxpts);
  CopyIdxArray << <1, 1>>> (scale);
  CHK
  double gpuTime = 0;  // timing currently disabled
#ifdef VERBOSE
  printf("FindExtrema time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Reset the device-side keypoint counter to zero.
void ClearPoints() {
  int zero = 0;
  safeCall(hipMemcpyToSymbolAsync(d_PointCounter, &zero, sizeof(int)));
}
// One compare-exchange step of the bitonic network on an int array:
// conditionally swap the pair `offset` apart so the larger lands first.
__forceinline__ __device__ void atomicSort(int *pts, int shmidx, int offset,
                                           int sortdir) {
  int &a = pts[shmidx + sortdir];
  int &b = pts[shmidx + (offset - sortdir)];
  if (a < b) {
    int tmp = a;
    a = b;
    b = tmp;
  }
}
// Ordering predicate for keypoints: row-major (y, then x).  Entries whose
// pt.x is zero act as padding; a zero on either side makes the result
// depend only on whether j is the padding entry.
__forceinline__ __device__ bool atomicCompare(const cv::KeyPoint &i,
                                              const cv::KeyPoint &j) {
  if (i.pt.x * j.pt.x == 0) return j.pt.x == 0;
  if (i.pt.y != j.pt.y) return i.pt.y < j.pt.y;
  return i.pt.x < j.pt.x;
}
// Compact sort record used by the bitonic sorts: the original array index
// plus the quantized keypoint position.
template <typename T>
struct sortstruct_t {
  T idx;    // index into the source keypoint array; -1 marks padding
  short x;  // quantized pt.x
  short y;  // quantized pt.y
};
// Ordering predicate for sort records: row-major (y, then x).  Records with
// x == 0 act as padding; a zero on either side makes the result depend only
// on whether j is the padding entry.
template <typename T>
__forceinline__ __device__ bool atomicCompare(const sortstruct_t<T> &i,
                                              const sortstruct_t<T> &j) {
  if (i.x * j.x == 0) return j.x == 0;
  if (i.y != j.y) return i.y < j.y;
  return i.x < j.x;
}
// One compare-exchange step of the bitonic network on sort records:
// conditionally swap the pair `offset` apart, field by field.
template <typename T>
__forceinline__ __device__ void atomicSort(sortstruct_t<T> *pts, int shmidx,
                                           int offset, int sortdir) {
  sortstruct_t<T> &a = pts[(shmidx + sortdir)];
  sortstruct_t<T> &b = pts[(shmidx + (offset - sortdir))];
  if (atomicCompare(a, b)) {
    T tmpidx = a.idx;
    short tmpx = a.x;
    short tmpy = a.y;
    a.idx = b.idx;
    a.x = b.x;
    a.y = b.y;
    b.idx = tmpidx;
    b.x = tmpx;
    b.y = tmpy;
  }
}
#define BitonicSortThreads 1024
// Per-scale bitonic sort of keypoints by (y, x).  One block per scale
// (blockIdx.x); up to 8192 points per scale are staged as compact records
// in shared memory, sorted, and the permutation is applied while copying
// from pts to newpts.
template <class T>
__global__ void bitonicSort(const T *pts, T *newpts) {
  int scale = blockIdx.x;
  __shared__ struct sortstruct_t<short> shm[8192];
  // This scale's points occupy [first, last) in pts.
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Stage records; pad the remainder with (idx=-1, 0, 0) entries that sort
  // to the end (see atomicCompare).
  for (int i = threadIdx.x; i < 8192;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Standard bitonic network over the fixed 8192-entry array.
  for (int i=1; i<8192; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<4096; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Apply the sorted permutation while copying field by field.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < 8192; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
// Global-memory variant of bitonicSort for scales exceeding 8192 points.
// _shm points at preallocated scratch (_sz records per block); the network
// size is nkpts rounded up to a power of two.
template <class T>
__global__ void bitonicSort_global(const T *pts, T *newpts, sortstruct_t<int>* _shm, int _sz) {
  int scale = blockIdx.x;
  //__shared__ struct sortstruct_t shm[8192];
  // This scale's points occupy [first, last) in pts.
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Round the element count up to the next power of two for the network.
  int nkpts_ceil = 1;
  while (nkpts_ceil < nkpts) nkpts_ceil *= 2;
  sortstruct_t<int> *shm = &(_shm[_sz*blockIdx.x]);
  for (int i = threadIdx.x; i < nkpts_ceil;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      // Padding records sort to the end (see atomicCompare).
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  for (int i=1; i<nkpts_ceil; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<nkpts_ceil/2; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Apply the sorted permutation while copying field by field.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < nkpts_ceil; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
#define FindNeighborsThreads 32
// For each keypoint (one block per point), collect indices of earlier
// points within half its size, scanning the same scale and one scale below.
// Neighbor indices go to kptindices[blockIdx.x*width + 1..]; slot 0 holds
// 1 + the number found.  Relies on points being sorted by (y, x) within
// each scale so the backward scan can stop once the y gap exceeds size/2.
__global__ void FindNeighbors(cv::KeyPoint *pts, int *kptindices, int width) {
  __shared__ int gidx[1];
  // which scale?
  int scale = pts[blockIdx.x].class_id;
  int cmpIdx = scale < 1 ? 0 : d_ExtremaIdx[scale - 1];
  float size = pts[blockIdx.x].size;
  gidx[0] = 1;  // slot 0 is the count; neighbor indices start at slot 1
  __syncthreads();
  // One keypoint per block.
  cv::KeyPoint &kpt = pts[blockIdx.x];
  // Key point to compare. Only compare with smaller than current
  // Iterate backwards instead and break as soon as possible!
  //for (int i = cmpIdx + threadIdx.x; i < blockIdx.x; i += FindNeighborsThreads) {
  for (int i=blockIdx.x-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
    cv::KeyPoint &kpt_cmp = pts[i];
    if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
    //if (fabs(kpt.pt.y-kpt_cmp.pt.y) > size*.5f) continue;
    float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                 (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
    if (dist < size * size * 0.25) {
      int idx = atomicAdd(gidx, 1);
      kptindices[blockIdx.x * width + idx] = i;
    }
  }
  if (scale > 0) {
    // Also scan the previous scale's index range.
    int startidx = d_ExtremaIdx[scale-1];
    cmpIdx = scale < 2 ? 0 : d_ExtremaIdx[scale - 2];
    for (int i=startidx-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
      cv::KeyPoint &kpt_cmp = pts[i];
      if (kpt_cmp.pt.y-kpt.pt.y > size*.5f) continue;
      if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
      float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                   (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
      if (dist < size * size * 0.25) {
        int idx = atomicAdd(gidx, 1);
        kptindices[blockIdx.x * width + idx] = i;
      }
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    kptindices[blockIdx.x * width] = gidx[0];
  }
}
// TODO Intermediate storage of memberarray and minneighbor
#define FilterExtremaThreads 1024
// Iterative non-maximum suppression across overlapping keypoints (single
// block).  Each round, points whose neighbors (from FindNeighbors) are all
// processed either add themselves or are compared against their strongest
// neighbor; iteration ends when a full pass changes nothing.  memberarray,
// minneighbor and shouldAdd are preallocated device scratch of size nump.
__global__ void FilterExtrema_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                     int *kptindices, int width,
                                     int *memberarray,
                                     int *minneighbor,
                                     char *shouldAdd) {
  // -1 means not processed
  // -2 means added but replaced
  // >=0 means added
  __shared__ bool shouldBreak[1];
  int nump = d_PointCounter[0];
  // Initially all points are unprocessed
  for (int i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    memberarray[i] = -1;
  }
  if (threadIdx.x == 0) {
    shouldBreak[0] = true;
  }
  __syncthreads();
  // Loop until there are no more points to process
  for (int xx=0; xx<10000; ++xx) {
  //while (true) {
    // Outer loop to handle more than 8*1024 points
    // Start by restoring memberarray
    // Make sure to add appropriate offset to indices
    //	for (int offset=0; offset<nump; offset += 8*1024) {
    //	  memberarray[i] = storedmemberarray[i+offset];
    //for (int offset=0; offset<nump; offset += 8*1024) {
    // Mark all points for addition and no minimum neighbor
    //int maxi = nump-offset >= 8*1024 ? 8*1024 : nump-offset;
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      minneighbor[i] = nump+1;
      shouldAdd[i] = true;
    }
    __syncthreads();
    // Look through all points. If there are points that have not been processed,
    // disable breaking and check if it has no processed neighbors (add), has all processed
    // neighbors (compare with neighbors) or has some unprocessed neighbor (wait)
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      int neighborsSize = kptindices[i * width] - 1;
      int *neighbors = &(kptindices[i * width + 1]);
      // Only do if we didn't process the point before
      if (memberarray[i] == -1) {
        // If we process at least one point we shouldn't break
        // No need to sync. Only want to know if at least one thread wants to
        // continue
        shouldBreak[0] = false;
        // Sort neighbors according to the order of currently added points
        // (often very few)
        // If the neighbor has been replaced, stick it to the back
        // If any neighbor has not been processed, break;
        bool shouldProcess = true;
        for (int k = 0; k < neighborsSize; ++k) {
          // If the point has one or more unprocessed neighbors, skip
          if (memberarray[neighbors[k]] == -1) {
            shouldProcess = false;
            shouldAdd[i] = false;
            break;
          }
          // If it has a neighbor that is in the list, we don't add, but process
          if (memberarray[neighbors[k]] >= 0) {
            shouldAdd[i] = false;
          }
        }
        // We should process and potentially replace the neighbor
        if (shouldProcess && !shouldAdd[i]) {
          // Find the smallest neighbor. Often only one or two, so no ned for fancy algorithm
          for (int k = 0; k < neighborsSize; ++k) {
            for (int j = k + 1; j < neighborsSize; ++j) {
              if (memberarray[neighbors[k]] == -2 ||
                  (memberarray[neighbors[j]] != -2 &&
                   memberarray[neighbors[j]] < memberarray[neighbors[k]])) {
                int t = neighbors[k];
                neighbors[k] = neighbors[j];
                neighbors[j] = t;
              }
            }
          }
          // Pick the first neighbor
          // We need to make sure, in case more than one point has this
          // neighbor,
          // That the point with lowest memberarrayindex processes it first
          // Here minneighbor[i] is the target and i the neighbor
          int nidx = neighbors[0];
          minneighbor[nidx] = min(minneighbor[nidx], (int)i);
        }
      }
    }
    __syncthreads();
    // Check which points we can add
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (memberarray[i] == -1) {
        if (shouldAdd[i]) {
          memberarray[i] = i;
        }
      }
    }
    __syncthreads();
    // Look at the neighbors. If the response is higher, replace
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (minneighbor[i] != nump+1) {
        if (memberarray[minneighbor[i]] == -1) {
          if (!shouldAdd[minneighbor[i]]) {
            const cv::KeyPoint &p0 = kpts[minneighbor[i]];
            const cv::KeyPoint &p1 = kpts[i];
            if (p0.response > p1.response) {
              memberarray[minneighbor[i]] = i;
              memberarray[i] = -2;
            } else {
              memberarray[minneighbor[i]] = -2;
            }
          }
        }
      }
    }
    __syncthreads();
    // End outer loop
    //for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    //	  storedmemberarray[i+offset] = memberarray[i];
    //	}
    //	__syncthreads();
    //}
    // Are we done?
    if (shouldBreak[0]) break;
    if (threadIdx.x == 0) {
      shouldBreak[0] = true;
    }
    __syncthreads();
  }
  __syncthreads();
}
// Compact and sort the surviving keypoints (single block).  2048-entry
// chunks of memberarray are bitonic-sorted in shared memory; survivors
// (member >= 0 and positive size) are unpacked — restoring the sub-pixel
// offsets stashed in octave/angle by FindExtrema — and appended to newkpts.
// Finally d_PointCounter is set to the surviving count.
__global__ void sortFiltered_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                    int *memberarray) {
  __shared__ int minneighbor[2048];
  __shared__ int curridx[1];
  int nump = d_PointCounter[0];
  if (threadIdx.x == 0) {
    curridx[0] = 0;
  }
  // Sort array
  const int upper = (nump + 2047) & (0xfffff800);
  for (int i = threadIdx.x; i < upper; i += 2 * FilterExtremaThreads) {
    // Stage two entries per thread; rejected/padding slots get nump+1 so
    // they sort to the end.
    minneighbor[threadIdx.x] =
        i >= nump ? nump+1 : (memberarray[i] < 0 ? nump+1 : (kpts[memberarray[i]].size < 0 ? nump+1 : memberarray[i]));
    minneighbor[threadIdx.x + 1024] =
        i + 1024 >= nump ? nump+1
        : (memberarray[i + 1024] < 0 ? nump+1 : (kpts[memberarray[i+1024]].size < 0 ? nump+1 : memberarray[i+1024]));
    __syncthreads();
// Sort and store keypoints
#pragma unroll 1
    for (int k = 1; k < 2048; k <<= 1) {
      int sortdir = (threadIdx.x & k) > 0 ? 0 : 1;
#pragma unroll 1
      for (int j = k; j > 0; j >>= 1) {
        int mask = 0x0fffffff * j;
        int tidx = ((threadIdx.x & mask) << 1) + (threadIdx.x & ~mask);
        atomicSort(minneighbor, tidx, j, j * sortdir);
        __syncthreads();
      }
    }
    __syncthreads();
#pragma unroll 1
    for (int k = threadIdx.x; k < 2048; k += 1024) {
      if (minneighbor[k] < nump) {
        // Restore subpixel component
        cv::KeyPoint &okpt = kpts[minneighbor[k]];
        // Unpack octave and signed sub-pixel x offset from the float bits
        // FindExtrema packed into okpt.octave.
        float octsub = fabs(*(float*)(&kpts[minneighbor[k]].octave));
        int octave = (int)octsub;
        float subp = (*(float*)(&kpts[minneighbor[k]].octave) < 0 ? -1 : 1) * (octsub - octave);
        float ratio = 1 << octave;
        cv::KeyPoint &tkpt = newkpts[k + curridx[0]];
        // okpt.angle temporarily held the sub-pixel y offset.
        tkpt.pt.y = ratio * ((int)(0.5f+okpt.pt.y / ratio) + okpt.angle);
        tkpt.pt.x = ratio * ((int)(0.5f+okpt.pt.x / ratio) + subp);
        // newkpts[k + curridx[0] + threadIdx.x].angle = 0; // This will be set elsewhere
        tkpt.class_id = okpt.class_id;
        tkpt.octave = octave;
        tkpt.response = okpt.response;
        tkpt.size = okpt.size;
      }
    }
    __syncthreads();
    // How many did we add?
    if (minneighbor[2047] < nump) {
      curridx[0] += 2048;
    } else {
      // The thread sitting at the boundary between survivors and padding
      // advances the output cursor.
      if (minneighbor[1024] < nump) {
        if (threadIdx.x < 1023 && minneighbor[1024 + threadIdx.x] < nump &&
            minneighbor[1024 + threadIdx.x + 1] == nump+1) {
          curridx[0] += 1024 + threadIdx.x + 1;
        }
      } else {
        if (minneighbor[threadIdx.x] < nump &&
            minneighbor[threadIdx.x + 1] == nump+1) {
          curridx[0] += threadIdx.x + 1;
        }
      }
      __syncthreads();
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    d_PointCounter[0] = curridx[0];
  }
}
// Sort the detected keypoints, remove duplicate/overlapping extrema and
// compact the survivors back into `pts`.
//   pts        - device buffer with raw detected keypoints (also output)
//   newpts     - device scratch buffer receiving the sorted copy
//   kptindices - device buffer of per-point neighbor-candidate lists
//                (`width` entries per point)
//   nump       - in/out: point count; updated to the filtered count on return
void FilterExtrema(cv::KeyPoint *pts, cv::KeyPoint *newpts, int* kptindices, int& nump) {
hipMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int));
unsigned int extremaidx_h[16];
hipMemcpyFromSymbol(extremaidx_h,d_ExtremaIdx,16*sizeof(unsigned int));
// Largest per-octave point count decides which sort variant fits in shared
// memory.
int maxnump = extremaidx_h[0];
for (int i=1; i<16; ++i) {
maxnump = max(maxnump,extremaidx_h[i]-extremaidx_h[i-1]);
}
int width = ceil(21) * ceil(21);
// Sort the list of points (one block per octave)
dim3 blocks(16, 1, 1);
dim3 threads(BitonicSortThreads, 1, 1);
if (maxnump <= 8*1024) {
bitonicSort << <blocks, threads>>> (pts, newpts);
} else {
// Too many points for the shared-memory sort: fall back to the
// global-memory variant, which needs a power-of-two scratch buffer.
int nump_ceil = 1;
while (nump_ceil < nump) nump_ceil <<= 1;
std::cout << "numpceil: " << nump_ceil << std::endl;
sortstruct_t<int>* sortstruct;
hipMalloc((void**)&sortstruct, nump_ceil*16*sizeof(sortstruct_t<int>));
bitonicSort_global << <blocks, threads>>> (pts, newpts, sortstruct,nump_ceil);
hipFree(sortstruct);
}
CHK
// Find all neighbors
hipStreamSynchronize(copyStream);
blocks.x = nump;
threads.x = FindNeighborsThreads;
FindNeighbors << <blocks, threads>>> (newpts, kptindices, width);
CHK
// Filter extrema (single block; temp buffers could be cached across calls
// to avoid per-call hipMalloc/hipFree overhead)
blocks.x = 1;
threads.x = FilterExtremaThreads;
int *buffer1, *buffer2;
hipMalloc((void**)&buffer1, nump*sizeof(int));
hipMalloc((void**)&buffer2, nump*sizeof(int));
char* buffer3;
hipMalloc((void**)&buffer3, nump);
FilterExtrema_kernel << <blocks, threads>>> (newpts, pts, kptindices, width,
buffer1, buffer2, buffer3);
threads.x = 1024;
sortFiltered_kernel << <blocks, threads>>> (newpts, pts, buffer1);
CHK
hipFree(buffer1);
hipFree(buffer2);
hipFree(buffer3);
// BUGFIX: this copy must be synchronous.  The original used
// hipMemcpyFromSymbolAsync into the caller's stack variable `nump` and
// returned immediately, so the caller could read `nump` before the copy
// completed.
hipMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int));
}
// Copy `numPts` keypoints from the device buffer into `h_pts`.
// NOTE(review): the copy is asynchronous on copyStream — the caller must
// synchronize that stream (e.g. WaitCuda()) before reading h_pts.
int GetPoints(std::vector<cv::KeyPoint> &h_pts, cv::KeyPoint *d_pts, int numPts) {
h_pts.resize(numPts);
safeCall(hipMemcpyAsync((float *)&h_pts[0], d_pts,
sizeof(cv::KeyPoint) * numPts,
hipMemcpyDeviceToHost, copyStream));
return numPts;
}
// Copy `numPts` 61-byte descriptors from device to a freshly allocated host
// matrix.  NOTE(review): asynchronous on copyStream — synchronize before
// using h_desc.
void GetDescriptors(cv::Mat &h_desc, cv::Mat &d_desc, int numPts) {
h_desc = cv::Mat(numPts, 61, CV_8U);
hipMemcpyAsync(h_desc.data, d_desc.data, numPts*61, hipMemcpyDeviceToHost, copyStream);
}
// M-LDB descriptor accumulation: one block (EXTRACT_S threads) per keypoint.
// Samples a patch rotated by the keypoint angle and accumulates intensity
// (im) and rotated derivatives (rx, ry) over 2x2, 3x3 and 4x4 grids
// (4 + 9 + 16 = 29 cells, padded to 30), then writes the 3*29 channel sums
// to _vals for the comparison stage (BuildDescriptor).
__global__ void ExtractDescriptors(cv::KeyPoint *d_pts, CudaImage *d_imgs,
float *_vals, int size2, int size3,
int size4) {
// Per-thread partial sums: 3 channels x 30 cells x EXTRACT_S threads.
__shared__ float acc_vals[3 * 30 * EXTRACT_S];
float *acc_vals_im = &acc_vals[0];
float *acc_vals_dx = &acc_vals[30 * EXTRACT_S];
float *acc_vals_dy = &acc_vals[2 * 30 * EXTRACT_S];
int p = blockIdx.x;
float *vals = &_vals[p * 3 * 29];
// Map keypoint coordinates into its octave's (downscaled) image.
float iratio = 1.0f / (1 << d_pts[p].octave);
int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
float xf = d_pts[p].pt.x * iratio;
float yf = d_pts[p].pt.y * iratio;
float ang = d_pts[p].angle;
float co = cos(ang);
float si = sin(ang);
int tx = threadIdx.x;
int lev = d_pts[p].class_id;
// Evolution images per level: [0]=smoothed image, [2]=dx, [3]=dy.
float *imd = d_imgs[4 * lev + 0].d_data;
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int winsize = max(3 * size3, 4 * size4);
// Zero all partial sums (indexing differs from the accumulation below but
// covers the same 3*30*EXTRACT_S floats).
for (int i = 0; i < 30; ++i) {
acc_vals_im[i * EXTRACT_S + tx] = 0.f;
acc_vals_dx[i * EXTRACT_S + tx] = 0.f;
acc_vals_dy[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
// Each thread strides over the sampling window.
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
// Rotate the sample offset into the keypoint frame.
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float im = imd[pos];
float dx = dxd[pos];
float dy = dyd[pos];
// Derivatives rotated by the keypoint orientation.
float rx = -dx * si + dy * co;
float ry = dx * co + dy * si;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// Add 2x2
acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx] += im;
acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 1] += rx;
acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 2] += ry;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// Add 3x3 (cells 4..12)
acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx] += im;
acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 1] += rx;
acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 2] += ry;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// Add 4x4 (cells 13..28)
acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx] += im;
acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 1] += rx;
acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 2] += ry;
}
}
__syncthreads();
// Reduce the per-thread partials across the 90 (30 cells x 3 channels)
// slots.  Each iteration handles two channel offsets: threads 0..31 do the
// even one, threads 32..63 the odd one.
float acc_reg;
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
for (int d = 0; d < 90; d += 30) {
// NOTE(review): for EXTRACT_S == 64 the condition tx_d < 32 always
// holds, so the fake_shfl_down calls (which contain __syncthreads)
// are reached by all threads — confirm if EXTRACT_S ever changes.
if (tx_d < 32) {
acc_reg = acc_vals[3 * 30 * tx_d + offset + d] +
acc_vals[3 * 30 * (tx_d + 32) + offset + d];
acc_reg += fake_shfl_down(acc_reg, 1);
acc_reg += fake_shfl_down(acc_reg, 2);
acc_reg += fake_shfl_down(acc_reg, 4);
acc_reg += fake_shfl_down(acc_reg, 8);
acc_reg += fake_shfl_down(acc_reg, 16);
}
if (tx_d == 0) {
acc_vals[offset + d] = acc_reg;
}
}
}
__syncthreads();
// Have 29*3 values to store
// They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
if (tx < 29) {
vals[tx] = acc_vals[tx];
vals[29 + tx] = acc_vals[29 + tx];
vals[2 * 29 + tx] = acc_vals[2 * 29 + tx];
}
}
// Serial variant of ExtractDescriptors: processes the im / dx / dy channels
// in three sequential passes over the patch, using a smaller 30*EXTRACT_S
// shared buffer plus a small final_vals staging array.
// NOTE(review): this kernel looks experimental/unused — (a) each reduction
// writes only final_vals[3 * offset] (stride-3 slots, same slots in all
// three passes) while the epilogue reads final_vals[0..86] contiguously;
// (b) there is no __syncthreads() between the end of a reduction and the
// re-zeroing of acc_vals for the next pass, so fast threads can clear data
// slower threads still read.  Confirm correctness before enabling.
__global__ void ExtractDescriptors_serial(cv::KeyPoint *d_pts,
CudaImage *d_imgs, float *_vals,
int size2, int size3, int size4) {
__shared__ float acc_vals[30 * EXTRACT_S];
__shared__ float final_vals[3 * 30];
int p = blockIdx.x;
float *vals = &_vals[p * 3 * 29];
// Map keypoint coordinates into its octave's image.
float iratio = 1.0f / (1 << d_pts[p].octave);
int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
float xf = d_pts[p].pt.x * iratio;
float yf = d_pts[p].pt.y * iratio;
float ang = d_pts[p].angle;
float co = cos(ang);
float si = sin(ang);
int tx = threadIdx.x;
int lev = d_pts[p].class_id;
// Evolution images per level: [0]=smoothed image, [2]=dx, [3]=dy.
float *imd = d_imgs[4 * lev + 0].d_data;
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int winsize = max(3 * size3, 4 * size4);
// IM
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
// Rotated sample position.
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float im = imd[pos];
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += im;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += im;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += im;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
// NOTE(review): warp-synchronous tree reduction with no barriers —
// relies on implicit lockstep, not guaranteed on Volta+.
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
// DX
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float dx = dxd[pos];
float dy = dyd[pos];
// x-derivative rotated into the keypoint frame.
float rx = -dx * si + dy * co;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += rx;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += rx;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += rx;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
// DY
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float dx = dxd[pos];
float dy = dyd[pos];
// y-derivative rotated into the keypoint frame.
float ry = dx * co + dy * si;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += ry;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += ry;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += ry;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
__syncthreads();
// Have 29*3 values to store
// They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
if (tx < 29) {
vals[tx] = final_vals[tx];
vals[29 + tx] = final_vals[29 + tx];
vals[2 * 29 + tx] = final_vals[2 * 29 + tx];
}
}
// Binary descriptor assembly: one block per keypoint; threads 0..60 each
// produce one output byte by comparing pairs of the accumulated channel sums
// (pair indices live in constant memory comp_idx_1 / comp_idx_2).  Byte 60
// uses only 6 comparisons (60*8 + 6 = 486 pairs total).
__global__ void BuildDescriptor(float *_valsim, unsigned char *_desc) {
int p = blockIdx.x;
size_t idx = threadIdx.x;
if (idx < 61) {
float *valsim = &_valsim[3 * 29 * p];
unsigned char *desc = &_desc[61 * p];
unsigned char desc_r = 0;
#pragma unroll
for (int i = 0; i < (idx == 60 ? 6 : 8); ++i) {
int idx1 = comp_idx_1[idx * 8 + i];
int idx2 = comp_idx_2[idx * 8 + i];
desc_r |= (valsim[idx1] > valsim[idx2] ? 1 : 0) << i;
}
desc[idx] = desc_r;
}
}
// Host wrapper: computes the binary descriptor for numPts keypoints.
// Stage 1 (ExtractDescriptors kernel) accumulates the per-cell channel sums
// into vals_d; stage 2 (BuildDescriptor) turns them into 61-byte descriptors
// in desc_d.  Returns elapsed GPU time (timing currently disabled -> 0).
double ExtractDescriptors(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs,
unsigned char *desc_d, float* vals_d, int patsize, int numPts) {
// Cell sizes of the 2x2 / 3x3 / 4x4 descriptor grids derived from patsize.
const int size2 = patsize;
const int size3 = ceil(2.0f * patsize / 3.0f);
const int size4 = ceil(0.5f * patsize);
dim3 grid(numPts);  // one block per keypoint
dim3 block(EXTRACT_S);
ExtractDescriptors << <grid, block>>>(d_pts, d_imgs, vals_d, size2, size3, size4);
CHK;
// Clear the output before the comparison stage fills each byte.
hipMemsetAsync(desc_d, 0, numPts * 61);
BuildDescriptor << <grid, 64>>> (vals_d, desc_d);
CHK;
double gpuTime = 0;  // timer disabled
#ifdef VERBOSE
printf("ExtractDescriptors time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
#define NTHREADS_MATCH 32
// Brute-force Hamming matcher: one block (NTHREADS_MATCH threads) per query
// descriptor.  Each thread scans a strided subset of the train descriptors
// (8 x 64-bit XOR/popcount over 64-byte pitched rows), keeping its local
// best / second-best; a shared-memory tree reduction then merges the
// per-thread results and thread 0 emits the two matches for this query.
__global__ void MatchDescriptors(unsigned char *d1, unsigned char *d2,
int pitch, int nkpts_2, cv::DMatch *matches) {
int p = blockIdx.x;
int x = threadIdx.x;
__shared__ int idxBest[NTHREADS_MATCH];
__shared__ int idxSecondBest[NTHREADS_MATCH];
__shared__ int scoreBest[NTHREADS_MATCH];
__shared__ int scoreSecondBest[NTHREADS_MATCH];
idxBest[x] = 0;
idxSecondBest[x] = 0;
scoreBest[x] = 512;        // 8*64 = 512 is the maximum possible distance
scoreSecondBest[x] = 512;
__syncthreads();
unsigned long long *d1i = (unsigned long long *)(d1 + pitch * p);
for (int i = 0; i < nkpts_2; i += NTHREADS_MATCH) {
unsigned long long *d2i = (unsigned long long *)(d2 + pitch * (x + i));
if (i + x < nkpts_2) {
// Hamming distance between query p and train i+x.
int score = 0;
#pragma unroll
for (int j = 0; j < 8; ++j) {
score += __popcll(d1i[j] ^ d2i[j]);
}
if (score < scoreBest[x]) {
scoreSecondBest[x] = scoreBest[x];
scoreBest[x] = score;
idxSecondBest[x] = idxBest[x];
idxBest[x] = i + x;
} else if (score < scoreSecondBest[x]) {
scoreSecondBest[x] = score;
idxSecondBest[x] = i + x;
}
}
}
__syncthreads();
// Tree reduction of the per-thread (best, second best) pairs.
for (int i = NTHREADS_MATCH / 2; i >= 1; i /= 2) {
if (x < i) {
if (scoreBest[x + i] < scoreBest[x]) {
scoreSecondBest[x] = scoreBest[x];
scoreBest[x] = scoreBest[x + i];
idxSecondBest[x] = idxBest[x];
idxBest[x] = idxBest[x + i];
} else if (scoreBest[x + i] < scoreSecondBest[x]) {
scoreSecondBest[x] = scoreBest[x + i];
idxSecondBest[x] = idxBest[x + i];
}
if (scoreSecondBest[x + i] < scoreSecondBest[x]) {
scoreSecondBest[x] = scoreSecondBest[x + i];
idxSecondBest[x] = idxSecondBest[x + i];
}
}
// BUGFIX: barrier every round.  The original relied on implicit
// warp-synchronous execution of the 32-thread block, which is not
// guaranteed on Volta+ (independent thread scheduling); without it the
// reads of slot [x+i] can race with the writes of the previous round.
// All threads of the block reach this barrier (the loop bounds do not
// depend on threadIdx), so it is safe.
__syncthreads();
}
if (x == 0) {
matches[2 * p].queryIdx = p;
matches[2 * p].trainIdx = idxBest[x];
matches[2 * p].distance = scoreBest[x];
matches[2 * p + 1].queryIdx = p;
matches[2 * p + 1].trainIdx = idxSecondBest[x];
matches[2 * p + 1].distance = scoreSecondBest[x];
}
}
// Brute-force matching using caller-provided device/host buffers.
// `pitch` is passed to the kernel for BOTH descriptor arrays, so descq_d and
// desct_d must have been allocated with the same row pitch (>= 64 bytes).
// Results: two best matches per query row, appended to `dmatches`.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
std::vector<std::vector<cv::DMatch> > &dmatches,
size_t pitch,
unsigned char* descq_d, unsigned char* desct_d, cv::DMatch* dmatches_d, cv::DMatch* dmatches_h) {
dim3 block(desc_query.rows);
MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
// Blocking copy: dmatches_h is valid as soon as this returns.
hipMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
hipMemcpyDeviceToHost);
// Repackage as knn-style (best, second best) pairs per query point.
for (int i = 0; i < desc_query.rows; ++i) {
std::vector<cv::DMatch> tdmatch;
//std::cout << dmatches_h[2*i].trainIdx << " - " << dmatches_h[2*i].queryIdx << std::endl;
tdmatch.push_back(dmatches_h[2 * i]);
tdmatch.push_back(dmatches_h[2 * i + 1]);
dmatches.push_back(tdmatch);
}
}
// Brute-force matching (self-allocating variant): uploads both descriptor
// sets, runs the matcher kernel and appends the two best matches per query
// descriptor to `dmatches`.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
std::vector<std::vector<cv::DMatch> > &dmatches) {
// BUGFIX: the kernel takes a single pitch for both descriptor arrays, but
// the original allocated them with two separate hipMallocPitch calls and
// passed only pitch1 — silently wrong results if the two pitches ever
// differed.  The kernel reads exactly 8 * 8 = 64 bytes per row, so a fixed
// 64-byte pitch is sufficient and guaranteed identical for both buffers.
const size_t pitch = 64;
unsigned char *descq_d;
hipMalloc((void**)&descq_d, pitch * desc_query.rows);
hipMemset(descq_d, 0, pitch * desc_query.rows);
hipMemcpy2D(descq_d, pitch, desc_query.data, desc_query.cols,
desc_query.cols, desc_query.rows, hipMemcpyHostToDevice);
unsigned char *desct_d;
hipMalloc((void**)&desct_d, pitch * desc_train.rows);
hipMemset(desct_d, 0, pitch * desc_train.rows);
hipMemcpy2D(desct_d, pitch, desc_train.data, desc_train.cols,
desc_train.cols, desc_train.rows, hipMemcpyHostToDevice);
dim3 block(desc_query.rows);
cv::DMatch *dmatches_d;
hipMalloc(&dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch));
MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
cv::DMatch *dmatches_h = new cv::DMatch[2 * desc_query.rows];
// Blocking copy: results are valid immediately afterwards.
hipMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
hipMemcpyDeviceToHost);
// Repackage as knn-style (best, second best) pairs per query point.
for (int i = 0; i < desc_query.rows; ++i) {
std::vector<cv::DMatch> tdmatch;
tdmatch.push_back(dmatches_h[2 * i]);
tdmatch.push_back(dmatches_h[2 * i + 1]);
dmatches.push_back(tdmatch);
}
hipFree(descq_d);
hipFree(desct_d);
hipFree(dmatches_d);
delete[] dmatches_h;
}
// Precompute the descriptor comparison pairs and upload them to constant
// memory (comp_idx_1 / comp_idx_2).  For each of the three channels
// (0 = intensity, 1 = dx, 2 = dy) every unordered cell pair within the 2x2
// grid (cells 0..3), the 3x3 grid (cells 4..12) and the 4x4 grid (cells
// 13..28) is compared: 3*(6 + 36 + 120) = 486 pairs, packed 8 per output
// byte (the last byte only uses 6).
void InitCompareIndices() {
int comp_idx_1_h[61 * 8];
int comp_idx_2_h[61 * 8];
int cntr = 0;
// Append all ordered pairs (j, i) with lo <= j < i < hi for channel c.
auto addPairs = [&](int lo, int hi, int c) {
for (int j = lo; j < hi - 1; ++j) {
for (int i = j + 1; i < hi; ++i) {
comp_idx_1_h[cntr] = 3 * j + c;
comp_idx_2_h[cntr] = 3 * i + c;
cntr++;
}
}
};
// 2x2 grid: cells 0..3, all three channels
addPairs(0, 4, 0);
addPairs(0, 4, 1);
addPairs(0, 4, 2);
// 3x3 grid: cells 4..12
addPairs(4, 13, 0);
addPairs(4, 13, 1);
addPairs(4, 13, 2);
// 4x4 grid: cells 13..28
addPairs(13, 29, 0);
addPairs(13, 29, 1);
addPairs(13, 29, 2);
hipMemcpyToSymbol(comp_idx_1, comp_idx_1_h, 8 * 61 * sizeof(int));
hipMemcpyToSymbol(comp_idx_2, comp_idx_2_h, 8 * 61 * sizeof(int));
}
// Dominant-orientation kernel: one block per keypoint.  Gradient responses
// inside a radius-6 disc (sampled every `step` pixels in the keypoint's
// octave) are binned into 42 angular bins; a sliding window of 7 consecutive
// bins is scanned and the keypoint angle is set from the strongest window.
__global__ void FindOrientation(cv::KeyPoint *d_pts, CudaImage *d_imgs) {
__shared__ float resx[42], resy[42];
__shared__ float re8x[42], re8y[42];
int p = blockIdx.x;
int tx = threadIdx.x;
if (tx < 42) resx[tx] = resy[tx] = 0.0f;
__syncthreads();
int lev = d_pts[p].class_id;
// Evolution images per level: [2]=dx, [3]=dy.
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int octave = d_pts[p].octave;
int step = (int)(0.5f * d_pts[p].size + 0.5f) >> octave;
int x = (int)(d_pts[p].pt.x + 0.5f) >> octave;
int y = (int)(d_pts[p].pt.y + 0.5f) >> octave;
// Thread -> (i, j) offset in a 16-wide layout, centered at -6..+9.
int i = (tx & 15) - 6;
int j = (tx / 16) - 6;
int r2 = i * i + j * j;
if (r2 < 36) {
// Gaussian weighting of the gradient sample.
float gweight = exp(-r2 / (2.5f * 2.5f * 2.0f));
int pos = (y + step * j) * pitch + (x + step * i);
float dx = gweight * dxd[pos];
float dy = gweight * dyd[pos];
float angle = atan2(dy, dx);
// Map angle (-pi..pi) to bin 0..41.
int a = max(min((int)(angle * (21 / CV_PI)) + 21, 41), 0);
atomicAdd(resx + a, dx);
atomicAdd(resy + a, dy);
}
__syncthreads();
// Sliding-window sum of 7 consecutive bins (wrapping around).
if (tx < 42) {
re8x[tx] = resx[tx];
re8y[tx] = resy[tx];
for (int k = tx + 1; k < tx + 7; k++) {
re8x[tx] += resx[k < 42 ? k : k - 42];
re8y[tx] += resy[k < 42 ? k : k - 42];
}
}
__syncthreads();
// Thread 0 picks the strongest window and writes the keypoint angle.
if (tx == 0) {
float maxr = 0.0f;
int maxk = 0;
for (int k = 0; k < 42; k++) {
float r = re8x[k] * re8x[k] + re8y[k] * re8y[k];
if (r > maxr) {
maxr = r;
maxk = k;
}
}
float angle = atan2(re8y[maxk], re8x[maxk]);
// Normalize to [0, 2*pi).
d_pts[p].angle = (angle < 0.0f ? angle + 2.0f * CV_PI : angle);
// printf("XXX %.2f %.2f %.2f\n", d_pts[p].pt.x, d_pts[p].pt.y,
// d_pts[p].angle/CV_PI*180.0f);
}
}
// Upload the evolution-image headers and compute per-keypoint orientations.
// NOTE(review): the (float *) cast of the CudaImage array is spurious (the
// copy is byte-wise) but harmless; the copy runs on the default stream and
// is awaited by hipStreamSynchronize(0) before the launch.
double FindOrientation(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs, int numPts) {
safeCall(hipMemcpyAsync(d_imgs, (float *)&h_imgs[0],
sizeof(CudaImage) * h_imgs.size(),
hipMemcpyHostToDevice));
// TimerGPU timer0(0);
hipStreamSynchronize(0);
dim3 blocks(numPts);  // one block per keypoint
dim3 threads(ORIENT_S);
FindOrientation << <blocks, threads>>> (d_pts, d_imgs);
CHK
// checkMsg("FindOrientation() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("FindOrientation time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
| b9fe3941bee66e07a7882b8df797cef448456c7e.cu | #include <opencv2/features2d/features2d.hpp>
#include "cuda_akaze.h"
#include "cudautils.h"
#define CONVROW_W 160
#define CONVCOL_W 32
#define CONVCOL_H 40
#define CONVCOL_S 8
#define SCHARR_W 32
#define SCHARR_H 16
#define NLDSTEP_W 32
#define NLDSTEP_H 13
#define ORIENT_S (13 * 16)
#define EXTRACT_S 64
#define MAX_BLOCK 512
__device__ __constant__ float d_Kernel[21];
__device__ unsigned int d_PointCounter[1];
__device__ unsigned int d_ExtremaIdx[16];
__device__ __constant__ int comp_idx_1[61 * 8];
__device__ __constant__ int comp_idx_2[61 * 8];
cudaStream_t copyStream;
//__device__ __constant__ float norm_factors[29];
#if 1
#define CHK
#else
#define CHK cudaDeviceSynchronize(); \
{ \
cudaError_t cuerr = cudaGetLastError(); \
if (cuerr) { \
std::cout << "Cuda error " << cudaGetErrorString(cuerr) << ". at " << __FILE__ << ":" << __LINE__ << std::endl; \
} \
}
#endif
// Block the host until every operation queued on the dedicated copy stream
// (asynchronous device->host result copies) has completed.
void WaitCuda() {
  cudaError_t err = cudaStreamSynchronize(copyStream);
  (void)err;  // return code intentionally ignored (matches existing behavior)
}
// Parameter pack for the separable-convolution kernels (passed by value so
// one struct replaces a long kernel argument list).
struct Conv_t {
float *d_Result;  // output image (device pointer)
float *d_Data;    // input image (device pointer)
int width;        // image width in pixels
int pitch;        // row pitch in elements
int height;       // image height in rows
};
// Row (horizontal) pass of the separable convolution.  Each block filters a
// CONVROW_W-wide strip of one image row; the 2*RADIUS+1 filter taps live in
// constant memory (d_Kernel).  Out-of-range loads replicate the border pixel.
template <int RADIUS>
__global__ void ConvRowGPU(struct Conv_t s) {
//__global__ void ConvRowGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVROW_W + 2 * RADIUS];
const int tx = threadIdx.x;
const int minx = blockIdx.x * CONVROW_W;
const int maxx = min(minx + CONVROW_W, s.width);
const int yptr = blockIdx.y * s.pitch;
const int loadPos = minx + tx - RADIUS;
const int writePos = minx + tx;
// Stage the strip plus left/right halo into shared memory.
if (loadPos < 0)
data[tx] = s.d_Data[yptr];
else if (loadPos >= s.width)
data[tx] = s.d_Data[yptr + s.width - 1];
else
data[tx] = s.d_Data[yptr + loadPos];
__syncthreads();
// Only the first CONVROW_W threads produce output; the rest only loaded halo.
if (writePos < maxx && tx < CONVROW_W) {
float sum = 0.0f;
for (int i = 0; i <= (2 * RADIUS); i++) sum += data[tx + i] * d_Kernel[i];
s.d_Result[yptr + writePos] = sum;
}
}
///////////////////////////////////////////////////////////////////////////////
// Column convolution filter
///////////////////////////////////////////////////////////////////////////////
// Column (vertical) pass of the separable convolution.  Each block filters a
// CONVCOL_W x CONVCOL_H tile, staging the tile plus top/bottom halo into
// shared memory in CONVCOL_S-row steps; border rows are replicated.
template <int RADIUS>
__global__ void ConvColGPU(struct Conv_t s) {
//__global__ void ConvColGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVCOL_W * (CONVCOL_H + 2 * RADIUS)];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int miny = blockIdx.y * CONVCOL_H;
const int maxy = min(miny + CONVCOL_H, s.height) - 1;
const int totStart = miny - RADIUS;
const int totEnd = maxy + RADIUS;
const int colStart = blockIdx.x * CONVCOL_W + tx;
const int colEnd = colStart + (s.height - 1) * s.pitch;
const int smemStep = CONVCOL_W * CONVCOL_S;
const int gmemStep = s.pitch * CONVCOL_S;
// Load the column segment (with halo) into shared memory, replicating the
// first/last row at the image borders.
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (totStart + ty) * s.pitch;
for (int y = totStart + ty; y <= totEnd; y += blockDim.y) {
if (y < 0)
data[smemPos] = s.d_Data[colStart];
else if (y >= s.height)
data[smemPos] = s.d_Data[colEnd];
else
data[smemPos] = s.d_Data[gmemPos];
smemPos += smemStep;
gmemPos += gmemStep;
}
}
__syncthreads();
// Convolve down the column and write the tile's output rows.
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (miny + ty) * s.pitch;
for (int y = miny + ty; y <= maxy; y += blockDim.y) {
float sum = 0.0f;
for (int i = 0; i <= 2 * RADIUS; i++)
sum += data[smemPos + i * CONVCOL_W] * d_Kernel[i];
s.d_Result[gmemPos] = sum;
smemPos += smemStep;
gmemPos += gmemStep;
}
}
}
// Apply a separable 2*RADIUS+1 tap filter: row pass into `temp`, then column
// pass into `outimg`.  The taps are uploaded to constant memory (d_Kernel).
// NOTE(review): the tap upload uses cudaMemcpyToSymbolAsync from the caller's
// (likely pageable, stack-resident) h_Kernel buffer — confirm the copy
// completes before h_Kernel goes out of scope on the caller side.
// Returns elapsed GPU time (timing currently disabled -> 0).
template <int RADIUS>
double SeparableFilter(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
float *h_Kernel) {
int width = inimg.width;
int pitch = inimg.pitch;
int height = inimg.height;
float *d_DataA = inimg.d_data;
float *d_DataB = outimg.d_data;
float *d_Temp = temp.d_data;
if (d_DataA == NULL || d_DataB == NULL || d_Temp == NULL) {
printf("SeparableFilter: missing data\n");
return 0.0;
}
// TimerGPU timer0(0);
const unsigned int kernelSize = (2 * RADIUS + 1) * sizeof(float);
safeCall(cudaMemcpyToSymbolAsync(d_Kernel, h_Kernel, kernelSize));
// Row pass: inimg -> temp.
dim3 blockGridRows(iDivUp(width, CONVROW_W), height);
dim3 threadBlockRows(CONVROW_W + 2 * RADIUS);
struct Conv_t s;
s.d_Result = d_Temp;
s.d_Data = d_DataA;
s.width = width;
s.pitch = pitch;
s.height = height;
ConvRowGPU<RADIUS> << <blockGridRows, threadBlockRows>>> (s);
// checkMsg("ConvRowGPU() execution failed\n");
// safeCall(cudaThreadSynchronize());
// Column pass: temp -> outimg.
dim3 blockGridColumns(iDivUp(width, CONVCOL_W), iDivUp(height, CONVCOL_H));
dim3 threadBlockColumns(CONVCOL_W, CONVCOL_S);
s.d_Result = d_DataB;
s.d_Data = d_Temp;
ConvColGPU<RADIUS> << <blockGridColumns, threadBlockColumns>>> (s);
// checkMsg("ConvColGPU() execution failed\n");
// safeCall(cudaThreadSynchronize());
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("SeparableFilter time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
// Gaussian low-pass filter with compile-time radius: builds a normalized
// 1-D Gaussian kernel of variance `var` and applies it separably.
template <int RADIUS>
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
               double var) {
  const int taps = 2 * RADIUS + 1;
  float weights[taps];
  float total = 0.0f;
  for (int i = 0; i < taps; i++) {
    int j = i - RADIUS;  // tap offset relative to the center
    weights[i] = (float)expf(-(double)j * j / 2.0 / var);
    total += weights[i];
  }
  // Normalize so the taps sum to one.
  for (int i = 0; i < taps; i++) weights[i] /= total;
  return SeparableFilter<RADIUS>(inimg, outimg, temp, weights);
}
// Dispatch to the smallest compile-time radius that covers `kernsize` taps
// (radius 2 for <=5 taps, up to radius 5; larger sizes are clamped to 5 with
// a warning).
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp, double var,
               int kernsize) {
  if (kernsize <= 5) return LowPass<2>(inimg, outimg, temp, var);
  if (kernsize <= 7) return LowPass<3>(inimg, outimg, temp, var);
  if (kernsize <= 9) return LowPass<4>(inimg, outimg, temp, var);
  if (kernsize > 11)
    std::cerr << "Kernels larger than 11 not implemented" << std::endl;
  return LowPass<5>(inimg, outimg, temp, var);
}
// Warp shuffle-down emulated through shared memory: returns the calling
// lane's value from lane+offset (0 if that source lane is outside `width`).
// BUGFIX: made generic over the element type.  The original took and
// returned `int`, silently truncating the float partial sums passed in by
// the descriptor-reduction code (float -> int -> float round trip).
// NOTE: must be called by ALL threads of the block together (it contains
// __syncthreads), and only one value per thread may be in flight at a time
// (single static buffer per instantiated type).
template <typename T>
__inline__ __device__
T fake_shfl_down(T val, int offset, int width = 32) {
static __shared__ T shared[MAX_BLOCK];
int lane = threadIdx.x % 32;
shared[threadIdx.x] = val;
__syncthreads();
val = (lane + offset < width) ? shared[threadIdx.x + offset] : T(0);
__syncthreads();
return val;
}
// 3x3 Scharr derivative kernel: writes x- and y-derivatives of imgd into
// lxd/lyd using the 3/10/3 Scharr stencil.  Each (SCHARR_W+2)x(SCHARR_H+2)
// thread block stages a tile plus a 1-pixel halo in shared memory; border
// samples use reflected indices.
__global__ void Scharr(float *imgd, float *lxd, float *lyd, int width,
int pitch, int height) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
// Reflect at the borders (x==0 -> 1; past the right edge -> width-2).
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
// Interior threads apply the stencil around their staged neighborhood.
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
lxd[y * pitch + x] = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
lyd[y * pitch + x] = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
}
}
// Launch the Scharr kernel: computes x/y derivative images of `img` into
// `lx`/`ly`.  Returns elapsed GPU time (timing currently disabled -> 0).
double Scharr(CudaImage &img, CudaImage &lx, CudaImage &ly) {
  const dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  const dim3 block(SCHARR_W + 2, SCHARR_H + 2);  // +2 halo for the 3x3 stencil
  Scharr << <grid, block>>>
      (img.d_data, lx.d_data, ly.d_data, img.width, img.pitch, img.height);
  double gpuTime = 0;  // timer disabled
#ifdef VERBOSE
  printf("Scharr time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Diffusivity ("flow") kernel: computes Scharr gradients of imgd in shared
// memory, then evaluates the selected conductivity function g(|grad|^2/k^2)
// per pixel into flowd.  invk = 1 / k_contrast^2.
__global__ void Flow(float *imgd, float *flowd, int width, int pitch,
int height, DIFFUSIVITY_TYPE type, float invk) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
// Reflected border indices (same staging as the Scharr kernel).
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
// Scharr gradient at this pixel.
float lx = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
float ly = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
float dif2 = invk * (lx * lx + ly * ly);
// Perona-Malik g1/g2, Weickert, or Charbonnier conductivity.
if (type == PM_G1)
flowd[y * pitch + x] = exp(-dif2);
else if (type == PM_G2)
flowd[y * pitch + x] = 1.0f / (1.0f + dif2);
else if (type == WEICKERT)
flowd[y * pitch + x] = 1.0f - exp(-3.315 / (dif2 * dif2 * dif2 * dif2));
else
flowd[y * pitch + x] = 1.0f / sqrt(1.0f + dif2);
}
}
// Launch the diffusivity kernel: fills `flow` with the conductivity image of
// `img` for the given diffusivity type and contrast factor.  Returns elapsed
// GPU time (timing currently disabled -> 0).
double Flow(CudaImage &img, CudaImage &flow, DIFFUSIVITY_TYPE type,
            float kcontrast) {
  const float invk = 1.0f / (kcontrast * kcontrast);
  const dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  const dim3 block(SCHARR_W + 2, SCHARR_H + 2);  // +2 halo for the stencil
  Flow << <grid, block>>> (img.d_data, flow.d_data, img.width, img.pitch,
                           img.height, type, invk);
  double gpuTime = 0;  // timer disabled
#ifdef VERBOSE
  printf("Flow time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Parameter pack for the NLDStep kernel (nonlinear diffusion step).
struct NLDStep_t {
float *imgd;    // current evolution image (device)
float *flod;    // diffusivity/flow image (device)
float *temd;    // output increment image (device)
int width;      // image width in pixels
int pitch;      // row pitch in elements
int height;     // image height in rows
float stepsize; // pre-scaled diffusion step (0.5 * caller stepsize)
};
//__global__ void NLDStep(float *imgd, float *flod, float *temd, int width, int
// pitch, int height, float stepsize)
// One explicit nonlinear-diffusion step: computes the flux divergence
// stepsize * div(g * grad(img)) into s.temd, using staged tiles of the image
// and the diffusivity with a 1-pixel replicated halo.
__global__ void NLDStep(NLDStep_t s) {
#undef BW
#define BW (NLDSTEP_W + 2)
__shared__ float ibuff[BW * (NLDSTEP_H + 2)];
__shared__ float fbuff[BW * (NLDSTEP_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * NLDSTEP_W + tx;
int y = blockIdx.y * NLDSTEP_H + ty;
// Replicated border (clamp to first/last pixel).
int xp = (x == 0 ? 0 : (x > s.width ? s.width - 1 : x - 1));
int yp = (y == 0 ? 0 : (y > s.height ? s.height - 1 : y - 1));
ibuff[ty * BW + tx] = s.imgd[yp * s.pitch + xp];
fbuff[ty * BW + tx] = s.flod[yp * s.pitch + xp];
__syncthreads();
if (tx < NLDSTEP_W && ty < NLDSTEP_H && x < s.width && y < s.height) {
float *ib = ibuff + (ty + 1) * BW + (tx + 1);
float *fb = fbuff + (ty + 1) * BW + (tx + 1);
float ib0 = ib[0];
float fb0 = fb[0];
// Fluxes across the four pixel faces, weighted by the summed
// diffusivities of the two adjacent pixels.
float xpos = (fb0 + fb[+1]) * (ib[+1] - ib0);
float xneg = (fb0 + fb[-1]) * (ib0 - ib[-1]);
float ypos = (fb0 + fb[+BW]) * (ib[+BW] - ib0);
float yneg = (fb0 + fb[-BW]) * (ib0 - ib[-BW]);
s.temd[y * s.pitch + x] = s.stepsize * (xpos - xneg + ypos - yneg);
}
}
// Argument bundle for the NLDUpdate kernel (passed by value at launch).
struct NLDUpdate_t {
  float *imgd;  // evolution image, updated in place
  float *temd;  // update image to add onto imgd
  int width;    // image width in pixels
  int pitch;    // row pitch in floats
  int height;   // image height in pixels
};
//__global__ void NLDUpdate(float *imgd, float *temd, int width, int pitch, int
// height)
// Adds the diffusion update (s.temd) onto the evolution image (s.imgd) in
// place. Expects 32x16 thread blocks, one thread per pixel.
__global__ void NLDUpdate(NLDUpdate_t s) {
  const int col = blockIdx.x * 32 + threadIdx.x;
  const int row = blockIdx.y * 16 + threadIdx.y;
  if (col >= s.width || row >= s.height) return;
  const int idx = row * s.pitch + col;
  s.imgd[idx] += s.temd[idx];
}
// Performs one explicit nonlinear-diffusion step on img using the
// diffusivity image flow: first computes the update into temp (NLDStep
// kernel), then adds it onto img in place (NLDUpdate kernel).
// Returns the measured GPU time (timing currently disabled, returns 0).
double NLDStep(CudaImage &img, CudaImage &flow, CudaImage &temp,
               float stepsize) {
  dim3 blocks0(iDivUp(img.width, NLDSTEP_W), iDivUp(img.height, NLDSTEP_H));
  dim3 threads0(NLDSTEP_W + 2, NLDSTEP_H + 2);  // +2 for the halo threads
  NLDStep_t s;
  s.imgd = img.d_data;
  s.flod = flow.d_data;
  s.temd = temp.d_data;
  s.width = img.width;
  s.pitch = img.pitch;
  s.height = img.height;
  // The kernel sums (not averages) the diffusivity of each pixel pair, so
  // the step is pre-scaled by one half here. Use a float literal to avoid
  // an unnecessary double-precision intermediate.
  s.stepsize = 0.5f * stepsize;
  NLDStep << <blocks0, threads0>>> (s);
  dim3 blocks1(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 threads1(32, 16);
  NLDUpdate_t su;
  su.imgd = img.d_data;
  su.temd = temp.d_data;
  su.width = img.width;
  su.height = img.height;
  su.pitch = img.pitch;
  NLDUpdate << <blocks1, threads1>>> (su);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("NLDStep time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Downsamples iimd (iwidth x iheight) to oimd (owidth x oheight) with an
// area-weighted filter. When the width halves exactly, each output pixel
// sums two input columns; otherwise three columns are blended with weights
// (f0, owidth, f2). The same scheme is applied vertically through the
// shared-memory buffer. Launch with 16x16 thread blocks.
__global__ void HalfSample(float *iimd, float *oimd, int iwidth, int iheight,
                           int ipitch, int owidth, int oheight, int opitch) {
  __shared__ float buffer[16 * 33];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * 16 + tx;
  int y = blockIdx.y * 16 + ty;
  if (x >= owidth || y >= oheight) return;
  float *ptri = iimd + (2 * y) * ipitch + (2 * x);
  if (2 * owidth == iwidth) {
    buffer[ty * 32 + tx] = owidth * (ptri[0] + ptri[1]);
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = owidth * (ptri[0] + ptri[1]);
    if (ty == 15) {
      ptri += ipitch;
      buffer[tx + 32 * 16] = owidth * (ptri[0] + ptri[1]);
    } else if (y * 2 + 3 == iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] = owidth * (ptri[0] + ptri[1]);
    }
  } else {
    float f0 = owidth - x;
    float f2 = 1 + x;
    buffer[ty * 32 + tx] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    ptri += ipitch;
    buffer[ty * 32 + tx + 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    if (ty == 15 && 2 * oheight != iheight) {
      ptri += ipitch;
      // BUGFIX: the third tap must read ptri[2] (was ptri[1]), matching the
      // (f0, owidth, f2) weighting used everywhere else in this kernel.
      buffer[tx + 32 * 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    } else if (y * 2 + 3 == iheight && 2 * oheight != iheight) {
      ptri += ipitch;
      buffer[tx + 32 * (ty + 1)] =
          f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
    }
  }
  __syncthreads();
  float *buff = buffer + 32 * ty + tx;
  if (2 * oheight == iheight)
    oimd[y * opitch + x] = oheight * (buff[0] + buff[16]) / (iwidth * iheight);
  else {
    float f0 = oheight - y;
    float f2 = 1 + y;
    oimd[y * opitch + x] = (f0 * buff[0] + oheight * buff[16] + f2 * buff[32]) /
                           (iwidth * iheight);
  }
}
// Fast 2:1 downsampling: each output pixel is the average of the matching
// 2x2 block of the input. Expects 32x16 thread blocks.
__global__ void HalfSample2(float *iimd, float *oimd, int ipitch, int owidth,
                            int oheight, int opitch) {
  const int ox = blockIdx.x * 32 + threadIdx.x;
  const int oy = blockIdx.y * 16 + threadIdx.y;
  if (ox >= owidth || oy >= oheight) return;
  const float *src = iimd + (2 * oy) * ipitch + (2 * ox);
  oimd[oy * opitch + ox] =
      0.25f * (src[0] + src[1] + src[ipitch + 0] + src[ipitch + 1]);
}
// Downsamples inimg into outimg, dispatching to the fast 2:1 averaging
// kernel when both dimensions halve exactly, and to the general
// area-weighted kernel otherwise.
// Returns the measured GPU time (timing currently disabled, returns 0).
double HalfSample(CudaImage &inimg, CudaImage &outimg) {
  const bool exactHalf =
      inimg.width == 2 * outimg.width && inimg.height == 2 * outimg.height;
  if (exactHalf) {
    dim3 grid(iDivUp(outimg.width, 32), iDivUp(outimg.height, 16));
    dim3 block(32, 16);
    HalfSample2 << <grid, block>>> (inimg.d_data, outimg.d_data,
                                    inimg.pitch, outimg.width,
                                    outimg.height, outimg.pitch);
  } else {
    dim3 grid(iDivUp(outimg.width, 16), iDivUp(outimg.height, 16));
    dim3 block(16, 16);
    HalfSample << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.width,
                                   inimg.height, inimg.pitch, outimg.width,
                                   outimg.height, outimg.pitch);
  }
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("HalfSample time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Copies the pixel data of inimg into outimg (device-to-device, async on
// the default stream). Width and height are taken from inimg; the images
// may have different pitches.
// Returns the measured GPU time (timing currently disabled, returns 0).
double Copy(CudaImage &inimg, CudaImage &outimg) {
  double gpuTime = 0;  // timing disabled
  // BUGFIX: cudaMemcpy2DAsync takes (dst, dpitch, src, spitch, ...). The
  // source pitch must be inimg.pitch — the original passed outimg.pitch for
  // both, which mis-addresses source rows whenever the pitches differ.
  safeCall(cudaMemcpy2DAsync(outimg.d_data, sizeof(float) * outimg.pitch,
                             inimg.d_data, sizeof(float) * inimg.pitch,
                             sizeof(float) * inimg.width, inimg.height,
                             cudaMemcpyDeviceToDevice));
#ifdef VERBOSE
  printf("Copy time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Allocates one large device buffer holding the whole scale-space pyramid
// (omax octaves x num levels) plus the keypoint arrays, descriptor buffers,
// neighbor-index scratch and the device-side CudaImage array. All offsets
// below are counted in floats (sizeof(float) units), then turned into
// pointers once the single allocation exists. Outputs are returned through
// the reference parameters; the base pointer is returned for FreeBuffers().
float *AllocBuffers(int width, int height, int num, int omax, int &maxpts,
                    std::vector<CudaImage> &buffers, cv::KeyPoint *&pts,
                    cv::KeyPoint *&ptsbuffer, int *&ptindices, unsigned char *&desc, float *&descbuffer, CudaImage *&ims) {
  // Round the keypoint capacity up to a multiple of 4.
  maxpts = 4 * ((maxpts+3)/4);
  buffers.resize(omax * num);
  int w = width;
  int h = height;
  int p = iAlignUp(w, 128);
  // Running offset in floats; NOTE(review): int may overflow for very large
  // images/pyramids — verify against expected maximum sizes.
  int size = 0;
  for (int i = 0; i < omax; i++) {
    for (int j = 0; j < num; j++) {
      CudaImage &buf = buffers[i * num + j];
      buf.width = w;
      buf.height = h;
      buf.pitch = p;
      // Temporarily store the float offset in the pointer field; it is
      // rebased onto the real allocation below.
      buf.d_data = (float *)((long)size);
      size += h * p;
    }
    // Next octave is half resolution.
    w /= 2;
    h /= 2;
    p = iAlignUp(w, 128);
  }
  // Reserve the remaining regions (all offsets in floats).
  int ptsstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int ptsbufferstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int descstart = size;
  size += sizeof(unsigned char)*maxpts*61/sizeof(float);
  int descbufferstart = size;
  size += sizeof(float)*3*29*maxpts / sizeof(float);
  int indicesstart = size;
  size += 21*21*sizeof(int)*maxpts/sizeof(float);
  int imgstart = size;
  size += sizeof(CudaImage) * (num * omax + sizeof(float) - 1) / sizeof(float);
  float *memory = NULL;
  size_t pitch;
  // NOTE(review): "size" is in floats, so this prints Mfloats, not Mbytes.
  std::cout << "allocating " << size/1024./1024. << " Mbytes of gpu memory\n";
  // Allocate as a pitched 4096-byte-wide region tall enough to cover
  // size*sizeof(float) bytes.
  safeCall(cudaMallocPitch((void **)&memory, &pitch, (size_t)4096,
                           (size + 4095) / 4096 * sizeof(float)));
  // Rebase the per-level offsets onto the real allocation.
  for (int i = 0; i < omax * num; i++) {
    CudaImage &buf = buffers[i];
    buf.d_data = memory + (long)buf.d_data;
  }
  pts = (cv::KeyPoint *)(memory + ptsstart);
  ptsbuffer = (cv::KeyPoint *)(memory + ptsbufferstart);
  desc = (unsigned char *)(memory + descstart);
  descbuffer = (float*)(memory + descbufferstart);
  ptindices = (int*)(memory + indicesstart);
  ims = (CudaImage *)(memory + imgstart);
  InitCompareIndices();
  cudaStreamCreate(&copyStream);
  return memory;
}
// Releases the single device allocation created by AllocBuffers().
void FreeBuffers(float *buffers) {
  safeCall(cudaFree(buffers));
}
// Device-global scratch for ContrastPercentile(): the running maximum
// gradient magnitude (float bits stored in an unsigned int so atomicMax can
// be used) and the gradient histogram.
__device__ unsigned int d_Maxval[1];
__device__ int d_Histogram[512];
// Tile sizes for the MaxContrast and HistContrast kernels.
#define CONTRAST_W 64
#define CONTRAST_H 7
#define HISTCONT_W 64
#define HISTCONT_H 8
#define HISTCONT_R 4
// Computes the Scharr gradient magnitude of imgd into cond and tracks the
// global maximum magnitude in d_Maxval. The float magnitude is stored
// bitwise in unsigned ints so atomicMax gives the float maximum (valid for
// non-negative values). Thread blocks are (CONTRAST_W+2) x (CONTRAST_H+2)
// to cover the stencil halo.
__global__ void MaxContrast(float *imgd, float *cond, int width, int pitch,
                            int height) {
#define WID (CONTRAST_W + 2)
  __shared__ float buffer[WID * (CONTRAST_H + 2)];
  __shared__ unsigned int maxval[32];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  // NOTE(review): a float literal is assigned to an unsigned slot here;
  // it converts to 0 either way, but plain 0 would be clearer.
  if (tx < 32 && !ty) maxval[tx] = 0.0f;
  __syncthreads();
  int x = blockIdx.x * CONTRAST_W + tx;
  int y = blockIdx.y * CONTRAST_H + ty;
  // NOTE(review): threads that return here skip the __syncthreads() calls
  // below while the rest of the block still executes them — a divergent
  // barrier, which CUDA leaves undefined. Works in practice on many GPUs
  // but should be restructured; confirm before relying on it.
  if (x >= width || y >= height) return;
  float *b = buffer + ty * WID + tx;
  b[0] = imgd[y * pitch + x];
  __syncthreads();
  if (tx < CONTRAST_W && ty < CONTRAST_H && x < width - 2 && y < height - 2) {
    // Scharr-style 3/10 weighted derivative over the 3x3 neighborhood.
    float dx = 3.0f * (b[0] - b[2] + b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[WID] - b[WID + 2]);
    float dy = 3.0f * (b[0] + b[2] - b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[1] - b[2 * WID + 1]);
    float grad = sqrt(dx * dx + dy * dy);
    cond[(y + 1) * pitch + (x + 1)] = grad;
    // Reinterpret the float bits so atomicMax orders them correctly.
    unsigned int *gradi = (unsigned int *)&grad;
    atomicMax(maxval + (tx & 31), *gradi);
  }
  __syncthreads();
  // Reduce the per-lane maxima into the global device maximum.
  if (tx < 32 && !ty) atomicMax(d_Maxval, maxval[tx]);
}
// Accumulates a histogram of normalized gradient magnitudes (cond scaled by
// imaxval) into d_Histogram. Each block first fills a shared-memory
// histogram, then merges its non-empty bins into the global one. Each
// thread processes HISTCONT_R rows spaced HISTCONT_H apart; the one-pixel
// image border is excluded.
__global__ void HistContrast(float *cond, int width, int pitch, int height,
                             float imaxval, int nbins) {
  __shared__ int hist[512];
  const int bin = threadIdx.y * HISTCONT_W + threadIdx.x;
  if (bin < nbins) hist[bin] = 0;
  __syncthreads();
  const int x = blockIdx.x * HISTCONT_W + threadIdx.x;
  int y = blockIdx.y * HISTCONT_H * HISTCONT_R + threadIdx.y;
  if (x > 0 && x < width - 1) {
    for (int r = 0; r < HISTCONT_R; r++, y += HISTCONT_H) {
      if (y > 0 && y < height - 1) {
        int idx = min((int)(nbins * cond[y * pitch + x] * imaxval), nbins - 1);
        atomicAdd(hist + idx, 1);
      }
    }
  }
  __syncthreads();
  if (bin < nbins && hist[bin] > 0) atomicAdd(d_Histogram + bin, hist[bin]);
}
// Estimates the contrast parameter k as the perc-percentile of the gradient
// magnitude distribution of a blurred copy of img: blur, find the maximum
// gradient, histogram the normalized gradients, then walk the histogram to
// the requested percentile. Falls back to 0.03 when the percentile is not
// reached within nbins bins.
double ContrastPercentile(CudaImage &img, CudaImage &temp, CudaImage &blur,
                          float perc, int nbins, float &contrast) {
  LowPass(img, blur, temp, 1.0f, 5);
  // The float maximum is held bitwise in the unsigned d_Maxval symbol.
  float h_Maxval = 0.0f;
  safeCall(cudaMemcpyToSymbolAsync(d_Maxval, &h_Maxval, sizeof(float)));
  dim3 blocks1(iDivUp(img.width, CONTRAST_W), iDivUp(img.height, CONTRAST_H));
  dim3 threads1(CONTRAST_W + 2, CONTRAST_H + 2);
  MaxContrast << <blocks1, threads1>>>
      (blur.d_data, temp.d_data, blur.width, blur.pitch, blur.height);
  // NOTE(review): h_Maxval is read on the host right after this Async copy
  // with no explicit sync. This relies on default-stream + pageable-memory
  // copies behaving synchronously — confirm, or add a stream sync.
  safeCall(cudaMemcpyFromSymbolAsync(&h_Maxval, d_Maxval, sizeof(float)));
  if (nbins > 512) {
    printf(
        "Warning: Largest number of possible bins in ContrastPercentile() is "
        "512\n");
    nbins = 512;
  }
  int h_Histogram[512];
  memset(h_Histogram, 0, nbins * sizeof(int));
  safeCall(
      cudaMemcpyToSymbolAsync(d_Histogram, h_Histogram, nbins * sizeof(int)));
  dim3 blocks2(iDivUp(temp.width, HISTCONT_W),
               iDivUp(temp.height, HISTCONT_H * HISTCONT_R));
  dim3 threads2(HISTCONT_W, HISTCONT_H);
  // NOTE(review): if the image is perfectly flat, h_Maxval is 0 and this
  // divides by zero — verify upstream guarantees a non-zero gradient.
  HistContrast << <blocks2, threads2>>> (temp.d_data, temp.width, temp.pitch,
                                         temp.height, 1.0f / h_Maxval, nbins);
  safeCall(
      cudaMemcpyFromSymbolAsync(h_Histogram, d_Histogram, nbins * sizeof(int)));
  // Walk the histogram until the percentile count is reached; the border
  // pixels were excluded from the histogram.
  int npoints = (temp.width - 2) * (temp.height - 2);
  int nthreshold = (int)(npoints * perc);
  int k = 0, nelements = 0;
  for (k = 0; nelements < nthreshold && k < nbins; k++)
    nelements += h_Histogram[k];
  contrast = (nelements < nthreshold ? 0.03f : h_Maxval * ((float)k / nbins));
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("ContrastPercentile time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Computes smoothed first derivatives of imd into lxd (x) and lyd (y) using
// a weighted stencil with taps at distance `step`, mirroring coordinates at
// the image borders. Expects 32x16 thread blocks, one thread per pixel.
__global__ void Derivate(float *imd, float *lxd, float *lyd, int width,
                         int pitch, int height, int step, float fac1,
                         float fac2) {
  const int x = blockIdx.x * 32 + threadIdx.x;
  const int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirror out-of-range tap coordinates back inside the image.
  const int xl = (x < step ? step - x : x - step);
  const int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  const int yl = (y < step ? step - y : y - step);
  const int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  // Precompute row offsets for the upper, lower and center tap rows.
  const int ru = yl * pitch;
  const int rl = yh * pitch;
  const int rc = y * pitch;
  const float ul = imd[ru + xl];
  const float ur = imd[ru + xh];
  const float ll = imd[rl + xl];
  const float lr = imd[rl + xh];
  lxd[rc + x] = fac1 * (ur + lr - ul - ll) + fac2 * (imd[rc + xh] - imd[rc + xl]);
  lyd[rc + x] = fac1 * (lr + ll - ur - ul) + fac2 * (imd[rl + x] - imd[ru + x]);
}
// Computes the determinant of the Hessian from the first-derivative images:
// lxx and lyx are derivatives of lxd, lyy is the y-derivative of lyd, and
// detd = lxx*lyy - lyx^2. Uses the same mirrored-border weighted stencil as
// Derivate(). Expects 32x16 thread blocks, one thread per pixel.
__global__ void HessianDeterminant(float *lxd, float *lyd, float *detd,
                                   int width, int pitch, int height, int step,
                                   float fac1, float fac2) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirror out-of-range tap coordinates back inside the image.
  int xl = (x < step ? step - x : x - step);
  int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  int yl = (y < step ? step - y : y - step);
  int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  float ul = lxd[yl * pitch + xl];
  float ur = lxd[yl * pitch + xh];
  float ll = lxd[yh * pitch + xl];
  float lr = lxd[yh * pitch + xh];
  float cl = lxd[y * pitch + xl];
  float cr = lxd[y * pitch + xh];
  float lxx = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
  float uc = lxd[yl * pitch + x];
  float lc = lxd[yh * pitch + x];
  float lyx = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  // The neighborhood variables are reused below for taps of lyd.
  ul = lyd[yl * pitch + xl];
  ur = lyd[yl * pitch + xh];
  ll = lyd[yh * pitch + xl];
  lr = lyd[yh * pitch + xh];
  uc = lyd[yl * pitch + x];
  lc = lyd[yh * pitch + x];
  float lyy = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  detd[y * pitch + x] = lxx * lyy - lyx * lyx;
}
// Host wrapper: computes first derivatives of img into lx and ly, then the
// determinant of the Hessian back into img. The 10/3 weighting matches the
// smoothed (Scharr-like) derivative stencil used by the kernels.
// Returns the measured GPU time (timing currently disabled, returns 0).
double HessianDeterminant(CudaImage &img, CudaImage &lx, CudaImage &ly,
                          int step) {
  const float w = 10.0 / 3.0;
  const float fac1 = 1.0 / (2.0 * (w + 2.0));
  const float fac2 = w * fac1;
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 block(32, 16);
  Derivate << <grid, block>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                               img.pitch, img.height, step, fac1, fac2);
  HessianDeterminant << <grid, block>>> (lx.d_data, ly.d_data, img.d_data,
                                         img.width, img.pitch, img.height,
                                         step, fac1, fac2);
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("HessianDeterminant time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Detects local maxima of the detector response imd that exceed dthreshold,
// refines them with a quadratic fit, and appends them to pts via the global
// d_PointCounter. Points whose subpixel offset leaves the unit cell are
// tagged "weak" with a negative size. The x refinement is packed into
// point.octave (sign + fractional part) and the y refinement into
// point.angle; both are unpacked later in sortFiltered_kernel.
// NOTE(review): parameters imp and imn are not used in this implementation
// — presumably left over from a 3D (scale-neighbor) extremum test.
__global__ void FindExtrema(float *imd, float *imp, float *imn, int maxx,
                            int pitch, int maxy, float border, float dthreshold,
                            int scale, int octave, float size,
                            cv::KeyPoint *pts, int maxpts) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  // Reject points whose border-sized neighborhood leaves the image.
  int left_x = (int)(x - border + 0.5f) - 1;
  int right_x = (int)(x + border + 0.5f) + 1;
  int up_y = (int)(y - border + 0.5f) - 1;
  int down_y = (int)(y + border + 0.5f) + 1;
  if (left_x < 0 || right_x >= maxx || up_y < 0 || down_y >= maxy) return;
  int p = y * pitch + x;
  float v = imd[p];
  // Strict 8-neighborhood maximum above the detector threshold.
  if (v > dthreshold && v > imd[p - pitch - 1] && v > imd[p + pitch + 1] &&
      v > imd[p + pitch - 1] && v > imd[p - pitch + 1] && v > imd[p - 1] &&
      v > imd[p + 1] && v > imd[p + pitch] && v > imd[p - pitch]) {
    // 2D quadratic (Newton) refinement of the extremum position.
    float dx = 0.5f * (imd[p + 1] - imd[p - 1]);
    float dy = 0.5f * (imd[p + pitch] - imd[p - pitch]);
    float dxx = imd[p + 1] + imd[p - 1] - 2.0f * v;
    float dyy = imd[p + pitch] + imd[p - pitch] - 2.0f * v;
    float dxy = 0.25f * (imd[p + pitch + 1] + imd[p - pitch - 1] -
                         imd[p + pitch - 1] - imd[p - pitch + 1]);
    float det = dxx * dyy - dxy * dxy;
    float idet = (det != 0.0f ? 1.0f / det : 0.0f);
    float dst0 = idet * (dxy * dy - dyy * dx);
    float dst1 = idet * (dxy * dx - dxx * dy);
    bool weak = true;
    if (dst0 >= -1.0f && dst0 <= 1.0f && dst1 >= -1.0f && dst1 <= 1.0f) {
      weak = 0;
    }
    unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
    if (idx < maxpts) {
      cv::KeyPoint &point = pts[idx];
      point.response = v;
      // Weak points get a negative size so later stages can filter them.
      point.size = (weak ? -1 : 1) * 2.0 * size;
      // Pack octave index + |subpixel x| into a float, sign = sign of dst0.
      float octsub = (dst0 < 0 ? -1 : 1) * (octave + fabs(dst0));
      *(float *)(&point.octave) = (weak ? octave : octsub);
      point.class_id = scale;
      int ratio = (1 << octave);
      point.pt.x = ratio * (x);
      point.pt.y = ratio * (y);
      point.angle = dst1;
    } else {
      // List full: undo the reservation.
      atomicAdd(d_PointCounter,-1);
    }
  }
}
// Records the current keypoint count as the end index of this scale's range
// in d_ExtremaIdx, so later kernels can locate each scale's keypoints.
__global__ void CopyIdxArray(int scale) {
  d_ExtremaIdx[scale] = d_PointCounter[0];
}
// Host wrapper: runs extremum detection on the response image img for one
// scale and then snapshots the per-scale keypoint end index on the device.
// Returns the measured GPU time (timing currently disabled, returns 0).
double FindExtrema(CudaImage &img, CudaImage &imgp, CudaImage &imgn,
                   float border, float dthreshold, int scale, int octave,
                   float size, cv::KeyPoint *pts, int maxpts) {
  dim3 grid(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 block(32, 16);
  FindExtrema << <grid, block>>>
      (img.d_data, imgp.d_data, imgn.d_data, img.width, img.pitch, img.height,
       border, dthreshold, scale, octave, size, pts, maxpts);
  CopyIdxArray << <1, 1>>> (scale);
  CHK
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("FindExtrema time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Resets the device-global keypoint counter to zero.
void ClearPoints() {
  const int zero = 0;
  safeCall(cudaMemcpyToSymbolAsync(d_PointCounter, &zero, sizeof(int)));
}
// One compare-exchange step of a bitonic sort on plain ints: when the entry
// at the lower position is smaller, the two entries are swapped so the
// larger value ends up first. sortdir selects which side is "first".
__forceinline__ __device__ void atomicSort(int *pts, int shmidx, int offset,
                                           int sortdir) {
  const int i0 = shmidx + sortdir;
  const int i1 = shmidx + (offset - sortdir);
  if (pts[i0] < pts[i1]) {
    int tmp = pts[i0];
    pts[i0] = pts[i1];
    pts[i1] = tmp;
  }
}
// Ordering predicate for keypoints: orders by (pt.y, pt.x) ascending.
// A pt.x of zero acts as an empty/sentinel slot: if either operand has
// pt.x == 0 the result depends only on whether j is the sentinel, which
// pushes sentinels toward one end of the sorted sequence.
__forceinline__ __device__ bool atomicCompare(const cv::KeyPoint &i,
                                              const cv::KeyPoint &j) {
  // t == 0 iff at least one of the x coordinates is zero (sentinel check).
  float t = i.pt.x * j.pt.x;
  if (t == 0) {
    if (j.pt.x != 0) {
      return false;
    } else {
      return true;
    }
  }
  if (i.pt.y < j.pt.y) return true;
  if (i.pt.y == j.pt.y && i.pt.x < j.pt.x) return true;
  return false;
}
// Compact record used by the bitonic sorts: the index of the original
// keypoint plus its truncated integer pixel coordinates.
template <typename T>
struct sortstruct_t {
  T idx;    // index into the source keypoint array (-1 marks an empty slot)
  short x;  // truncated pt.x
  short y;  // truncated pt.y
};
template <typename T>
__forceinline__ __device__ bool atomicCompare(const sortstruct_t<T> &i,
const sortstruct_t<T> &j) {
int t = i.x * j.x;
if (t == 0) {
if (j.x != 0) {
return false;
} else {
return true;
}
}
if (i.y < j.y) return true;
if (i.y == j.y && i.x < j.x) return true;
return false;
}
// One compare-exchange step of a bitonic sort over sortstruct_t records:
// swaps the two records whenever atomicCompare() orders p0 after p1.
template <typename T>
__forceinline__ __device__ void atomicSort(sortstruct_t<T> *pts, int shmidx,
                                           int offset, int sortdir) {
  sortstruct_t<T> &p0 = pts[(shmidx + sortdir)];
  sortstruct_t<T> &p1 = pts[(shmidx + (offset - sortdir))];
  if (atomicCompare(p0, p1)) {
    // Swap field by field. Use T (not int) for the index temporary so the
    // swap stays exact if T is ever wider than int.
    T idx = p0.idx;
    short ptx = p0.x;
    short pty = p0.y;
    p0.idx = p1.idx;
    p0.x = p1.x;
    p0.y = p1.y;
    p1.idx = idx;
    p1.x = ptx;
    p1.y = pty;
  }
}
#define BitonicSortThreads 1024
// Sorts the keypoints of one scale by (y, x). One block per scale
// (blockIdx.x); the scale's range [first, last) comes from d_ExtremaIdx.
// Up to 8192 compact (idx, x, y) records are staged in shared memory,
// bitonic-sorted, and the permutation is then applied while copying the
// keypoints from pts into newpts. Launch with BitonicSortThreads threads.
template <class T>
__global__ void bitonicSort(const T *pts, T *newpts) {
  int scale = blockIdx.x;
  __shared__ struct sortstruct_t<short> shm[8192];
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Stage the records; pad the remainder with sentinel slots (idx -1, x 0).
  for (int i = threadIdx.x; i < 8192;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Standard bitonic sorting network over the fixed 8192-entry array;
  // the loop bounds are uniform so the barriers are reached by all threads.
  for (int i=1; i<8192; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<4096; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Apply the sorted permutation: copy each keypoint's fields to its new
  // position in the output array.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < 8192; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
// Variant of bitonicSort for scales with more keypoints than fit in shared
// memory: the sort records live in a global scratch buffer _shm instead,
// partitioned per block (_sz records each). The sorted capacity is rounded
// up to the next power of two of the scale's keypoint count; _sz must be at
// least that large. Otherwise identical to bitonicSort above.
template <class T>
__global__ void bitonicSort_global(const T *pts, T *newpts, sortstruct_t<int>* _shm, int _sz) {
  int scale = blockIdx.x;
  //__shared__ struct sortstruct_t shm[8192];
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Round the sort size up to the next power of two.
  int nkpts_ceil = 1;
  while (nkpts_ceil < nkpts) nkpts_ceil *= 2;
  sortstruct_t<int> *shm = &(_shm[_sz*blockIdx.x]);
  // Stage the records; pad the remainder with sentinel slots (idx -1, x 0).
  for (int i = threadIdx.x; i < nkpts_ceil;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Bitonic sorting network over nkpts_ceil entries in global scratch.
  for (int i=1; i<nkpts_ceil; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<nkpts_ceil/2; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Apply the sorted permutation while copying the keypoints.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < nkpts_ceil; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
#define FindNeighborsThreads 32
// For each keypoint (one block per keypoint), finds earlier keypoints of
// the same scale and of the previous scale that lie within half its size,
// and writes their indices into kptindices. Row layout: entry [blockIdx.x *
// width] holds 1 + neighbor count; the neighbors follow. Relies on the
// keypoints being sorted by (y, x) within each scale so the backward scan
// can stop once the y gap exceeds size/2.
__global__ void FindNeighbors(cv::KeyPoint *pts, int *kptindices, int width) {
  __shared__ int gidx[1];
  // which scale?
  int scale = pts[blockIdx.x].class_id;
  int cmpIdx = scale < 1 ? 0 : d_ExtremaIdx[scale - 1];
  float size = pts[blockIdx.x].size;
  // Slot 0 of the row stores the count, so indices start at 1.
  // (Written unguarded by every thread, but always with the same value.)
  gidx[0] = 1;
  __syncthreads();
  // One keypoint per block.
  cv::KeyPoint &kpt = pts[blockIdx.x];
  // Key point to compare. Only compare with smaller than current
  // Iterate backwards instead and break as soon as possible!
  //for (int i = cmpIdx + threadIdx.x; i < blockIdx.x; i += FindNeighborsThreads) {
  for (int i=blockIdx.x-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
    cv::KeyPoint &kpt_cmp = pts[i];
    // Sorted order lets us stop once the candidate is too far above.
    if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
    //if (fabs(kpt.pt.y-kpt_cmp.pt.y) > size*.5f) continue;
    float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                 (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
    if (dist < size * size * 0.25) {
      int idx = atomicAdd(gidx, 1);
      kptindices[blockIdx.x * width + idx] = i;
    }
  }
  // Also scan the previous scale's keypoints the same way.
  if (scale > 0) {
    int startidx = d_ExtremaIdx[scale-1];
    cmpIdx = scale < 2 ? 0 : d_ExtremaIdx[scale - 2];
    for (int i=startidx-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
      cv::KeyPoint &kpt_cmp = pts[i];
      if (kpt_cmp.pt.y-kpt.pt.y > size*.5f) continue;
      if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
      float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                   (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
      if (dist < size * size * 0.25) {
        int idx = atomicAdd(gidx, 1);
        kptindices[blockIdx.x * width + idx] = i;
      }
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    kptindices[blockIdx.x * width] = gidx[0];
  }
}
// TODO Intermediate storage of memberarray and minneighbor
#define FilterExtremaThreads 1024
// Iterative non-maximum suppression over the keypoint neighbor graph
// (kptindices built by FindNeighbors). Single-block kernel. memberarray
// tracks each point's state; each round, points with no unprocessed
// neighbors are either added outright (no accepted neighbor) or compared
// by response against their smallest accepted neighbor, replacing it when
// stronger. Repeats until a round processes nothing (capped at 10000
// iterations). minneighbor and shouldAdd are per-point scratch buffers.
__global__ void FilterExtrema_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                     int *kptindices, int width,
                                     int *memberarray,
                                     int *minneighbor,
                                     char *shouldAdd) {
  // -1 means not processed
  // -2 means added but replaced
  // >=0 means added
  __shared__ bool shouldBreak[1];
  int nump = d_PointCounter[0];
  // Initially all points are unprocessed
  for (int i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    memberarray[i] = -1;
  }
  if (threadIdx.x == 0) {
    shouldBreak[0] = true;
  }
  __syncthreads();
  // Loop until there are no more points to process
  for (int xx=0; xx<10000; ++xx) {
  //while (true) {
    // Outer loop to handle more than 8*1024 points
    // Start by restoring memberarray
    // Make sure to add appropriate offset to indices
    // for (int offset=0; offset<nump; offset += 8*1024) {
    // memberarray[i] = storedmemberarray[i+offset];
    //for (int offset=0; offset<nump; offset += 8*1024) {
    // Mark all points for addition and no minimum neighbor
    //int maxi = nump-offset >= 8*1024 ? 8*1024 : nump-offset;
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      minneighbor[i] = nump+1;
      shouldAdd[i] = true;
    }
    __syncthreads();
    // Look through all points. If there are points that have not been processed,
    // disable breaking and check if it has no processed neighbors (add), has all processed
    // neighbors (compare with neighbors) or has some unprocessed neighbor (wait)
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      int neighborsSize = kptindices[i * width] - 1;
      int *neighbors = &(kptindices[i * width + 1]);
      // Only do if we didn't process the point before
      if (memberarray[i] == -1) {
        // If we process at least one point we shouldn't break
        // No need to sync. Only want to know if at least one thread wants to
        // continue
        shouldBreak[0] = false;
        // Sort neighbors according to the order of currently added points
        // (often very few)
        // If the neighbor has been replaced, stick it to the back
        // If any neighbor has not been processed, break;
        bool shouldProcess = true;
        for (int k = 0; k < neighborsSize; ++k) {
          // If the point has one or more unprocessed neighbors, skip
          if (memberarray[neighbors[k]] == -1) {
            shouldProcess = false;
            shouldAdd[i] = false;
            break;
          }
          // If it has a neighbor that is in the list, we don't add, but process
          if (memberarray[neighbors[k]] >= 0) {
            shouldAdd[i] = false;
          }
        }
        // We should process and potentially replace the neighbor
        if (shouldProcess && !shouldAdd[i]) {
          // Find the smallest neighbor. Often only one or two, so no ned for fancy algorithm
          for (int k = 0; k < neighborsSize; ++k) {
            for (int j = k + 1; j < neighborsSize; ++j) {
              if (memberarray[neighbors[k]] == -2 ||
                  (memberarray[neighbors[j]] != -2 &&
                   memberarray[neighbors[j]] < memberarray[neighbors[k]])) {
                int t = neighbors[k];
                neighbors[k] = neighbors[j];
                neighbors[j] = t;
              }
            }
          }
          // Pick the first neighbor
          // We need to make sure, in case more than one point has this
          // neighbor,
          // That the point with lowest memberarrayindex processes it first
          // Here minneighbor[i] is the target and i the neighbor
          int nidx = neighbors[0];
          minneighbor[nidx] = min(minneighbor[nidx], (int)i);
        }
      }
    }
    __syncthreads();
    // Check which points we can add
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (memberarray[i] == -1) {
        if (shouldAdd[i]) {
          memberarray[i] = i;
        }
      }
    }
    __syncthreads();
    // Look at the neighbors. If the response is higher, replace
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (minneighbor[i] != nump+1) {
        if (memberarray[minneighbor[i]] == -1) {
          if (!shouldAdd[minneighbor[i]]) {
            const cv::KeyPoint &p0 = kpts[minneighbor[i]];
            const cv::KeyPoint &p1 = kpts[i];
            if (p0.response > p1.response) {
              memberarray[minneighbor[i]] = i;
              memberarray[i] = -2;
            } else {
              memberarray[minneighbor[i]] = -2;
            }
          }
        }
      }
    }
    __syncthreads();
    // End outer loop
    //for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    //  storedmemberarray[i+offset] = memberarray[i];
    // }
    // __syncthreads();
    //}
    // Are we done?
    if (shouldBreak[0]) break;
    if (threadIdx.x == 0) {
      shouldBreak[0] = true;
    }
    __syncthreads();
  }
  __syncthreads();
}
// Compacts the surviving keypoints after filtering: processes memberarray in
// 2048-entry chunks, bitonic-sorting each chunk so valid indices (accepted,
// non-weak points) come first and nump+1 sentinels last, then writes the
// corresponding keypoints densely into newkpts while unpacking the subpixel
// refinement stored in octave (x) and angle (y) back into pixel
// coordinates. Finally updates d_PointCounter with the surviving count.
// Single-block kernel, 1024 threads.
__global__ void sortFiltered_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                    int *memberarray) {
  __shared__ int minneighbor[2048];
  __shared__ int curridx[1];
  int nump = d_PointCounter[0];
  if (threadIdx.x == 0) {
    curridx[0] = 0;
  }
  // Sort array
  const int upper = (nump + 2047) & (0xfffff800);
  for (int i = threadIdx.x; i < upper; i += 2 * FilterExtremaThreads) {
    // Map each chunk entry to its accepted keypoint index, or to the
    // nump+1 sentinel for rejected (<0) or weak (size<0) points.
    minneighbor[threadIdx.x] =
        i >= nump ? nump+1 : (memberarray[i] < 0 ? nump+1 : (kpts[memberarray[i]].size < 0 ? nump+1 : memberarray[i]));
    minneighbor[threadIdx.x + 1024] =
        i + 1024 >= nump ? nump+1
        : (memberarray[i + 1024] < 0 ? nump+1 : (kpts[memberarray[i+1024]].size < 0 ? nump+1 : memberarray[i+1024]));
    __syncthreads();
    // Sort and store keypoints
#pragma unroll 1
    for (int k = 1; k < 2048; k <<= 1) {
      int sortdir = (threadIdx.x & k) > 0 ? 0 : 1;
#pragma unroll 1
      for (int j = k; j > 0; j >>= 1) {
        int mask = 0x0fffffff * j;
        int tidx = ((threadIdx.x & mask) << 1) + (threadIdx.x & ~mask);
        atomicSort(minneighbor, tidx, j, j * sortdir);
        __syncthreads();
      }
    }
    __syncthreads();
#pragma unroll 1
    for (int k = threadIdx.x; k < 2048; k += 1024) {
      if (minneighbor[k] < nump) {
        // Restore subpixel component
        cv::KeyPoint &okpt = kpts[minneighbor[k]];
        // octave packs sign(dst0) * (octave + |subpixel x|); see FindExtrema.
        float octsub = fabs(*(float*)(&kpts[minneighbor[k]].octave));
        int octave = (int)octsub;
        float subp = (*(float*)(&kpts[minneighbor[k]].octave) < 0 ? -1 : 1) * (octsub - octave);
        float ratio = 1 << octave;
        cv::KeyPoint &tkpt = newkpts[k + curridx[0]];
        tkpt.pt.y = ratio * ((int)(0.5f+okpt.pt.y / ratio) + okpt.angle);
        tkpt.pt.x = ratio * ((int)(0.5f+okpt.pt.x / ratio) + subp);
        // newkpts[k + curridx[0] + threadIdx.x].angle = 0; // This will be set elsewhere
        tkpt.class_id = okpt.class_id;
        tkpt.octave = octave;
        tkpt.response = okpt.response;
        tkpt.size = okpt.size;
      }
    }
    __syncthreads();
    // How many did we add?
    // NOTE(review): the full-chunk branch below performs an unguarded
    // read-modify-write of shared curridx[0] from every thread; interleaved
    // warps could apply the increment more than once. The partial-chunk
    // branches are guarded so only one thread writes. Verify, and consider
    // guarding with threadIdx.x == 0.
    if (minneighbor[2047] < nump) {
      curridx[0] += 2048;
    } else {
      if (minneighbor[1024] < nump) {
        if (threadIdx.x < 1023 && minneighbor[1024 + threadIdx.x] < nump &&
            minneighbor[1024 + threadIdx.x + 1] == nump+1) {
          curridx[0] += 1024 + threadIdx.x + 1;
        }
      } else {
        if (minneighbor[threadIdx.x] < nump &&
            minneighbor[threadIdx.x + 1] == nump+1) {
          curridx[0] += threadIdx.x + 1;
        }
      }
      __syncthreads();
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    d_PointCounter[0] = curridx[0];
  }
}
// Host driver for keypoint filtering: sorts each scale's keypoints by
// (y, x) (shared-memory bitonic sort when every scale fits in 8K points,
// global-memory variant otherwise), builds the neighbor lists, runs the
// non-maximum suppression kernel, compacts the survivors, and returns the
// final point count through nump.
// NOTE(review): scratch buffers are cudaMalloc'd/cudaFree'd on every call;
// consider reusing them if this is called per frame.
void FilterExtrema(cv::KeyPoint *pts, cv::KeyPoint *newpts, int* kptindices, int& nump) {
  //int nump;
  // Blocking copy: nump is needed on the host below.
  cudaMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int));
  unsigned int extremaidx_h[16];
  cudaMemcpyFromSymbol(extremaidx_h,d_ExtremaIdx,16*sizeof(unsigned int));
  // Largest per-scale keypoint count decides which sort variant is used.
  int maxnump = extremaidx_h[0];
  for (int i=1; i<16; ++i) {
    maxnump = max(maxnump,extremaidx_h[i]-extremaidx_h[i-1]);
  }
  // Neighbor-list row stride: 21*21 = 441 entries per keypoint.
  int width = ceil(21) * ceil(21);
  // Sort the list of points
  dim3 blocks(16, 1, 1);
  dim3 threads(BitonicSortThreads, 1, 1);
  if (maxnump <= 8*1024) {
    bitonicSort << <blocks, threads>>> (pts, newpts);
  } else {
    // Too many points for shared memory: sort in a temporary global buffer
    // sized to the next power of two of the total count, per scale block.
    int nump_ceil = 1;
    while (nump_ceil < nump) nump_ceil <<= 1;
    std::cout << "numpceil: " << nump_ceil << std::endl;
    sortstruct_t<int>* sortstruct;
    cudaMalloc((void**)&sortstruct, nump_ceil*16*sizeof(sortstruct_t<int>));
    bitonicSort_global << <blocks, threads>>> (pts, newpts, sortstruct,nump_ceil);
    cudaFree(sortstruct);
  }
  CHK
  /* cv::KeyPoint* newpts_h = new cv::KeyPoint[nump];
  cudaMemcpy(newpts_h,newpts,nump*sizeof(cv::KeyPoint),cudaMemcpyDeviceToHost);
  int scale = 0;
  for (int i=1; i<nump; ++i) {
  cv::KeyPoint &k0 = newpts_h[i-1];
  cv::KeyPoint &k1 = newpts_h[i];
  std::cout << i << ": " << newpts_h[i].class_id << ": " << newpts_h[i].pt.y << " " << newpts_h[i].pt.x << ", " << newpts_h[i].size;
  if (!(k0.pt.y<k1.pt.y || (k0.pt.y==k1.pt.y && k0.pt.x<k1.pt.x))) std::cout << " <<<<";
  if (k1.size < 0 ) std::cout << " ##############";
  std::cout << "\n";
  }
  */
  // Find all neighbors
  cudaStreamSynchronize(copyStream);
  blocks.x = nump;
  threads.x = FindNeighborsThreads;
  FindNeighbors << <blocks, threads>>> (newpts, kptindices, width);
  CHK
  //cudaDeviceSynchronize();
  //safeCall(cudaGetLastError());
  // Filter extrema
  blocks.x = 1;
  threads.x = FilterExtremaThreads;
  int *buffer1, *buffer2;
  cudaMalloc((void**)&buffer1, nump*sizeof(int));
  cudaMalloc((void**)&buffer2, nump*sizeof(int));
  char* buffer3;
  cudaMalloc((void**)&buffer3, nump);
  FilterExtrema_kernel << <blocks, threads>>> (newpts, pts, kptindices, width,
                                               buffer1, buffer2, buffer3);
  threads.x = 1024;
  sortFiltered_kernel << <blocks, threads>>> (newpts, pts, buffer1);
  CHK
  //cudaDeviceSynchronize();
  //safeCall(cudaGetLastError());
  cudaFree(buffer1);
  cudaFree(buffer2);
  cudaFree(buffer3);
  // Fetch the surviving count written by sortFiltered_kernel.
  cudaMemcpyFromSymbolAsync(&nump, d_PointCounter, sizeof(int));
}
// Copies numPts keypoints from device memory into h_pts. The copy is
// asynchronous on copyStream, so the caller must synchronize that stream
// before reading h_pts. Returns numPts.
int GetPoints(std::vector<cv::KeyPoint> &h_pts, cv::KeyPoint *d_pts, int numPts) {
  h_pts.resize(numPts);
  const size_t nbytes = sizeof(cv::KeyPoint) * numPts;
  safeCall(cudaMemcpyAsync((float *)&h_pts[0], d_pts, nbytes,
                           cudaMemcpyDeviceToHost, copyStream));
  return numPts;
}
// Copies numPts 61-byte descriptors from the device matrix d_desc into a
// freshly allocated host matrix h_desc. The copy is asynchronous on
// copyStream — the caller must synchronize that stream before reading
// h_desc. NOTE(review): the cudaMemcpyAsync return code is not checked.
void GetDescriptors(cv::Mat &h_desc, cv::Mat &d_desc, int numPts) {
  h_desc = cv::Mat(numPts, 61, CV_8U);
  cudaMemcpyAsync(h_desc.data, d_desc.data, numPts*61, cudaMemcpyDeviceToHost, copyStream);
}
// Accumulates the raw per-subregion values used to build binary descriptors.
// Launch: one block per keypoint, blockDim.x == EXTRACT_S.
// For every sample of the rotated patch around the keypoint, the intensity
// (im) and the rotated derivatives (rx, ry) are binned into 2x2 (4 bins),
// 3x3 (9 bins) and 4x4 (16 bins) grids — 29 bins per channel, padded to 30.
// Output: 3*29 floats per keypoint written to `_vals`.
__global__ void ExtractDescriptors(cv::KeyPoint *d_pts, CudaImage *d_imgs,
                                   float *_vals, int size2, int size3,
                                   int size4) {
  // Per-thread private accumulators: 30 bins x 3 channels per thread.
  __shared__ float acc_vals[3 * 30 * EXTRACT_S];
  float *acc_vals_im = &acc_vals[0];
  float *acc_vals_dx = &acc_vals[30 * EXTRACT_S];
  float *acc_vals_dy = &acc_vals[2 * 30 * EXTRACT_S];
  int p = blockIdx.x;                // keypoint index (one per block)
  float *vals = &_vals[p * 3 * 29];  // this keypoint's output slice
  float iratio = 1.0f / (1 << d_pts[p].octave);  // octave -> image-scale factor
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;       // pyramid level: 4 images per level
  float *imd = d_imgs[4 * lev + 0].d_data;  // intensity
  float *dxd = d_imgs[4 * lev + 2].d_data;  // x-derivative
  float *dyd = d_imgs[4 * lev + 3].d_data;  // y-derivative
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);  // sampling window side length
  // Zero all accumulators (channel-major sweep covers the whole array).
  for (int i = 0; i < 30; ++i) {
    acc_vals_im[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dx[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dy[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Grid-stride sweep over the winsize x winsize patch; each thread
  // accumulates into its own 3*30 slice (thread-major layout below).
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    // Rotate the patch offset by the keypoint angle and sample.
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    float dx = dxd[pos];
    float dy = dyd[pos];
    // Rotate the gradient into the keypoint frame.
    float rx = -dx * si + dy * co;
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // Add 2x2
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx] += im;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // Add 3x3
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // Add 4x4
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 2] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff
  // Reduce the per-thread slices across the block: half-warps handle
  // even/odd bin offsets; d selects the im/rx/ry channel (stride 30).
  // Assumes blockDim.x == 64 (two warps) for the tx/tx+32 pairing.
  float acc_reg;
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    for (int d = 0; d < 90; d += 30) {
      if (tx_d < 32) {
        acc_reg = acc_vals[3 * 30 * tx_d + offset + d] +
                  acc_vals[3 * 30 * (tx_d + 32) + offset + d];
        acc_reg += fake_shfl_down(acc_reg, 1);
        acc_reg += fake_shfl_down(acc_reg, 2);
        acc_reg += fake_shfl_down(acc_reg, 4);
        acc_reg += fake_shfl_down(acc_reg, 8);
        acc_reg += fake_shfl_down(acc_reg, 16);
      }
      if (tx_d == 0) {
        acc_vals[offset + d] = acc_reg;
      }
    }
  }
  __syncthreads();
  // Have 29*3 values to store
  // They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
  // NOTE(review): the reduction above stores channel results at bases 0, 30
  // and 60 (acc_vals[offset + d] with d in {0, 30, 60}), but the reads below
  // use bases 0, 29 and 58 — this looks like an off-by-one that straddles
  // channel boundaries. Verify against a reference implementation.
  if (tx < 29) {
    vals[tx] = acc_vals[tx];
    vals[29 + tx] = acc_vals[29 + tx];
    vals[2 * 29 + tx] = acc_vals[2 * 29 + tx];
  }
}
// Serial (three-pass) variant of ExtractDescriptors: processes the im, dx
// and dy channels in separate sweeps over the patch, reusing one shared
// accumulator array of 30 bins per thread, and reduces each pass with a
// shared-memory tree instead of shuffles.
// NOTE(review): this kernel appears unused — the host wrapper below launches
// ExtractDescriptors, not this variant — and looks broken: all three passes
// store their reduced results to final_vals[3 * offset] (the dx/dy passes
// overwrite the im results instead of writing +1/+2 slots), while the final
// read expects a packed [0..28][29..57][58..86] layout. Treat as dead code.
__global__ void ExtractDescriptors_serial(cv::KeyPoint *d_pts,
                                          CudaImage *d_imgs, float *_vals,
                                          int size2, int size3, int size4) {
  __shared__ float acc_vals[30 * EXTRACT_S];
  __shared__ float final_vals[3 * 30];
  int p = blockIdx.x;                // keypoint index (one per block)
  float *vals = &_vals[p * 3 * 29];  // output slice for this keypoint
  float iratio = 1.0f / (1 << d_pts[p].octave);
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;
  float *imd = d_imgs[4 * lev + 0].d_data;
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);
  // IM
  // Pass 1: intensity channel.
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += im;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += im;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += im;
    }
  }
  __syncthreads();
  // Reduce stuff
  // Tree reduction over the per-thread slices.
  // NOTE(review): the sub-warp steps read other lanes' shared-memory writes
  // with no __syncwarp between them (pre-Volta warp-synchronous assumption),
  // and there is no __syncthreads before acc_vals is re-zeroed for the next
  // pass — both are races on current hardware.
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  // DX
  // Pass 2: rotated x-derivative channel.
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float dx = dxd[pos];
    float dy = dyd[pos];
    float rx = -dx * si + dy * co;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += rx;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += rx;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += rx;
    }
  }
  __syncthreads();
  // Reduce stuff
  // NOTE(review): stores to final_vals[3 * offset] — same slots as the IM
  // pass above (expected 3 * offset + 1); see header note.
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  // DY
  // Pass 3: rotated y-derivative channel.
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float dx = dxd[pos];
    float dy = dyd[pos];
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff
  // NOTE(review): stores to final_vals[3 * offset] — same slots again
  // (expected 3 * offset + 2); see header note.
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  __syncthreads();
  // Have 29*3 values to store
  // They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
  if (tx < 29) {
    vals[tx] = final_vals[tx];
    vals[29 + tx] = final_vals[29 + tx];
    vals[2 * 29 + tx] = final_vals[2 * 29 + tx];
  }
}
// Packs the binary descriptor for one keypoint per block.
// Thread `byteIdx` (< 61) produces output byte `byteIdx` by comparing pairs
// of raw values selected through the constant tables comp_idx_1/comp_idx_2.
// The final byte carries only 6 valid bits (486 comparisons in total).
__global__ void BuildDescriptor(float *_valsim, unsigned char *_desc) {
  const int point = blockIdx.x;
  const size_t byteIdx = threadIdx.x;
  if (byteIdx >= 61) return;
  const float *vals = _valsim + 3 * 29 * point;
  const int nbits = (byteIdx == 60 ? 6 : 8);
  unsigned char packed = 0;
#pragma unroll
  for (int bit = 0; bit < nbits; ++bit) {
    const int a = comp_idx_1[byteIdx * 8 + bit];
    const int b = comp_idx_2[byteIdx * 8 + bit];
    if (vals[a] > vals[b]) packed |= (unsigned char)(1 << bit);
  }
  _desc[61 * point + byteIdx] = packed;
}
// Host driver: computes raw comparison values for all `numPts` keypoints and
// packs them into 61-byte binary descriptors in `desc_d`.
// `h_imgs` is unused here (kept for interface parity with FindOrientation).
// Returns elapsed GPU time (timing currently disabled, always 0).
double ExtractDescriptors(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs,
                          unsigned char *desc_d, float* vals_d, int patsize, int numPts) {
  // Subregion side lengths for the 2x2 / 3x3 / 4x4 grids.
  const int size2 = patsize;
  const int size3 = ceil(2.0f * patsize / 3.0f);
  const int size4 = ceil(0.5f * patsize);
  const dim3 grid(numPts);
  ExtractDescriptors << <grid, dim3(EXTRACT_S)>>>(d_pts, d_imgs, vals_d, size2, size3, size4);
  CHK;
  // Clear the descriptor buffer before packing.
  cudaMemsetAsync(desc_d, 0, numPts * 61);
  BuildDescriptor << <grid, 64>>> (vals_d, desc_d);
  CHK;
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("ExtractDescriptors time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
#define NTHREADS_MATCH 32
// Brute-force Hamming matcher.
// Launch: one block per query descriptor, NTHREADS_MATCH (= 32, one warp)
// threads. Each thread scans the train set with stride NTHREADS_MATCH,
// keeping its best and second-best candidate, then the warp reduces the
// per-thread candidates through shared memory.
// `pitch` is the byte stride between consecutive descriptors; the buffers
// must be zero-padded to at least 64 bytes per row, since the distance is
// popcounted over 8 x 64-bit words.
// Writes two cv::DMatch entries per query (best, second best) so the host
// can apply a ratio test.
// Fix: __syncwarp() added between reduction rounds — since Volta, warp
// threads are not implicitly lock-stepped, so shared-memory writes of one
// round must be made visible before the next round reads them.
__global__ void MatchDescriptors(unsigned char *d1, unsigned char *d2,
                                 int pitch, int nkpts_2, cv::DMatch *matches) {
  int p = blockIdx.x;
  int x = threadIdx.x;
  __shared__ int idxBest[NTHREADS_MATCH];
  __shared__ int idxSecondBest[NTHREADS_MATCH];
  __shared__ int scoreBest[NTHREADS_MATCH];
  __shared__ int scoreSecondBest[NTHREADS_MATCH];
  idxBest[x] = 0;
  idxSecondBest[x] = 0;
  scoreBest[x] = 512;        // larger than any possible distance (8*64 bits)
  scoreSecondBest[x] = 512;
  __syncthreads();
  const unsigned long long *d1i = (const unsigned long long *)(d1 + pitch * p);
  // Strided scan: thread x examines train descriptors x, x+32, x+64, ...
  for (int i = 0; i < nkpts_2; i += NTHREADS_MATCH) {
    if (i + x < nkpts_2) {
      const unsigned long long *d2i =
          (const unsigned long long *)(d2 + pitch * (x + i));
      // Hamming distance between d1[p] and d2[i + x].
      int score = 0;
#pragma unroll
      for (int j = 0; j < 8; ++j) {
        score += __popcll(d1i[j] ^ d2i[j]);
      }
      if (score < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = score;
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = i + x;
      } else if (score < scoreSecondBest[x]) {
        scoreSecondBest[x] = score;
        idxSecondBest[x] = i + x;
      }
    }
  }
  __syncthreads();
  // Tree reduction of (best, second best) across the warp.
  for (int i = NTHREADS_MATCH / 2; i >= 1; i /= 2) {
    if (x < i) {
      if (scoreBest[x + i] < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = idxBest[x + i];
      } else if (scoreBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x + i];
      }
      if (scoreSecondBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreSecondBest[x + i];
        idxSecondBest[x] = idxSecondBest[x + i];
      }
    }
    // Executed by the whole warp (divergent lanes reconverge here).
    __syncwarp();
  }
  if (x == 0) {
    matches[2 * p].queryIdx = p;
    matches[2 * p].trainIdx = idxBest[x];
    matches[2 * p].distance = scoreBest[x];
    matches[2 * p + 1].queryIdx = p;
    matches[2 * p + 1].trainIdx = idxSecondBest[x];
    matches[2 * p + 1].distance = scoreSecondBest[x];
  }
}
// Matches every query descriptor against the train set on the GPU using
// caller-provided device/host scratch buffers, then converts the raw
// (best, second best) pairs into knn-style two-element match vectors.
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches,
                      size_t pitch,
                      unsigned char* descq_d, unsigned char* desct_d, cv::DMatch* dmatches_d, cv::DMatch* dmatches_h) {
  const int nQuery = desc_query.rows;
  dim3 block(nQuery);
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
  // Blocking copy — also waits for the kernel on the default stream.
  cudaMemcpy(dmatches_h, dmatches_d, nQuery * 2 * sizeof(cv::DMatch),
             cudaMemcpyDeviceToHost);
  for (int i = 0; i < nQuery; ++i) {
    std::vector<cv::DMatch> knn;
    knn.push_back(dmatches_h[2 * i]);
    knn.push_back(dmatches_h[2 * i + 1]);
    dmatches.push_back(knn);
  }
}
// Standalone matcher: uploads both descriptor sets, runs the GPU matcher and
// returns two cv::DMatch entries (best, second best) per query descriptor.
// Fixes:
//  - the kernel takes a single pitch for BOTH buffers, but the original code
//    allocated the train buffer with an independent cudaMallocPitch call,
//    whose pitch is not guaranteed to equal the query pitch; the train
//    buffer is now allocated with the query pitch so both sides agree.
//  - empty inputs are rejected up front (a zero-block launch is an error).
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches) {
  if (desc_query.rows == 0 || desc_train.rows == 0) return;  // nothing to match
  // Query descriptors, padded to 64 aligned bytes per row.
  size_t pitch;
  unsigned char *descq_d;
  cudaMallocPitch(&descq_d, &pitch, 64, desc_query.rows);
  cudaMemset2D(descq_d, pitch, 0, 64, desc_query.rows);
  cudaMemcpy2D(descq_d, pitch, desc_query.data, desc_query.cols,
               desc_query.cols, desc_query.rows, cudaMemcpyHostToDevice);
  // Train descriptors, laid out with the *same* pitch as the query buffer.
  unsigned char *desct_d;
  cudaMalloc(&desct_d, pitch * desc_train.rows);
  cudaMemset(desct_d, 0, pitch * desc_train.rows);
  cudaMemcpy2D(desct_d, pitch, desc_train.data, desc_train.cols,
               desc_train.cols, desc_train.rows, cudaMemcpyHostToDevice);
  cv::DMatch *dmatches_d;
  cudaMalloc(&dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch));
  dim3 block(desc_query.rows);
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
  cv::DMatch *dmatches_h = new cv::DMatch[2 * desc_query.rows];
  // Blocking copy — also waits for the kernel on the default stream.
  cudaMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
             cudaMemcpyDeviceToHost);
  for (int i = 0; i < desc_query.rows; ++i) {
    std::vector<cv::DMatch> tdmatch;
    tdmatch.push_back(dmatches_h[2 * i]);
    tdmatch.push_back(dmatches_h[2 * i + 1]);
    dmatches.push_back(tdmatch);
  }
  delete[] dmatches_h;
  cudaFree(descq_d);
  cudaFree(desct_d);
  cudaFree(dmatches_d);
}
// Builds the 486 comparison index pairs consumed by BuildDescriptor and
// uploads them to the constant tables comp_idx_1 / comp_idx_2.
// For each subregion grid (2x2 -> bins 0..3, 3x3 -> bins 4..12,
// 4x4 -> bins 13..28) and each channel c in {im, dx, dy}, every unordered
// bin pair (j, i) with j < i contributes one comparison of vals[3*j + c]
// against vals[3*i + c]. Table slots 486..487 stay unset: BuildDescriptor
// reads only 6 bits of the final byte.
void InitCompareIndices() {
  int comp_idx_1_h[61 * 8];
  int comp_idx_2_h[61 * 8];
  int cntr = 0;
  // Emit, per channel, all pairs (j, i) with first <= j < i <= last.
  auto emitPairs = [&](int first, int last) {
    for (int c = 0; c < 3; ++c) {
      for (int j = first; j < last; ++j) {
        for (int i = j + 1; i <= last; ++i) {
          comp_idx_1_h[cntr] = 3 * j + c;
          comp_idx_2_h[cntr] = 3 * i + c;
          ++cntr;
        }
      }
    }
  };
  emitPairs(0, 3);    // 2x2 grid: C(4,2)  * 3 =  18 pairs
  emitPairs(4, 12);   // 3x3 grid: C(9,2)  * 3 = 108 pairs
  emitPairs(13, 28);  // 4x4 grid: C(16,2) * 3 = 360 pairs
  cudaMemcpyToSymbol(comp_idx_1, comp_idx_1_h, 8 * 61 * sizeof(int));
  cudaMemcpyToSymbol(comp_idx_2, comp_idx_2_h, 8 * 61 * sizeof(int));
}
// Assigns a dominant orientation to each keypoint (one block per keypoint).
// Builds a 42-bin joint histogram of gradient responses in a radius-6 disc
// around the keypoint, smooths it with a 7-bin sliding window, and stores
// the angle of the strongest window into d_pts[p].angle (range [0, 2*pi)).
// Expects blockDim.x == ORIENT_S with at least 144 threads (12x12 sample
// grid indexed via tx&15 / tx/16).
__global__ void FindOrientation(cv::KeyPoint *d_pts, CudaImage *d_imgs) {
  __shared__ float resx[42], resy[42];   // per-bin summed gradient vectors
  __shared__ float re8x[42], re8y[42];   // 7-bin smoothed histogram
  int p = blockIdx.x;
  int tx = threadIdx.x;
  if (tx < 42) resx[tx] = resy[tx] = 0.0f;
  __syncthreads();
  int lev = d_pts[p].class_id;           // pyramid level: 4 images per level
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int octave = d_pts[p].octave;
  int step = (int)(0.5f * d_pts[p].size + 0.5f) >> octave;
  int x = (int)(d_pts[p].pt.x + 0.5f) >> octave;
  int y = (int)(d_pts[p].pt.y + 0.5f) >> octave;
  // Thread -> sample offset in [-6, 9] x [-6, ...]; only the r < 6 disc
  // contributes (r2 < 36).
  int i = (tx & 15) - 6;
  int j = (tx / 16) - 6;
  int r2 = i * i + j * j;
  if (r2 < 36) {
    // Gaussian weight (sigma = 2.5) on the gradient sample.
    float gweight = exp(-r2 / (2.5f * 2.5f * 2.0f));
    int pos = (y + step * j) * pitch + (x + step * i);
    float dx = gweight * dxd[pos];
    float dy = gweight * dyd[pos];
    float angle = atan2(dy, dx);
    // Map angle (-pi, pi] to a histogram bin in [0, 41].
    int a = max(min((int)(angle * (21 / CV_PI)) + 21, 41), 0);
    atomicAdd(resx + a, dx);
    atomicAdd(resy + a, dy);
  }
  __syncthreads();
  // Smooth: each bin accumulates itself plus the next 6 bins (circular).
  if (tx < 42) {
    re8x[tx] = resx[tx];
    re8y[tx] = resy[tx];
    for (int k = tx + 1; k < tx + 7; k++) {
      re8x[tx] += resx[k < 42 ? k : k - 42];
      re8y[tx] += resy[k < 42 ? k : k - 42];
    }
  }
  __syncthreads();
  // Thread 0 picks the window with the largest summed gradient magnitude.
  if (tx == 0) {
    float maxr = 0.0f;
    int maxk = 0;
    for (int k = 0; k < 42; k++) {
      float r = re8x[k] * re8x[k] + re8y[k] * re8y[k];
      if (r > maxr) {
        maxr = r;
        maxk = k;
      }
    }
    float angle = atan2(re8y[maxk], re8x[maxk]);
    d_pts[p].angle = (angle < 0.0f ? angle + 2.0f * CV_PI : angle);
    // printf("XXX %.2f %.2f %.2f\n", d_pts[p].pt.x, d_pts[p].pt.y,
    // d_pts[p].angle/CV_PI*180.0f);
  }
}
// Host driver: uploads the pyramid image descriptors (CudaImage structs) to
// the device, then launches one FindOrientation block per keypoint.
// Returns elapsed GPU time (timing currently disabled, always 0).
double FindOrientation(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs, int numPts) {
  safeCall(cudaMemcpyAsync(d_imgs, h_imgs.data(),
                           sizeof(CudaImage) * h_imgs.size(),
                           cudaMemcpyHostToDevice));
  // The copy above runs on the default stream; wait for it before launching.
  cudaStreamSynchronize(0);
  dim3 grid(numPts);
  FindOrientation << <grid, dim3(ORIENT_S)>>> (d_pts, d_imgs);
  CHK
  double gpuTime = 0;  // timing disabled
#ifdef VERBOSE
  printf("FindOrientation time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
|
46e68745a086d5293d7ccb182dbbb5789fca961d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuNVSM/updates.h"
// Adam optimizer state for a dense transform (weight matrix + bias).
// Allocates two TransformStorage instances holding the running first (m)
// and second (v) raw moment estimates. beta1/beta2 are the exponential
// decay rates; t_ is the 1-based timestep used for bias correction.
template <typename FloatT>
AdamTransformGradientUpdater<FloatT>::AdamTransformGradientUpdater(
    const size_t source_vector_dim,
    const size_t target_vector_dim,
    Streams* const streams,
    const FloatT beta1,
    const FloatT beta2,
    const FloatT epsilon)
        : TransformGradientUpdater<FloatT>(
              epsilon,
              {new TransformStorage<FloatT>(source_vector_dim,
                                            target_vector_dim,
                                            streams), /* m_prev */
               new TransformStorage<FloatT>(source_vector_dim,
                                            target_vector_dim,
                                            streams) /* v_prev */}),
          beta1_(beta1), beta2_(beta2), t_(1) {}
// Overwrites *grad in place with the Adam step direction
//   grad := (bias_correction * m) / (sqrt(v) + epsilon)
// evaluated element-wise by a single thrust::transform over fused iterators.
template <typename FloatT>
void adam_update(const device_matrix<FloatT>& m,
                 const device_matrix<FloatT>& v,
                 device_matrix<FloatT>* grad,
                 const FloatT bias_correction,
                 const FloatT epsilon) {
  CHECK_DIMENSIONS_EQUAL(m, *grad);
  CHECK_DIMENSIONS_EQUAL(v, *grad);
  thrust::transform(
      // Numerator: bias-corrected first moment.
      thrust::make_transform_iterator(
          m.begin(),
          func::scale_by_constant<FloatT>(bias_correction)),
      thrust::make_transform_iterator(
          m.end(),
          func::scale_by_constant<FloatT>(bias_correction)),
      // Denominator: sqrt of the second moment, offset by epsilon.
      thrust::make_transform_iterator(
          thrust::make_transform_iterator(
              v.begin(),
              func::sqrt<FloatT>()),
          func::add_constant<FloatT>(epsilon)),
      grad->begin(),
      thrust::divides<FloatT>());
}
// Performs one Adam step on the transform parameters.
// Pipeline: fold l2 regularization into the gradient, update the running
// moments m (decay beta1) and v (decay beta2, squared gradients), rescale
// the gradient to the bias-corrected Adam direction, and finally apply it
// to the parameters with plain SGD semantics (lambda already consumed).
template <typename FloatT>
void AdamTransformGradientUpdater<FloatT>::update(
    TransformStorage<FloatT>* const storage,
    typename TransformStorage<FloatT>::GradientType* const gradient_desc,
    const FloatT learning_rate,
    const FloatT scaled_regularization_lambda,
    Streams* const streams) {
  CHECK(storage != nullptr);
  apply_regularization(
      streams->next(),
      scaled_regularization_lambda,
      storage,
      gradient_desc);
  // Update m_t.
  // m_t = beta1 * m_{t-1} + (1 - beta1) * grad
  dynamic_cast<TransformStorage<FloatT>*>(this->storages_[0].get())->update(
      *gradient_desc,
      1.0 - beta1_, /* learning_rate */
      1.0, /* regularization_lambda */
      streams,
      func::identity<FloatT>() /* update_transform_op */);
  // Update v_t.
  // v_t = beta2 * v_{t-1} + (1 - beta2) * grad^2
  dynamic_cast<TransformStorage<FloatT>*>(this->storages_[1].get())->update(
      *gradient_desc,
      1.0 - beta2_, /* learning_rate */
      1.0, /* regularization_lambda */
      streams,
      func::square<FloatT>() /* update_transform_op */);
  device_matrix<FloatT>& grad_transform = std::get<0>(*gradient_desc);
  device_matrix<FloatT>& grad_bias = std::get<1>(*gradient_desc);
  device_matrix<FloatT>* m_transform;
  device_matrix<FloatT>* m_bias;
  device_matrix<FloatT>* v_transform;
  device_matrix<FloatT>* v_bias;
  std::tie(m_transform, m_bias) =
      dynamic_cast<TransformStorage<FloatT>*>(this->storages_[0].get())->get();
  std::tie(v_transform, v_bias) =
      dynamic_cast<TransformStorage<FloatT>*>(this->storages_[1].get())->get();
  // Standard Adam bias correction: sqrt(1 - beta2^t) / (1 - beta1^t).
  const FloatT bias_correction = sqrt(1.0 - pow(beta2_, t_)) / (1.0 - pow(beta1_, t_));
  // Rewrite the gradients in place as bias_correction * m / (sqrt(v) + eps).
  adam_update(*m_transform, *v_transform, &grad_transform, bias_correction, this->epsilon_);
  adam_update(*m_bias, *v_bias, &grad_bias, bias_correction, this->epsilon_);
  t_ += 1;
  CHECK_MATRIX(grad_transform);
  CHECK_MATRIX(grad_bias);
  // Apply the Adam direction; regularization was already folded in above.
  return storage->update(
      *gradient_desc, learning_rate,
      static_cast<FloatT>(0.0) /* regularization_lambda */,
      streams);
}
#define DENSE_UPDATE_DENSE_VARIANCE AdamConf::DENSE_UPDATE_DENSE_VARIANCE
#define DENSE_UPDATE AdamConf::DENSE_UPDATE
#define SPARSE AdamConf::SPARSE
// Adam optimizer state for (possibly sparse) representation updates.
// The first-moment storage (m) always has full repr_size rows; the
// second-moment storage (v) keeps a single row per object in the sparse
// modes (conf.mode() < DENSE_UPDATE_DENSE_VARIANCE), i.e. one scalar
// variance per representation, and full repr_size rows otherwise.
template <typename FloatT, typename IdxType>
AdamRepresentationsGradientUpdater<FloatT, IdxType>::AdamRepresentationsGradientUpdater(
    const size_t num_objects,
    const size_t repr_size,
    const AdamConf& conf,
    Streams* const streams,
    const FloatT beta1,
    const FloatT beta2,
    const FloatT epsilon)
        : RepresentationsGradientUpdater<FloatT, IdxType>(
              epsilon,
              {new RepresentationsStorage<FloatT, IdxType>(num_objects,
                                                           repr_size,
                                                           streams), /* m_prev */
               new RepresentationsStorage<FloatT, IdxType>(num_objects,
                                                           conf.mode() < DENSE_UPDATE_DENSE_VARIANCE ? 1 : repr_size /* repr_size */,
                                                           streams) /* v_prev */}),
          conf_(conf),
          beta1_(beta1), beta2_(beta2),
          t_(1) {}
// Rewrites one gradient column per block as the sparse-mode Adam direction.
// Launch: gridDim.x = number of gradient columns, blockDim.x = repr_size.
// Each gradient column is shared by `window_size` object indices, so the
// first moments of those objects are averaged per component, and the
// per-object scalar variances (v has one entry per object in sparse mode)
// are averaged as well, before forming
//   grad := bias_correction * mean(m) / (sqrt(mean(v)) + epsilon).
template <typename FloatT, typename IdxType>
__global__
void adam_sparse_update_kernel(const size_t window_size,
                               const IdxType* const indices,
                               const FloatT* const m,
                               const FloatT* const v,
                               FloatT* const grad,
                               const FloatT bias_correction,
                               const FloatT epsilon) {
  FloatT agg_m = 0.0;
  FloatT agg_v = 0.0;
  // Average the moments over the window of objects backing this column.
  for (IdxType w = 0; w < window_size; ++w) {
    agg_m += m[indices[blockIdx.x * window_size + w] * blockDim.x + threadIdx.x];
    agg_v += v[indices[blockIdx.x * window_size + w]];
  }
  agg_m /= window_size;
  agg_v /= window_size;
  grad[blockIdx.x * blockDim.x + threadIdx.x] = bias_correction * agg_m / (::sqrt(agg_v) + epsilon);
}
// One Adam step over the representation table. Three modes (AdamConf):
//   SPARSE                       — per-object scalar variance, sparse apply;
//   DENSE_UPDATE                 — per-object scalar variance, dense apply;
//   DENSE_UPDATE_DENSE_VARIANCE  — full per-component variance, dense apply.
// m is always updated sparsely from the incoming gradients; how v is
// maintained and how the final direction is applied depends on the mode.
template <typename FloatT, typename IdxType>
void AdamRepresentationsGradientUpdater<FloatT, IdxType>::update(
    RepresentationsStorage<FloatT, IdxType>* const storage,
    typename RepresentationsStorage<FloatT, IdxType>::GradientType* const gradient_descs,
    const FloatT learning_rate,
    const FloatT scaled_regularization_lambda,
    Streams* const streams) {
  CHECK(storage != nullptr);
  // Sparse modes fall back to SGD-style l2 (applied in the final update).
  const bool use_sgd_regularization = (conf_.mode() < DENSE_UPDATE_DENSE_VARIANCE);
  if (use_sgd_regularization) {
    LOG_IF_EVERY_N(WARNING, scaled_regularization_lambda > 0.0, 10000)
        << "Sparse variants of Adam currently do not correctly implement l2 regularization.";
  }
  RepresentationsStorage<FloatT, IdxType>* const m_storage =
      dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(
          this->storages_[0].get());
  RepresentationsStorage<FloatT, IdxType>* const v_storage =
      dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(
          this->storages_[1].get());
  device_matrix<FloatT>* m = m_storage->get();
  device_matrix<FloatT>* v = v_storage->get();
  // Invariants.
  // NOTE(review): num_grads is unused here, and `indices` is only used for
  // the window-size divisibility check.
  for (const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc : *gradient_descs) {
    device_matrix<FloatT>& gradient = std::get<0>(gradient_desc);
    const size_t repr_size = gradient.getRows();
    const size_t num_grads = gradient.getCols();
    CHECK_EQ((dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(
                  this->storages_[0].get())->get()->getRows()),
             repr_size);
    const device_matrix<IdxType>& indices = std::get<1>(gradient_desc);
    const size_t window_size = std::get<2>(gradient_desc);
    CHECK_EQ(indices.getCols() % window_size, 0);
  }
  // Update m_t.
  // m_t = beta1 * m_{t-1} + (1 - beta1) * grad  (sparse scatter-update).
  dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(this->storages_[0].get())->update(
      *gradient_descs,
      1.0 - beta1_, /* learning_rate */
      1.0, /* regularization_lambda */
      streams);
  // Add regularization within m_t.
  if (!use_sgd_regularization) {
    // m_t = beta1 * m_{t-1} + (1.0 - beta1) grad
    // with grad = grad - lambda * params
    // m_t = beta1 * m_{t-1} + (1.0 - beta1) grad - (1.0 - beta1) * lambda * params
    apply_regularization(
        streams->next(),
        static_cast<FloatT>((1.0 - beta1_) * scaled_regularization_lambda),
        storage->get(),
        m);
  }
  // Update v_t.
  if (conf_.mode() < DENSE_UPDATE_DENSE_VARIANCE) {
    // Sparse-variance path: maintain one scalar per object, fed with the
    // per-column mean of the squared gradient components.
    std::vector<std::unique_ptr<device_matrix<FloatT>>> matrix_ptrs;  // For memory management.
    std::vector<typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType> average_squared_gradients;
    for (const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc : *gradient_descs) {
      device_matrix<FloatT>& gradient = std::get<0>(gradient_desc);
      matrix_ptrs.push_back(
          std::unique_ptr<device_matrix<FloatT>>(
              new device_matrix<FloatT>(
                  1, /* num_rows */
                  gradient.getCols(),
                  gradient.getStream())));
      device_matrix<FloatT>* const average_squared_gradient = matrix_ptrs.back().get();
      // Column-wise sum of squared components ...
      reduce_axis<FloatT, func::square<FloatT>>(
          average_squared_gradient->getStream(),
          FIRST_AXIS,
          gradient,
          average_squared_gradient);
      // ... scaled by 1 / repr_size (exp(-log(n)) == 1/n) to get the mean.
      average_squared_gradient->scale(
          average_squared_gradient->getStream(),
          exp(-log(gradient.getRows())));
      average_squared_gradients.push_back(
          std::forward_as_tuple(*average_squared_gradient,
                                std::get<1>(gradient_desc),
                                std::get<2>(gradient_desc),
                                std::get<3>(gradient_desc)));
    }
    // v_t = beta2 * v_{t-1} + (1 - beta2) * mean(grad^2)  (sparse).
    v_storage->update(average_squared_gradients,
                      1.0 - beta2_, /* learning_rate */
                      1.0, /* regularization_lambda */
                      streams);
  } else {
    CHECK(!use_sgd_regularization);
    // Dense-variance path: materialize the full aggregated gradient,
    // regularize it, square it, and fold it into v densely.
    std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> agg_repr_grad(
        new RepresentationsStorage<FloatT, IdxType>(
            v_storage->num_objects(),
            v_storage->repr_size(),
            DefaultStream::get()));
    agg_repr_grad->initialize_with_null();
    agg_repr_grad->update(*gradient_descs,
                          1.0, /* learning_rate */
                          0.0, /* scaled_regularization_lambda */
                          streams);
    apply_regularization(
        streams->next(),
        scaled_regularization_lambda,
        storage->get(),
        agg_repr_grad->get());
    agg_repr_grad->get()->square(agg_repr_grad->get()->getStream());  // g_t^2
    v_storage->update_dense(merge_streams(v->getStream(),
                                          agg_repr_grad->get()->getStream()),
                            begin(*agg_repr_grad->get()),
                            1.0 - beta2_, /* learning_rate */
                            1.0 /* regularization_lambda */);
  }
  // Compute update.
  // Standard Adam bias correction: sqrt(1 - beta2^t) / (1 - beta1^t).
  const FloatT bias_correction = sqrt(1.0 - pow(beta2_, t_)) / (1.0 - pow(beta1_, t_));
  t_ += 1;
  if (conf_.mode() >= DENSE_UPDATE) {
    const hipStream_t m_v_stream = merge_streams(
        m->getStream(), v->getStream());
    if (conf_.mode() == DENSE_UPDATE) {
      // Dense apply with per-object variance: v is indexed by column, so a
      // permutation iterator broadcasts each object's scalar variance over
      // its repr_size components.
      return storage->update_dense(
          m_v_stream,
          thrust::make_transform_iterator(
              thrust::make_transform_iterator(
                  thrust::make_zip_iterator(
                      thrust::make_tuple(
                          begin(*m),
                          thrust::make_transform_iterator(
                              thrust::make_transform_iterator(
                                  thrust::make_permutation_iterator(
                                      begin(*v), /* elements */
                                      make_matrix_column_iterator(*m) /* map */),
                                  func::sqrt<FloatT>()),
                              func::add_constant<FloatT>(this->epsilon_)))),
                  func::divides_tuple<FloatT>()),
              func::scale_by_constant<FloatT>(bias_correction)),
          learning_rate,
          scaled_regularization_lambda);
    } else if (conf_.mode() == DENSE_UPDATE_DENSE_VARIANCE) {
      // Dense apply with full per-component variance; regularization was
      // already folded into m/v above, so none is applied here.
      return storage->update_dense(
          m_v_stream,
          thrust::make_transform_iterator(
              thrust::make_transform_iterator(
                  thrust::make_zip_iterator(
                      thrust::make_tuple(
                          begin(*m),
                          thrust::make_transform_iterator(
                              thrust::make_transform_iterator(
                                  begin(*v), /* elements */
                                  func::sqrt<FloatT>()),
                              func::add_constant<FloatT>(this->epsilon_)))),
                  func::divides_tuple<FloatT>()),
              func::scale_by_constant<FloatT>(bias_correction)),
          learning_rate,
          0.0 /* scaled_regularzation_lambda */);
    } else {
      LOG(FATAL) << "Invalid mode configuration.";
    }
  } else {
    // This is a variant of the sparse implementation of Adam.
    //
    // Statistics are kept on a per-representation level, but updates are averaged over
    // all objects in one window.
    //
    // The true sparse algorithm would track statistics per representation, but not
    // spread the updates over all objects in one window. It would simply load the
    // right update for every object in the batch. However, this would require
    // deduplicating (i.e., sorting) the object indices, resizing the indices and update
    // tensors and then copying. This is probably more expensive than simply computing the full
    // update, as this avoids the deduplication step (albeit more expensive memory-wise,
    // depending on the batch size, corpus size and the distribution of instances).
    //
    // TODO(cvangysel): implement an option to compute the full gradient.
    CHECK_EQ(gradient_descs->size(), 1)
        << "Sparse Adam currently does not implement multiple gradients.";
    const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc = gradient_descs->front();
    device_matrix<FloatT>& gradient = std::get<0>(gradient_desc);
    const size_t repr_size = gradient.getRows();
    const size_t num_grads = gradient.getCols();
    const device_matrix<IdxType>& indices = std::get<1>(gradient_desc);
    const size_t window_size = std::get<2>(gradient_desc);
    // Rewrite the gradient in place as the Adam direction, then scatter it.
    LAUNCH_KERNEL(
        hipLaunchKernelGGL(( adam_sparse_update_kernel), dim3(num_grads), /* num_blocks */
        repr_size, /* threads_per_block */
        0,
        merge_streams(
            m->getStream(),
            v->getStream()),
        window_size,
        indices.getData(),
        m->getData(),
        v->getData(),
        gradient.getData(),
        bias_correction,
        this->epsilon_));
    CHECK_MATRIX(gradient);
    return storage->update(
        *gradient_descs,
        learning_rate,
        use_sgd_regularization
            ? scaled_regularization_lambda
            : static_cast<FloatT>(0.0), /* scaled_regularization_lambda */
        streams);
  }
}
// Explicit instantiations.
template class AdamTransformGradientUpdater<FLOATING_POINT_TYPE>;
template class AdamRepresentationsGradientUpdater<FLOATING_POINT_TYPE, int32>; | 46e68745a086d5293d7ccb182dbbb5789fca961d.cu | #include "cuNVSM/updates.h"
/*
 * Adam optimizer state for the dense transform parameters.
 *
 * storages_[0] holds the running first-moment estimate (m) and storages_[1]
 * the second-moment estimate (v), each shaped like the transform itself
 * (source_vector_dim x target_vector_dim plus bias). t_ is the 1-based
 * timestep used for bias correction in update().
 */
template <typename FloatT>
AdamTransformGradientUpdater<FloatT>::AdamTransformGradientUpdater(
    const size_t source_vector_dim,
    const size_t target_vector_dim,
    Streams* const streams,
    const FloatT beta1,
    const FloatT beta2,
    const FloatT epsilon)
    : TransformGradientUpdater<FloatT>(
        epsilon,
        {new TransformStorage<FloatT>(source_vector_dim,
                                      target_vector_dim,
                                      streams), /* m_prev */
         new TransformStorage<FloatT>(source_vector_dim,
                                      target_vector_dim,
                                      streams) /* v_prev */}),
      beta1_(beta1), beta2_(beta2), t_(1) {}
/*
 * Rescales `grad` in place to the Adam step direction:
 *     grad <- bias_correction * m / (sqrt(v) + epsilon)
 * computed element-wise over the whole matrix in a single fused
 * thrust::transform (no temporaries are materialized).
 */
template <typename FloatT>
void adam_update(const device_matrix<FloatT>& m,
                 const device_matrix<FloatT>& v,
                 device_matrix<FloatT>* grad,
                 const FloatT bias_correction,
                 const FloatT epsilon) {
    CHECK_DIMENSIONS_EQUAL(m, *grad);
    CHECK_DIMENSIONS_EQUAL(v, *grad);

    thrust::transform(
        // Numerator range: bias_correction * m.
        thrust::make_transform_iterator(
            m.begin(),
            func::scale_by_constant<FloatT>(bias_correction)),
        thrust::make_transform_iterator(
            m.end(),
            func::scale_by_constant<FloatT>(bias_correction)),
        // Denominator range: sqrt(v) + epsilon.
        thrust::make_transform_iterator(
            thrust::make_transform_iterator(
                v.begin(),
                func::sqrt<FloatT>()),
            func::add_constant<FloatT>(epsilon)),
        grad->begin(),
        thrust::divides<FloatT>());
}
/*
 * One Adam step for the transform. Order matters: first l2-regularize the
 * incoming gradient, then fold it into the m/v moment storages, then rescale
 * the gradient to bias_correction * m / (sqrt(v) + epsilon) and apply it
 * through the storage with the caller's learning rate.
 */
template <typename FloatT>
void AdamTransformGradientUpdater<FloatT>::update(
    TransformStorage<FloatT>* const storage,
    typename TransformStorage<FloatT>::GradientType* const gradient_desc,
    const FloatT learning_rate,
    const FloatT scaled_regularization_lambda,
    Streams* const streams) {
    CHECK(storage != nullptr);

    // Regularization is folded into the gradient up front, so the final
    // storage->update below passes lambda = 0.
    apply_regularization(
        streams->next(),
        scaled_regularization_lambda,
        storage,
        gradient_desc);

    // Update m_t: fold the raw gradient into the first-moment storage,
    // scaled by (1 - beta1).
    dynamic_cast<TransformStorage<FloatT>*>(this->storages_[0].get())->update(
        *gradient_desc,
        1.0 - beta1_, /* learning_rate */
        1.0, /* regularization_lambda */
        streams,
        func::identity<FloatT>() /* update_transform_op */);

    // Update v_t: fold the squared gradient into the second-moment storage,
    // scaled by (1 - beta2).
    dynamic_cast<TransformStorage<FloatT>*>(this->storages_[1].get())->update(
        *gradient_desc,
        1.0 - beta2_, /* learning_rate */
        1.0, /* regularization_lambda */
        streams,
        func::square<FloatT>() /* update_transform_op */);

    device_matrix<FloatT>& grad_transform = std::get<0>(*gradient_desc);
    device_matrix<FloatT>& grad_bias = std::get<1>(*gradient_desc);

    device_matrix<FloatT>* m_transform;
    device_matrix<FloatT>* m_bias;

    device_matrix<FloatT>* v_transform;
    device_matrix<FloatT>* v_bias;

    std::tie(m_transform, m_bias) =
        dynamic_cast<TransformStorage<FloatT>*>(this->storages_[0].get())->get();
    std::tie(v_transform, v_bias) =
        dynamic_cast<TransformStorage<FloatT>*>(this->storages_[1].get())->get();

    // Standard Adam bias correction for 1-based timestep t_:
    // sqrt(1 - beta2^t) / (1 - beta1^t).
    const FloatT bias_correction = sqrt(1.0 - pow(beta2_, t_)) / (1.0 - pow(beta1_, t_));

    adam_update(*m_transform, *v_transform, &grad_transform, bias_correction, this->epsilon_);
    adam_update(*m_bias, *v_bias, &grad_bias, bias_correction, this->epsilon_);

    t_ += 1;

    CHECK_MATRIX(grad_transform);
    CHECK_MATRIX(grad_bias);

    return storage->update(
        *gradient_desc, learning_rate,
        static_cast<FloatT>(0.0) /* regularization_lambda */,
        streams);
}
// Shorthands for AdamConf's update modes. The numeric ordering of the enum
// is relied upon below: modes below DENSE_UPDATE_DENSE_VARIANCE keep one
// scalar variance per object (see the v-storage shape in the constructor).
#define DENSE_UPDATE_DENSE_VARIANCE AdamConf::DENSE_UPDATE_DENSE_VARIANCE
#define DENSE_UPDATE AdamConf::DENSE_UPDATE
#define SPARSE AdamConf::SPARSE
/*
 * Adam optimizer state for object representations.
 *
 * storages_[0] (m) always has the full repr_size per object; storages_[1]
 * (v) is a single scalar per object for modes below
 * DENSE_UPDATE_DENSE_VARIANCE, and full repr_size otherwise. t_ is the
 * 1-based timestep used for bias correction.
 */
template <typename FloatT, typename IdxType>
AdamRepresentationsGradientUpdater<FloatT, IdxType>::AdamRepresentationsGradientUpdater(
    const size_t num_objects,
    const size_t repr_size,
    const AdamConf& conf,
    Streams* const streams,
    const FloatT beta1,
    const FloatT beta2,
    const FloatT epsilon)
    : RepresentationsGradientUpdater<FloatT, IdxType>(
        epsilon,
        {new RepresentationsStorage<FloatT, IdxType>(num_objects,
                                                     repr_size,
                                                     streams), /* m_prev */
         new RepresentationsStorage<FloatT, IdxType>(num_objects,
                                                     // Scalar variance per object in sparse modes.
                                                     conf.mode() < DENSE_UPDATE_DENSE_VARIANCE ? 1 : repr_size /* repr_size */,
                                                     streams) /* v_prev */}),
      conf_(conf),
      beta1_(beta1), beta2_(beta2),
      t_(1) {}
/*
 * Sparse Adam step direction for representations.
 *
 * Launch layout (see the call site): gridDim.x == number of gradient
 * columns, blockDim.x == repr_size; thread (blockIdx.x, threadIdx.x)
 * produces one element of `grad`. Every gradient column corresponds to a
 * window of `window_size` object indices; m (per dimension) and v (one
 * scalar per object in this mode) are averaged over that window before
 * forming bias_correction * m / (sqrt(v) + epsilon).
 */
template <typename FloatT, typename IdxType>
__global__
void adam_sparse_update_kernel(const size_t window_size,
                               const IdxType* const indices,
                               const FloatT* const m,
                               const FloatT* const v,
                               FloatT* const grad,
                               const FloatT bias_correction,
                               const FloatT epsilon) {
    FloatT agg_m = 0.0;
    FloatT agg_v = 0.0;

    // Average the moment estimates over all objects in this column's window.
    for (IdxType w = 0; w < window_size; ++w) {
        agg_m += m[indices[blockIdx.x * window_size + w] * blockDim.x + threadIdx.x];
        agg_v += v[indices[blockIdx.x * window_size + w]];
    }

    agg_m /= window_size;
    agg_v /= window_size;

    grad[blockIdx.x * blockDim.x + threadIdx.x] = bias_correction * agg_m / (::sqrt(agg_v) + epsilon);
}
/*
 * One Adam step for object representations. The behavior depends on
 * conf_.mode():
 *   - SPARSE (< DENSE_UPDATE): scalar variance per object; the final step
 *     direction is computed by adam_sparse_update_kernel and applied only
 *     to the objects referenced by the gradient.
 *   - DENSE_UPDATE: scalar variance per object, but the step is applied
 *     densely to all objects.
 *   - DENSE_UPDATE_DENSE_VARIANCE: full per-dimension variance, dense step.
 */
template <typename FloatT, typename IdxType>
void AdamRepresentationsGradientUpdater<FloatT, IdxType>::update(
    RepresentationsStorage<FloatT, IdxType>* const storage,
    typename RepresentationsStorage<FloatT, IdxType>::GradientType* const gradient_descs,
    const FloatT learning_rate,
    const FloatT scaled_regularization_lambda,
    Streams* const streams) {
    CHECK(storage != nullptr);

    // Modes below DENSE_UPDATE_DENSE_VARIANCE fall back to SGD-style
    // regularization at apply time (see the final storage->update call).
    const bool use_sgd_regularization = (conf_.mode() < DENSE_UPDATE_DENSE_VARIANCE);

    if (use_sgd_regularization) {
        LOG_IF_EVERY_N(WARNING, scaled_regularization_lambda > 0.0, 10000)
            << "Sparse variants of Adam currently do not correctly implement l2 regularization.";
    }

    RepresentationsStorage<FloatT, IdxType>* const m_storage =
        dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(
            this->storages_[0].get());
    RepresentationsStorage<FloatT, IdxType>* const v_storage =
        dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(
            this->storages_[1].get());

    device_matrix<FloatT>* m = m_storage->get();
    device_matrix<FloatT>* v = v_storage->get();

    // Invariants: every gradient's row count matches the stored repr size and
    // its index list splits evenly into windows.
    for (const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc : *gradient_descs) {
        device_matrix<FloatT>& gradient = std::get<0>(gradient_desc);

        const size_t repr_size = gradient.getRows();
        const size_t num_grads = gradient.getCols();

        CHECK_EQ((dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(
                      this->storages_[0].get())->get()->getRows()),
                 repr_size);

        const device_matrix<IdxType>& indices = std::get<1>(gradient_desc);
        const size_t window_size = std::get<2>(gradient_desc);

        CHECK_EQ(indices.getCols() % window_size, 0);
    }

    // Update m_t.
    dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(this->storages_[0].get())->update(
        *gradient_descs,
        1.0 - beta1_, /* learning_rate */
        1.0, /* regularization_lambda */
        streams);

    // Add regularization within m_t.
    if (!use_sgd_regularization) {
        // m_t = beta1 * m_{t-1} + (1.0 - beta1) grad
        //    with grad = grad - lambda * params
        // m_t = beta1 * m_{t-1} + (1.0 - beta1) grad - (1.0 - beta1) * lambda * params
        apply_regularization(
            streams->next(),
            static_cast<FloatT>((1.0 - beta1_) * scaled_regularization_lambda),
            storage->get(),
            m);
    }

    // Update v_t.
    if (conf_.mode() < DENSE_UPDATE_DENSE_VARIANCE) {
        // Scalar-variance modes: track one average squared gradient per object.
        std::vector<std::unique_ptr<device_matrix<FloatT>>> matrix_ptrs; // For memory management.
        std::vector<typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType> average_squared_gradients;

        for (const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc : *gradient_descs) {
            device_matrix<FloatT>& gradient = std::get<0>(gradient_desc);

            matrix_ptrs.push_back(
                std::unique_ptr<device_matrix<FloatT>>(
                    new device_matrix<FloatT>(
                        1, /* num_rows */
                        gradient.getCols(),
                        gradient.getStream())));
            device_matrix<FloatT>* const average_squared_gradient = matrix_ptrs.back().get();

            // Column-wise sum of squared gradient entries.
            reduce_axis<FloatT, func::square<FloatT>>(
                average_squared_gradient->getStream(),
                FIRST_AXIS,
                gradient,
                average_squared_gradient);

            // exp(-log(n)) == 1/n: turn the column sums into averages over the
            // representation dimension.
            average_squared_gradient->scale(
                average_squared_gradient->getStream(),
                exp(-log(gradient.getRows())));

            average_squared_gradients.push_back(
                std::forward_as_tuple(*average_squared_gradient,
                                      std::get<1>(gradient_desc),
                                      std::get<2>(gradient_desc),
                                      std::get<3>(gradient_desc)));
        }

        v_storage->update(average_squared_gradients,
                          1.0 - beta2_, /* learning_rate */
                          1.0, /* regularization_lambda */
                          streams);
    } else {
        CHECK(!use_sgd_regularization);

        // Dense-variance mode: aggregate the sparse gradients into a dense
        // matrix, regularize, square element-wise, and fold into v.
        std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> agg_repr_grad(
            new RepresentationsStorage<FloatT, IdxType>(
                v_storage->num_objects(),
                v_storage->repr_size(),
                DefaultStream::get()));
        agg_repr_grad->initialize_with_null();

        agg_repr_grad->update(*gradient_descs,
                              1.0, /* learning_rate */
                              0.0, /* scaled_regularization_lambda */
                              streams);

        apply_regularization(
            streams->next(),
            scaled_regularization_lambda,
            storage->get(),
            agg_repr_grad->get());

        agg_repr_grad->get()->square(agg_repr_grad->get()->getStream()); // g_t^2

        v_storage->update_dense(merge_streams(v->getStream(),
                                              agg_repr_grad->get()->getStream()),
                                begin(*agg_repr_grad->get()),
                                1.0 - beta2_, /* learning_rate */
                                1.0 /* regularization_lambda */);
    }

    // Compute update.
    const FloatT bias_correction = sqrt(1.0 - pow(beta2_, t_)) / (1.0 - pow(beta1_, t_));
    t_ += 1;

    if (conf_.mode() >= DENSE_UPDATE) {
        const cudaStream_t m_v_stream = merge_streams(
            m->getStream(), v->getStream());

        if (conf_.mode() == DENSE_UPDATE) {
            // Scalar variance: gather v through a column-index permutation
            // iterator so every element of a column shares its object's
            // variance, then compute bias_correction * m / (sqrt(v) + eps).
            return storage->update_dense(
                m_v_stream,
                thrust::make_transform_iterator(
                    thrust::make_transform_iterator(
                        thrust::make_zip_iterator(
                            thrust::make_tuple(
                                begin(*m),
                                thrust::make_transform_iterator(
                                    thrust::make_transform_iterator(
                                        thrust::make_permutation_iterator(
                                            begin(*v), /* elements */
                                            make_matrix_column_iterator(*m) /* map */),
                                        func::sqrt<FloatT>()),
                                    func::add_constant<FloatT>(this->epsilon_)))),
                        func::divides_tuple<FloatT>()),
                    func::scale_by_constant<FloatT>(bias_correction)),
                learning_rate,
                scaled_regularization_lambda);
        } else if (conf_.mode() == DENSE_UPDATE_DENSE_VARIANCE) {
            // Full per-dimension variance: same expression, but v is read
            // element-wise (regularization already folded into m above).
            return storage->update_dense(
                m_v_stream,
                thrust::make_transform_iterator(
                    thrust::make_transform_iterator(
                        thrust::make_zip_iterator(
                            thrust::make_tuple(
                                begin(*m),
                                thrust::make_transform_iterator(
                                    thrust::make_transform_iterator(
                                        begin(*v), /* elements */
                                        func::sqrt<FloatT>()),
                                    func::add_constant<FloatT>(this->epsilon_)))),
                        func::divides_tuple<FloatT>()),
                    func::scale_by_constant<FloatT>(bias_correction)),
                learning_rate,
                0.0 /* scaled_regularization_lambda */);
        } else {
            LOG(FATAL) << "Invalid mode configuration.";
        }
    } else {
        // This is a variant of the sparse implementation of Adam.
        //
        // Statistics are kept on a per-representation level, but updates are averaged over
        // all objects in one window.
        //
        // The true sparse algorithm would track statistics per representation, but not
        // spread the updates over all objects in one window. It would simply load the
        // right update for every object in the batch. However, this would require
        // deduplicating (i.e., sorting) the object indices, resizing the indices and update
        // tensors and then copying. This is probably more expensive than simply computing the full
        // update, as this avoids the deduplication step (albeit more expensive memory-wise,
        // depending on the batch size, corpus size and the distribution of instances).
        //
        // TODO(cvangysel): implement an option to compute the full gradient.
        CHECK_EQ(gradient_descs->size(), 1)
            << "Sparse Adam currently does not implement multiple gradients.";

        const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc = gradient_descs->front();

        device_matrix<FloatT>& gradient = std::get<0>(gradient_desc);

        const size_t repr_size = gradient.getRows();
        const size_t num_grads = gradient.getCols();

        const device_matrix<IdxType>& indices = std::get<1>(gradient_desc);
        const size_t window_size = std::get<2>(gradient_desc);

        // One block per gradient column, one thread per representation dim;
        // see adam_sparse_update_kernel for the layout contract.
        LAUNCH_KERNEL(
            adam_sparse_update_kernel<<<num_grads, /* num_blocks */
                                        repr_size, /* threads_per_block */
                                        0,
                                        merge_streams(
                                            m->getStream(),
                                            v->getStream())>>>(
                window_size,
                indices.getData(),
                m->getData(),
                v->getData(),
                gradient.getData(),
                bias_correction,
                this->epsilon_));

        CHECK_MATRIX(gradient);

        return storage->update(
            *gradient_descs,
            learning_rate,
            use_sgd_regularization
                ? scaled_regularization_lambda
                : static_cast<FloatT>(0.0), /* scaled_regularization_lambda */
            streams);
    }
}
// Explicit instantiations.
template class AdamTransformGradientUpdater<FLOATING_POINT_TYPE>;
template class AdamRepresentationsGradientUpdater<FLOATING_POINT_TYPE, int32>; |
02089f93338bc0adeb98e0f946d40b4d640ec66b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <chrono>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust\device_vector.h>
#include <stream_compaction/efficient.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "device_launch_parameters.h"
#include "utilities.h"
#include "tiny_gltf.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/*
 * Builds an RNG seeded per (iteration, pixel index, depth) so samples are
 * decorrelated across pixels and bounces. Relies on the project's utilhash;
 * presumably a pure hash, so a fixed triple yields a deterministic stream.
 */
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
    return thrust::default_random_engine(h);
}
/*
* Kernel that writes the image to the OpenGL PBO directly.
*/
/*
 * Kernel that writes the image to the OpenGL PBO directly.
 * One thread per pixel (2D launch over `resolution`); divides the
 * accumulated radiance by `iter` to average over iterations, then clamps
 * each channel to [0, 255].
 */
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
    int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < resolution.x && y < resolution.y) {
        int index = x + (y * resolution.x);
        glm::vec3 pix = image[index];

        glm::ivec3 color;
        color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
        color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
        color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);

        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
static Scene * hst_scene = NULL;                                // host-side scene description
static glm::vec3 * dev_image = NULL;                            // accumulated radiance per pixel
static Geom * dev_geoms = NULL;                                 // scene primitives
static Triangle * dev_meshes = NULL;                            // triangles for MESH geometry
static Material * dev_materials = NULL;                         // material table
static PathSegment * dev_paths = NULL;                          // one path per pixel
static ShadeableIntersection * dev_intersections = NULL;        // per-path hit records
static ShadeableIntersection * dev_cached_intersections = NULL; // depth-0 cache (CACHE_FIRST_BOUNCE)
static int* dev_leftover_indices = NULL;                        // live-path index list for compaction
/*
 * Allocates and initializes all device buffers for a scene. Buffers are
 * sized by the pixel count (one path/intersection per pixel) and by the
 * scene's geometry/material counts. Must be called before pathtrace();
 * paired with pathtraceFree().
 */
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));

    hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));

    hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);

    hipMalloc(&dev_meshes, scene->mesh.num_triangles * sizeof(Triangle));
    hipMemcpy(dev_meshes, scene->mesh.triangles.data(), scene->mesh.num_triangles * sizeof(Triangle), hipMemcpyHostToDevice);

    hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);

    hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));

    // Cache buffer is filled lazily on iteration 1 (see pathtrace()).
    hipMalloc(&dev_cached_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMalloc(&dev_leftover_indices, pixelcount * sizeof(int));

    checkCUDAError("pathtraceInit");
}
/*
 * Releases every device buffer allocated by pathtraceInit().
 * Safe to call before the first init: hipFree on a null pointer is a no-op.
 */
void pathtraceFree() {
    hipFree(dev_image);  // no-op if dev_image is null
    hipFree(dev_paths);
    hipFree(dev_geoms);
    hipFree(dev_meshes);
    hipFree(dev_materials);
    hipFree(dev_intersections);
    hipFree(dev_cached_intersections);
    hipFree(dev_leftover_indices);

    checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
/**
 * Generate one camera PathSegment per pixel — the first bounce of rays.
 * Launched on a 2D grid covering cam.resolution (guarded against the tail).
 * With ANTI_ALIASING the sub-pixel sample position is jittered in [-0.5, 0.5].
 *
 * Fix: remainingBounces is now assigned *before* it is used to seed the RNG;
 * previously makeSeededRandomEngine read the field while it still held
 * uninitialized device memory.
 */
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, int* dev_leftover_indices)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment & segment = pathSegments[index];

        segment.ray.origin = cam.position;
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);

        // Initialize the bounce budget first so the seed below reads a
        // well-defined value instead of uninitialized memory.
        segment.remainingBounces = traceDepth;

        thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, segment.remainingBounces);
        float jittered_x = x;
        float jittered_y = y;
#if ANTI_ALIASING
        thrust::uniform_real_distribution<float> u01(-0.5f, 0.5f);
        jittered_x += u01(rng);
        jittered_y += u01(rng);
#endif
        segment.ray.direction = glm::normalize(cam.view
            - cam.right * cam.pixelLength.x * ((float)jittered_x - (float)cam.resolution.x * 0.5f)
            - cam.up * cam.pixelLength.y * ((float)jittered_y - (float)cam.resolution.y * 0.5f)
            );

        segment.pixelIndex = index;
        // Every path starts alive; compaction rewrites this list later.
        dev_leftover_indices[index] = index;
    }
}
/*
* Handles ray intersections with cube and sphere
*/
/*
 * One thread per *live* path (dev_leftover_indices maps compacted thread id
 * to the original path index): intersect the path's ray against every
 * geometry (naive linear scan) plus the triangle mesh, and record the
 * closest hit in `intersections[path_index]`.
 *
 * Fix: the stored intersection point previously came from `tmp_intersect`,
 * i.e. whatever geometry happened to be tested *last*, instead of the
 * tracked closest-hit point that t/normal are taken from. It now stores
 * `intersect_point`, consistent with the rest of the record.
 */
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment * pathSegments
    , Geom * geoms
    , Triangle * triangles
    , int num_triangles
    , int geoms_size
    , ShadeableIntersection * intersections
    , int iter
    , int* dev_leftover_indices
    )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < num_paths)
    {
        int path_index = dev_leftover_indices[idx];
        PathSegment pathSegment = pathSegments[path_index];

        float t;
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;

        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;

        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++)
        {
            Geom & geom = geoms[i];

            if (geom.type == CUBE)
            {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE)
            {
#if MOTION_BLUR
                // NOTE(review): this writes back into global `geom` from every
                // thread concurrently. All threads compute the same value for
                // a given iter, but it is still a data race; consider hoisting
                // this transform update to the host per iteration.
                glm::mat4 start = geom.initial_transform;
                glm::mat4 off(1.f);
                off[2] += glm::vec4(0.f, MOTION_BLUR_OFFSET, 0.f, 0.f);
                geom.transform = start + off * (iter / 300.f);
                geom.inverseTransform = glm::inverse(geom.transform);
                geom.invTranspose = glm::transpose(glm::inverse(geom.transform));
#endif
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if(geom.type == MESH){
                t = meshIntersectionTest(triangles, num_triangles, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }

            // Keep the nearest positive hit.
            if (t > 0.0f && t_min > t)
            {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }

        if (hit_geom_index == -1)
        {
            intersections[path_index].t = -1.0f;
        }
        else
        {
            // The ray hits something: record the closest hit.
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
            intersections[path_index].intersection = intersect_point;  // was tmp_intersect (last geom tested)
        }
    }
}
/*
* Real Shader
*/
/*
 * Shade each surviving path: emissive materials terminate the path with the
 * emitted color, misses terminate it with black, and everything else calls
 * scatterRay to pick the next bounce. One thread per *live* path;
 * dev_leftover_indices maps the compacted thread id to the original path
 * index, and dead paths are flagged -1 for the next compaction pass.
 */
__global__ void shadeMaterial(
    int iter
    , int num_paths
    , ShadeableIntersection * shadeableIntersections
    , PathSegment * pathSegments
    , Material * materials
    , int depth
    , int* dev_leftover_indices
    )
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < num_paths && pathSegments[dev_leftover_indices[id]].remainingBounces > 0)
    {
        int idx = dev_leftover_indices[id];
        ShadeableIntersection intersection = shadeableIntersections[idx];
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG (seeded per iteration/path/depth for decorrelation).
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
            thrust::uniform_real_distribution<float> u01(0, 1);

            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;

            // If the material indicates that the object was a light, "light" the ray and terminate that path
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = 0;
            }else {
                scatterRay(pathSegments[idx], intersection.intersection, intersection.surfaceNormal, material, rng);
                pathSegments[idx].remainingBounces -= 1;
            }
        } else {
            // If there was no intersection, color the ray black.
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = 0;
        }
        // Need to remove the dead rays from next iteration
#if COMPACT_RAYS
        if (pathSegments[idx].remainingBounces <= 0)
            dev_leftover_indices[id] = -1;
#endif
    }
}
/*
* Add the current iteration's output to the overall image
*/
/*
 * Accumulates each path's final color into its pixel of the iteration image.
 * One thread per path; 1D launch guarded against the grid tail.
 */
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= nPaths)
    {
        return;
    }
    const PathSegment& path = iterationPaths[tid];
    image[path.pixelIndex] += path.color;
}
/*
* Was using it with thrust::removeif, using my own stream compaction now
*/
// Predicate: true for path-index slots marked dead (-1) by shadeMaterial.
// Kept from an earlier thrust::remove_if-based compaction path.
struct is_complete
{
    __host__ __device__
    bool operator()(const int idx)
    {
        return (idx == -1);
    }
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
/**
 * Host-side driver for one path-tracing iteration:
 *   1. generate camera rays (one PathSegment per pixel),
 *   2. loop: intersect -> (optional material sort) -> shade -> (optional
 *      stream compaction), until all paths die or traceDepth is reached,
 *   3. accumulate results into dev_image and the OpenGL PBO,
 *   4. copy the image back to the host.
 * Also prints wall-clock time for the first 10 iterations.
 */
void pathtrace(uchar4 *pbo, int frame, int iter) {
    auto start = std::chrono::high_resolution_clock::now();
    const int traceDepth = hst_scene->state.traceDepth;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    // 2D block for generating ray from camera
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);

    // 1D block for path tracing
    const int blockSize1d = 128;

    generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths, dev_leftover_indices);
    checkCUDAError("generate camera ray");

    int depth = 0;
    PathSegment* dev_path_end = dev_paths + pixelcount;
    int num_paths = dev_path_end - dev_paths;

    // --- PathSegment Tracing Stage ---
    // Shoot ray into scene, bounce between objects, push shading chunks
    bool iterationComplete = false;
    while (!iterationComplete) {
        // Points at the intersections to shade this bounce; may be swapped to
        // the cached depth-0 buffer below.
        ShadeableIntersection* dev_cur_intersections = dev_intersections;
        // clean shading chunks
        hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));

        // tracing
        dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
#if CACHE_FIRST_BOUNCE && !ANTI_ALIASING
        // Reuse the depth-0 intersections cached during iteration 1 (only
        // valid when rays are not jittered by anti-aliasing).
        if(depth == 0 && iter != 1) {
            dev_cur_intersections = dev_cached_intersections;
        }else {
            computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
                depth
                , num_paths
                , dev_paths
                , dev_geoms
                , dev_meshes
                , hst_scene->mesh.num_triangles
                , hst_scene->geoms.size()
                , dev_intersections
                , iter
                , dev_leftover_indices
                );
            if(depth == 0 && iter == 1){
                hipMemcpy(dev_cached_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
            }
        }
#else
        computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
            depth
            , num_paths
            , dev_paths
            , dev_geoms
            , dev_meshes
            , hst_scene->mesh.num_triangles
            , hst_scene->geoms.size()
            , dev_intersections
            , iter
            , dev_leftover_indices
            );
#endif
        checkCUDAError("trace one bounce");
        hipDeviceSynchronize();
        depth++;

#if MATERIAL_BASED_SORT
        // Sort paths by intersection key so threads in a warp shade similar
        // materials together (reduces divergence in shadeMaterial).
        thrust::device_ptr<ShadeableIntersection> thrust_dev_intersections(dev_intersections);
        thrust::device_ptr<PathSegment> thrust_dev_paths(dev_paths);
        thrust::sort_by_key(thrust::device, thrust_dev_intersections, thrust_dev_intersections + num_paths, thrust_dev_paths);
#endif

        shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
            iter,
            num_paths,
            dev_cur_intersections,
            dev_paths,
            dev_materials,
            depth,
            dev_leftover_indices
            );

#if COMPACT_RAYS
        // Drop the indices shadeMaterial marked -1 (terminated paths).
        num_paths = StreamCompaction::Shared::compactCUDA(num_paths, dev_leftover_indices);
#endif
        if((depth == traceDepth) || num_paths == 0)
            iterationComplete = true;
    }
    num_paths = pixelcount;

    // Assemble this iteration and apply it to the image
    dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
    finalGather << <numBlocksPixels, blockSize1d >> > (pixelcount, dev_image, dev_paths);

    ///////////////////////////////////////////////////////////////////////////

    // Send results to OpenGL buffer for rendering
    sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);

    // Retrieve image from GPU
    hipMemcpy(hst_scene->state.image.data(), dev_image,
        pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);

    checkCUDAError("pathtrace");
    auto finish = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = finish - start;
    if (iter <= 10)
        std::cout << elapsed.count() << "\n";
}
| 02089f93338bc0adeb98e0f946d40b4d640ec66b.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <chrono>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust\device_vector.h>
#include <stream_compaction/efficient.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "device_launch_parameters.h"
#include "utilities.h"
#include "tiny_gltf.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/*
 * Builds an RNG seeded per (iteration, pixel index, depth) so samples are
 * decorrelated across pixels and bounces. Relies on the project's utilhash;
 * presumably a pure hash, so a fixed triple yields a deterministic stream.
 */
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
    return thrust::default_random_engine(h);
}
/*
* Kernel that writes the image to the OpenGL PBO directly.
*/
/*
 * Kernel that writes the image to the OpenGL PBO directly.
 * One thread per pixel (2D launch over `resolution`); divides the
 * accumulated radiance by `iter` to average over iterations, then clamps
 * each channel to [0, 255].
 */
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
    int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < resolution.x && y < resolution.y) {
        int index = x + (y * resolution.x);
        glm::vec3 pix = image[index];

        glm::ivec3 color;
        color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
        color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
        color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);

        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
static Scene * hst_scene = NULL;                                // host-side scene description
static glm::vec3 * dev_image = NULL;                            // accumulated radiance per pixel
static Geom * dev_geoms = NULL;                                 // scene primitives
static Triangle * dev_meshes = NULL;                            // triangles for MESH geometry
static Material * dev_materials = NULL;                         // material table
static PathSegment * dev_paths = NULL;                          // one path per pixel
static ShadeableIntersection * dev_intersections = NULL;        // per-path hit records
static ShadeableIntersection * dev_cached_intersections = NULL; // depth-0 cache (CACHE_FIRST_BOUNCE)
static int* dev_leftover_indices = NULL;                        // live-path index list for compaction
/*
 * Allocates and initializes all device buffers for a scene. Buffers are
 * sized by the pixel count (one path/intersection per pixel) and by the
 * scene's geometry/material counts. Must be called before pathtrace();
 * paired with pathtraceFree().
 */
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));

    cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));

    cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);

    cudaMalloc(&dev_meshes, scene->mesh.num_triangles * sizeof(Triangle));
    cudaMemcpy(dev_meshes, scene->mesh.triangles.data(), scene->mesh.num_triangles * sizeof(Triangle), cudaMemcpyHostToDevice);

    cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);

    cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));

    // Cache buffer is filled lazily on iteration 1 (see pathtrace()).
    cudaMalloc(&dev_cached_intersections, pixelcount * sizeof(ShadeableIntersection));
    cudaMalloc(&dev_leftover_indices, pixelcount * sizeof(int));

    checkCUDAError("pathtraceInit");
}
/*
 * Releases every device buffer allocated by pathtraceInit().
 * Safe to call before the first init: cudaFree on a null pointer is a no-op.
 */
void pathtraceFree() {
    cudaFree(dev_image);  // no-op if dev_image is null
    cudaFree(dev_paths);
    cudaFree(dev_geoms);
    cudaFree(dev_meshes);
    cudaFree(dev_materials);
    cudaFree(dev_intersections);
    cudaFree(dev_cached_intersections);
    cudaFree(dev_leftover_indices);

    checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
/**
 * Generate one camera PathSegment per pixel — the first bounce of rays.
 * Launched on a 2D grid covering cam.resolution (guarded against the tail).
 * With ANTI_ALIASING the sub-pixel sample position is jittered in [-0.5, 0.5].
 *
 * Fix: remainingBounces is now assigned *before* it is used to seed the RNG;
 * previously makeSeededRandomEngine read the field while it still held
 * uninitialized device memory.
 */
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, int* dev_leftover_indices)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment & segment = pathSegments[index];

        segment.ray.origin = cam.position;
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);

        // Initialize the bounce budget first so the seed below reads a
        // well-defined value instead of uninitialized memory.
        segment.remainingBounces = traceDepth;

        thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, segment.remainingBounces);
        float jittered_x = x;
        float jittered_y = y;
#if ANTI_ALIASING
        thrust::uniform_real_distribution<float> u01(-0.5f, 0.5f);
        jittered_x += u01(rng);
        jittered_y += u01(rng);
#endif
        segment.ray.direction = glm::normalize(cam.view
            - cam.right * cam.pixelLength.x * ((float)jittered_x - (float)cam.resolution.x * 0.5f)
            - cam.up * cam.pixelLength.y * ((float)jittered_y - (float)cam.resolution.y * 0.5f)
            );

        segment.pixelIndex = index;
        // Every path starts alive; compaction rewrites this list later.
        dev_leftover_indices[index] = index;
    }
}
/*
* Handles ray intersections with cube and sphere
*/
/*
 * One thread per *live* path (dev_leftover_indices maps compacted thread id
 * to the original path index): intersect the path's ray against every
 * geometry (naive linear scan) plus the triangle mesh, and record the
 * closest hit in `intersections[path_index]`.
 *
 * Fix: the stored intersection point previously came from `tmp_intersect`,
 * i.e. whatever geometry happened to be tested *last*, instead of the
 * tracked closest-hit point that t/normal are taken from. It now stores
 * `intersect_point`, consistent with the rest of the record.
 */
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment * pathSegments
    , Geom * geoms
    , Triangle * triangles
    , int num_triangles
    , int geoms_size
    , ShadeableIntersection * intersections
    , int iter
    , int* dev_leftover_indices
    )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < num_paths)
    {
        int path_index = dev_leftover_indices[idx];
        PathSegment pathSegment = pathSegments[path_index];

        float t;
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;

        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;

        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++)
        {
            Geom & geom = geoms[i];

            if (geom.type == CUBE)
            {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE)
            {
#if MOTION_BLUR
                // NOTE(review): this writes back into global `geom` from every
                // thread concurrently. All threads compute the same value for
                // a given iter, but it is still a data race; consider hoisting
                // this transform update to the host per iteration.
                glm::mat4 start = geom.initial_transform;
                glm::mat4 off(1.f);
                off[2] += glm::vec4(0.f, MOTION_BLUR_OFFSET, 0.f, 0.f);
                geom.transform = start + off * (iter / 300.f);
                geom.inverseTransform = glm::inverse(geom.transform);
                geom.invTranspose = glm::transpose(glm::inverse(geom.transform));
#endif
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if(geom.type == MESH){
                t = meshIntersectionTest(triangles, num_triangles, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }

            // Keep the nearest positive hit.
            if (t > 0.0f && t_min > t)
            {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }

        if (hit_geom_index == -1)
        {
            intersections[path_index].t = -1.0f;
        }
        else
        {
            // The ray hits something: record the closest hit.
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
            intersections[path_index].intersection = intersect_point;  // was tmp_intersect (last geom tested)
        }
    }
}
/*
 * Shades every surviving path for the current bounce.
 *
 * One thread per entry in dev_leftover_indices. Hitting an emissive material
 * accumulates the light and terminates the path; hitting anything else
 * scatters a new ray via scatterRay and spends one bounce; a miss colors the
 * path black and terminates it. With COMPACT_RAYS enabled, dead paths mark
 * their slot with -1 so stream compaction can drop them.
 */
__global__ void shadeMaterial(
	int iter
	, int num_paths
	, ShadeableIntersection * shadeableIntersections
	, PathSegment * pathSegments
	, Material * materials
	, int depth
	, int* dev_leftover_indices
	)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= num_paths) return;

	int path_idx = dev_leftover_indices[tid];
	if (pathSegments[path_idx].remainingBounces <= 0) return;

	ShadeableIntersection isect = shadeableIntersections[path_idx];
	if (isect.t > 0.0f) {
		// valid hit: seed the RNG per (iteration, path, depth)
		thrust::default_random_engine rng = makeSeededRandomEngine(iter, path_idx, depth);
		thrust::uniform_real_distribution<float> u01(0, 1);

		Material mat = materials[isect.materialId];
		glm::vec3 matColor = mat.color;

		if (mat.emittance > 0.0f) {
			// light source: accumulate emitted radiance and stop bouncing
			pathSegments[path_idx].color *= (matColor * mat.emittance);
			pathSegments[path_idx].remainingBounces = 0;
		} else {
			// ordinary surface: bounce a new ray and spend one depth level
			scatterRay(pathSegments[path_idx], isect.intersection, isect.surfaceNormal, mat, rng);
			pathSegments[path_idx].remainingBounces -= 1;
		}
	} else {
		// escaped into the void: contributes nothing
		pathSegments[path_idx].color = glm::vec3(0.0f);
		pathSegments[path_idx].remainingBounces = 0;
	}

#if COMPACT_RAYS
	// flag finished paths so the next compaction pass removes them
	if (pathSegments[path_idx].remainingBounces <= 0)
		dev_leftover_indices[tid] = -1;
#endif
}
/*
 * Accumulates the finished paths of this iteration into the running image.
 * One thread per path; each adds its color into the pixel it originated from.
 */
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
	int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (tid >= nPaths) return;

	PathSegment seg = iterationPaths[tid];
	image[seg.pixelIndex] += seg.color;
}
/*
 * Predicate matching terminated paths (index flagged as -1).
 * Originally used with thrust::remove_if; kept for reference now that a
 * custom stream compaction is in use.
 */
struct is_complete
{
	__host__ __device__ bool operator()(const int idx) { return idx == -1; }
};
/**
 * Wrapper for the __global__ call that sets up the kernel calls and does a ton
 * of memory management
 *
 * Performs ONE path-tracing iteration:
 *   1. generateRayFromCamera fills dev_paths with one primary ray per pixel
 *      (color initialized to white) and resets dev_leftover_indices.
 *   2. Bounce loop (up to traceDepth): computeIntersections finds the closest
 *      hit per live path, shadeMaterial attenuates/terminates/scatters, and
 *      (optionally) stream compaction shrinks the live-path count.
 *   3. finalGather accumulates path colors into dev_image, which is sent to
 *      the PBO and copied back to the host.
 *
 * Optional features controlled by compile-time flags:
 *   CACHE_FIRST_BOUNCE (only valid without ANTI_ALIASING) reuses the depth-0
 *   intersections computed on iteration 1; MATERIAL_BASED_SORT groups paths
 *   by material before shading; COMPACT_RAYS removes dead paths each bounce.
 */
void pathtrace(uchar4 *pbo, int frame, int iter) {
auto start = std::chrono::high_resolution_clock::now();
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
// Spawn one primary ray per pixel (also resets the live-path index list).
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths, dev_leftover_indices);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// points at dev_intersections unless the cached first bounce is reused below
ShadeableIntersection* dev_cur_intersections = dev_intersections;
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
#if CACHE_FIRST_BOUNCE && !ANTI_ALIASING
// depth 0 is deterministic without AA jitter, so after iteration 1 the
// cached intersections can stand in for a full trace
if(depth == 0 && iter != 1) {
dev_cur_intersections = dev_cached_intersections;
}else {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, dev_meshes
, hst_scene->mesh.num_triangles
, hst_scene->geoms.size()
, dev_intersections
, iter
, dev_leftover_indices
);
if(depth == 0 && iter == 1){
cudaMemcpy(dev_cached_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
}
#else
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, dev_meshes
, hst_scene->mesh.num_triangles
, hst_scene->geoms.size()
, dev_intersections
, iter
, dev_leftover_indices
);
#endif
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
#if MATERIAL_BASED_SORT
// NOTE(review): this sorts dev_intersections/dev_paths in place, but
// (a) shading may read dev_cached_intersections instead when the first
// bounce is cached, and (b) dev_leftover_indices still holds pre-sort
// positions -- verify these flags are not combined.
thrust::device_ptr<ShadeableIntersection> thrust_dev_intersections(dev_intersections);
thrust::device_ptr<PathSegment> thrust_dev_paths(dev_paths);
thrust::sort_by_key(thrust::device, thrust_dev_intersections, thrust_dev_intersections + num_paths, thrust_dev_paths);
#endif
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
num_paths,
dev_cur_intersections,
dev_paths,
dev_materials,
depth,
dev_leftover_indices
);
#if COMPACT_RAYS
// drop terminated paths (slots flagged -1 by shadeMaterial)
num_paths = StreamCompaction::Shared::compactCUDA(num_paths, dev_leftover_indices);
#endif
if((depth == traceDepth) || num_paths == 0)
iterationComplete = true;
}
num_paths = pixelcount;
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
// report per-iteration wall time for the first few iterations
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
if (iter <= 10)
std::cout << elapsed.count() << "\n";
}
|
3e7f282922c46db5f92f8275fd1f0ab809abb9fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <stack>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash_quda.h>
#include <color_spinor_field_order.h>
#include <clover_field_order.h>
#include <index_helper.cuh>
#include <color_spinor.h>
#include <linalg.cuh>
#include <dslash_policy.cuh>
namespace quda {
// these should not be namespaced!!
// determines whether the temporal ghost zones are packed with a gather kernel,
// as opposed to multiple calls to hipMemcpy()
static bool kernelPackT = false;
// Accessors for the module-local packing flag.
void setKernelPackT(bool packT) { kernelPackT = packT; }
bool getKernelPackT() { return kernelPackT; }
// Saved packing modes for the push/pop pair below.
static std::stack<bool> kptstack;
// Save the current ghost-zone packing mode on a stack and switch to packT.
// Must be balanced by a later popKernelPackT().
void pushKernelPackT(bool packT)
{
	kptstack.push(getKernelPackT());
	setKernelPackT(packT);
	// a deep stack usually means a popKernelPackT() was forgotten somewhere
	if (kptstack.size() > 10)
		warningQuda("KernelPackT stack contains %u elements. Is there a missing popKernelPackT() somewhere?",
			static_cast<unsigned int>(kptstack.size()));
}
// Restore the ghost-zone packing mode saved by the matching pushKernelPackT().
// It is an error to call this with an empty stack.
void popKernelPackT()
{
	if (kptstack.empty()) errorQuda("popKernelPackT() called with empty stack");
	setKernelPackT(kptstack.top());
	kptstack.pop();
}
namespace dslash {
int it = 0;
// Events used to orchestrate the overlapped pack / gather / comms / scatter
// pipeline; timing is disabled on all of them at creation to reduce overhead.
hipEvent_t packEnd[2];
hipEvent_t gatherStart[Nstream];
hipEvent_t gatherEnd[Nstream];
hipEvent_t scatterStart[Nstream];
hipEvent_t scatterEnd[Nstream];
hipEvent_t dslashStart[2];
// these variables are used for benchmarking the dslash components in isolation
bool dslash_pack_compute;
bool dslash_interior_compute;
bool dslash_exterior_compute;
bool dslash_comms;
bool dslash_copy;
// whether the dslash policy tuner has been enabled
bool dslash_policy_init;
// used to keep track of which policy to start the autotuning
int first_active_policy;
int first_active_p2p_policy;
// list of dslash policies that are enabled
std::vector<QudaDslashPolicy> policies;
// list of p2p policies that are enabled
std::vector<QudaP2PPolicy> p2p_policies;
// string used as a tunekey to ensure we retune if the dslash policy env changes
char policy_string[TuneKey::aux_n];
// FIX this is a hack from hell
// Auxiliary work that can be done while waiting on comms to finish
Worker *aux_worker;
#if TORCH_HIP_VERSION >= 8000
// host-mapped flags used to signal per-stream comms completion to the device
cuuint32_t *commsEnd_h;
hipDeviceptr_t commsEnd_d[Nstream];
#endif
}
// Allocate the events and benchmarking state used by the dslash pipeline.
// Must be called once before any dslash is applied; paired with
// destroyDslashEvents().
void createDslashEvents()
{
	using namespace dslash;
	// timing is disabled on every event to lower synchronization overhead
	for (int s = 0; s < Nstream; s++) {
		hipEventCreateWithFlags(&gatherStart[s], hipEventDisableTiming);
		hipEventCreateWithFlags(&gatherEnd[s], hipEventDisableTiming);
		hipEventCreateWithFlags(&scatterStart[s], hipEventDisableTiming);
		hipEventCreateWithFlags(&scatterEnd[s], hipEventDisableTiming);
	}
	for (int p = 0; p < 2; p++) {
		hipEventCreateWithFlags(&packEnd[p], hipEventDisableTiming);
		hipEventCreateWithFlags(&dslashStart[p], hipEventDisableTiming);
	}
	aux_worker = NULL;

#if TORCH_HIP_VERSION >= 8000
	// host-mapped comms-completion flags, one per stream, zero-initialized
	commsEnd_h = static_cast<cuuint32_t*>(mapped_malloc(Nstream*sizeof(int)));
	for (int s = 0; s < Nstream; s++) {
		hipHostGetDevicePointer((void**)&commsEnd_d[s], commsEnd_h+s, 0);
		commsEnd_h[s] = 0;
	}
#endif
	checkCudaError();

	// all dslash components enabled by default; tuner starts from scratch
	dslash_pack_compute = true;
	dslash_interior_compute = true;
	dslash_exterior_compute = true;
	dslash_comms = true;
	dslash_copy = true;
	dslash_policy_init = false;
	first_active_policy = 0;
	first_active_p2p_policy = 0;

	// list of dslash policies that are enabled
	policies = std::vector<QudaDslashPolicy>(
		static_cast<int>(QudaDslashPolicy::QUDA_DSLASH_POLICY_DISABLED), QudaDslashPolicy::QUDA_DSLASH_POLICY_DISABLED);
	// list of p2p policies that are enabled
	p2p_policies = std::vector<QudaP2PPolicy>(
		static_cast<int>(QudaP2PPolicy::QUDA_P2P_POLICY_DISABLED), QudaP2PPolicy::QUDA_P2P_POLICY_DISABLED);
	strcat(policy_string, ",pol=");
}
// Release the events created by createDslashEvents() and, when available,
// the host-mapped comms-completion buffer.
void destroyDslashEvents()
{
	using namespace dslash;

#if TORCH_HIP_VERSION >= 8000
	host_free(commsEnd_h);
	commsEnd_h = 0;
#endif

	for (int s = 0; s < Nstream; s++) {
		hipEventDestroy(gatherStart[s]);
		hipEventDestroy(gatherEnd[s]);
		hipEventDestroy(scatterStart[s]);
		hipEventDestroy(scatterEnd[s]);
	}
	for (int p = 0; p < 2; p++) {
		hipEventDestroy(packEnd[p]);
		hipEventDestroy(dslashStart[p]);
	}
	checkCudaError();
}
/**
@brief Parameter structure for driving the Gamma operator.
Carries accessors for the in/out fields, the gamma index d, and the
precomputed twist coefficients a (scale), b (chiral twist) and
c (flavor twist, doublet only).
*/
template <typename Float, int nColor>
struct GammaArg {
typedef typename colorspinor_mapper<Float,4,nColor>::type F;
typedef typename mapper<Float>::type RegType;
F out; // output vector field
const F in; // input vector field
const int d; // which gamma matrix are we applying
const int nParity; // number of parities we're working on
bool doublet; // whether we applying the operator to a doublet
const int volumeCB; // checkerboarded volume
RegType a; // scale factor
RegType b; // chiral twist
RegType c; // flavor twist
// d must lie in [0,4]; kappa/mu/epsilon only matter when twist is not
// QUDA_TWIST_GAMMA5_INVALID. dagger flips the sign of the chiral twist b.
GammaArg(ColorSpinorField &out, const ColorSpinorField &in, int d,
RegType kappa=0.0, RegType mu=0.0, RegType epsilon=0.0,
bool dagger=false, QudaTwistGamma5Type twist=QUDA_TWIST_GAMMA5_INVALID)
: out(out), in(in), d(d), nParity(in.SiteSubset()),
doublet(in.TwistFlavor() == QUDA_TWIST_DEG_DOUBLET || in.TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET),
volumeCB(doublet ? in.VolumeCB()/2 : in.VolumeCB()), a(0.0), b(0.0), c(0.0)
{
if (d < 0 || d > 4) errorQuda("Undefined gamma matrix %d", d);
if (in.Nspin() != 4) errorQuda("Cannot apply gamma5 to nSpin=%d field", in.Nspin());
if (!in.isNative() || !out.isNative()) errorQuda("Unsupported field order out=%d in=%d\n", out.FieldOrder(), in.FieldOrder());
// singlet: direct twist is 1 + i b gamma_5; inverse rescales by 1/(1+b^2)
if (in.TwistFlavor() == QUDA_TWIST_SINGLET) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
b = 2.0 * kappa * mu;
a = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
b = -2.0 * kappa * mu;
a = 1.0 / (1.0 + b * b);
}
c = 0.0;
if (dagger) b *= -1.0;
} else if (doublet) {
// doublet: flavors additionally mix through the epsilon term c
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
b = 2.0 * kappa * mu;
c = -2.0 * kappa * epsilon;
a = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
b = -2.0 * kappa * mu;
c = 2.0 * kappa * epsilon;
a = 1.0 / (1.0 + b * b - c * c);
// the inverse only exists when 1 + b^2 - c^2 > 0
if (a <= 0) errorQuda("Invalid twisted mass parameters (kappa=%e, mu=%e, epsilon=%e)\n", kappa, mu, epsilon);
}
if (dagger) b *= -1.0;
}
}
};
// Host-side reference implementation: out = gamma_d * in at every site.
template <typename Float, int nColor, typename Arg>
void gammaCPU(Arg arg)
{
	using RegType = typename mapper<Float>::type;
	for (int parity = 0; parity < arg.nParity; parity++) {
		for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d checkerboard volume
			ColorSpinor<RegType, nColor, 4> site = arg.in(x_cb, parity);
			arg.out(x_cb, parity) = site.gamma(arg.d);
		}
	}
}
// Device kernel: out = gamma_d * in. Thread x indexes the checkerboard
// site, thread y the parity; both dimensions are bounds-checked since the
// tuner may launch more threads than sites.
template <typename Float, int nColor, int d, typename Arg>
__global__ void gammaGPU(Arg arg)
{
	using RegType = typename mapper<Float>::type;
	int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
	int parity = blockDim.y*blockIdx.y + threadIdx.y;
	if (x_cb >= arg.volumeCB || parity >= arg.nParity) return;

	ColorSpinor<RegType, nColor, 4> site = arg.in(x_cb, parity);
	arg.out(x_cb, parity) = site.gamma(d);
}
// Autotuned driver for the gamma application: dispatches to gammaCPU on
// host fields and to the templated gammaGPU kernel on device fields.
template <typename Float, int nColor, typename Arg>
class Gamma : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta; // used for location/volume/aux metadata only
long long flops() const { return 0; }
long long bytes() const { return arg.out.Bytes() + arg.in.Bytes(); }
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
Gamma(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
}
virtual ~Gamma() { }
// Apply gamma_d on the given stream (launch parameters come from the tuner).
// Only d==4 (gamma_5) is instantiated on the GPU path.
void apply(const hipStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
gammaCPU<Float,nColor>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch (arg.d) {
case 4:hipLaunchKernelGGL(( gammaGPU<Float,nColor,4>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
default: errorQuda("%d not instantiated", arg.d);
}
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
// tuning trashes the output field, so save/restore it around trial launches
void preTune() { arg.out.save(); }
void postTune() { arg.out.load(); }
};
// Instantiate and run the gamma application for a fixed precision/color count.
template <typename Float, int nColor>
void ApplyGamma(ColorSpinorField &out, const ColorSpinorField &in, int d)
{
	GammaArg<Float, nColor> arg(out, in, d);
	Gamma<Float, nColor, decltype(arg)> gamma(arg, in);
	gamma.apply(streams[Nstream - 1]);
}
// Dispatch on the number of colors (only nColor=3 is supported).
template <typename Float>
void ApplyGamma(ColorSpinorField &out, const ColorSpinorField &in, int d)
{
	switch (in.Ncolor()) {
	case 3: ApplyGamma<Float, 3>(out, in, d); break;
	default: errorQuda("Unsupported number of colors %d\n", in.Ncolor());
	}
}
// Apply the gamma matrix to a colorspinor field: out(x) = gamma_d * in(x).
// Public entry point: checks metadata consistency then dispatches on precision.
void ApplyGamma(ColorSpinorField &out, const ColorSpinorField &in, int d)
{
	checkPrecision(out, in); // check all precisions match
	checkLocation(out, in); // check all locations match

	switch (in.Precision()) {
	case QUDA_DOUBLE_PRECISION: ApplyGamma<double>(out, in, d); break;
	case QUDA_SINGLE_PRECISION: ApplyGamma<float>(out, in, d); break;
	case QUDA_HALF_PRECISION: ApplyGamma<short>(out, in, d); break;
	case QUDA_QUARTER_PRECISION: ApplyGamma<char>(out, in, d); break;
	default: errorQuda("Unsupported precision %d\n", in.Precision());
	}
}
// Host-side twisted-gamma application: out = a*(in + i*b*gamma_d*in) for a
// flavor singlet; for a doublet the two flavors (interleaved at stride
// volumeCB) additionally mix through the c term with opposite chiral signs.
template <bool doublet, typename Float, int nColor, typename Arg>
void twistGammaCPU(Arg arg)
{
	using RegType = typename mapper<Float>::type;
	for (int parity = 0; parity < arg.nParity; parity++) {
		for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d checkerboard volume
			if (doublet) {
				ColorSpinor<RegType, nColor, 4> f1 = arg.in(x_cb, parity);
				ColorSpinor<RegType, nColor, 4> f2 = arg.in(x_cb + arg.volumeCB, parity);
				arg.out(x_cb, parity) = arg.a * (f1 + arg.b * f1.igamma(arg.d) + arg.c * f2);
				arg.out(x_cb + arg.volumeCB, parity) = arg.a * (f2 - arg.b * f2.igamma(arg.d) + arg.c * f1);
			} else {
				ColorSpinor<RegType, nColor, 4> f = arg.in(x_cb, parity);
				arg.out(x_cb, parity) = arg.a * (f + arg.b * f.igamma(arg.d));
			}
		}
	}
}
// Device kernel: twisted-gamma application (see twistGammaCPU for the math).
// Thread x indexes the checkerboard site, thread y the parity.
template <bool doublet, typename Float, int nColor, int d, typename Arg>
__global__ void twistGammaGPU(Arg arg)
{
	typedef typename mapper<Float>::type RegType;
	int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
	int parity = blockDim.y*blockIdx.y + threadIdx.y;
	if (x_cb >= arg.volumeCB) return;
	// FIX: also guard the parity dimension (matching gammaGPU) -- the tuner
	// may launch more y-threads than nParity, which would read/write out of
	// bounds without this check.
	if (parity >= arg.nParity) return;

	if (!doublet) {
		ColorSpinor<RegType,nColor,4> in = arg.in(x_cb, parity);
		arg.out(x_cb, parity) = arg.a * (in + arg.b * in.igamma(d));
	} else {
		// flavors are interleaved at stride volumeCB and mix via arg.c
		ColorSpinor<RegType,nColor,4> in_1 = arg.in(x_cb+0*arg.volumeCB, parity);
		ColorSpinor<RegType,nColor,4> in_2 = arg.in(x_cb+1*arg.volumeCB, parity);
		arg.out(x_cb + 0 * arg.volumeCB, parity) = arg.a * (in_1 + arg.b * in_1.igamma(d) + arg.c * in_2);
		arg.out(x_cb + 1 * arg.volumeCB, parity) = arg.a * (in_2 - arg.b * in_2.igamma(d) + arg.c * in_1);
	}
}
// Autotuned driver for the twisted-gamma application; dispatches to the CPU
// or GPU implementation and to the singlet/doublet template specialization.
template <typename Float, int nColor, typename Arg>
class TwistGamma : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta; // used for location/volume/aux metadata only
long long flops() const { return 0; }
long long bytes() const { return arg.out.Bytes() + arg.in.Bytes(); }
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
TwistGamma(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
}
virtual ~TwistGamma() { }
// Apply the twist on the given stream. Only d==4 (gamma_5) is instantiated
// on the GPU path.
void apply(const hipStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
// BUG FIX: the doublet and singlet CPU paths are alternatives -- without
// the `else`, a doublet field was processed correctly and then
// immediately overwritten by the singlet version.
if (arg.doublet) twistGammaCPU<true,Float,nColor>(arg);
else twistGammaCPU<false,Float,nColor>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (arg.doublet)
switch (arg.d) {
case 4:hipLaunchKernelGGL(( twistGammaGPU<true,Float,nColor,4>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
default: errorQuda("%d not instantiated", arg.d);
}
else
switch (arg.d) {
case 4:hipLaunchKernelGGL(( twistGammaGPU<false,Float,nColor,4>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
default: errorQuda("%d not instantiated", arg.d);
}
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
// only save/restore around tuning when the operation is in-place
void preTune() { if (arg.out.field == arg.in.field) arg.out.save(); }
void postTune() { if (arg.out.field == arg.in.field) arg.out.load(); }
};
// Instantiate and run the twisted-gamma application for a fixed
// precision/color count.
template <typename Float, int nColor>
void ApplyTwistGamma(ColorSpinorField &out, const ColorSpinorField &in, int d, double kappa, double mu, double epsilon, int dagger, QudaTwistGamma5Type type)
{
	GammaArg<Float, nColor> arg(out, in, d, kappa, mu, epsilon, dagger, type);
	TwistGamma<Float, nColor, decltype(arg)> tg(arg, in);
	tg.apply(streams[Nstream - 1]);
	checkCudaError();
}
// Dispatch on the number of colors (only nColor=3 is supported).
template <typename Float>
void ApplyTwistGamma(ColorSpinorField &out, const ColorSpinorField &in, int d, double kappa, double mu, double epsilon, int dagger, QudaTwistGamma5Type type)
{
	switch (in.Ncolor()) {
	case 3: ApplyTwistGamma<Float, 3>(out, in, d, kappa, mu, epsilon, dagger, type); break;
	default: errorQuda("Unsupported number of colors %d\n", in.Ncolor());
	}
}
// Apply the twisted gamma matrix to a colorspinor field: out(x) = gamma_d*in.
// Public entry point: validates metadata and dispatches on precision.
// Requires the twisted-mass dslash to have been compiled in.
void ApplyTwistGamma(ColorSpinorField &out, const ColorSpinorField &in, int d, double kappa, double mu, double epsilon, int dagger, QudaTwistGamma5Type type)
{
	checkPrecision(out, in); // check all precisions match
	checkLocation(out, in); // check all locations match

#ifdef GPU_TWISTED_MASS_DIRAC
	switch (in.Precision()) {
	case QUDA_DOUBLE_PRECISION: ApplyTwistGamma<double>(out, in, d, kappa, mu, epsilon, dagger, type); break;
	case QUDA_SINGLE_PRECISION: ApplyTwistGamma<float>(out, in, d, kappa, mu, epsilon, dagger, type); break;
	case QUDA_HALF_PRECISION: ApplyTwistGamma<short>(out, in, d, kappa, mu, epsilon, dagger, type); break;
	case QUDA_QUARTER_PRECISION: ApplyTwistGamma<char>(out, in, d, kappa, mu, epsilon, dagger, type); break;
	default: errorQuda("Unsupported precision %d\n", in.Precision());
	}
#else
	errorQuda("Twisted mass dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
// Applies a gamma5 matrix to a spinor: out = gamma_5 * in.
// Thin wrapper to ApplyGamma; index 4 selects gamma_5 in this basis.
void gamma5(ColorSpinorField &out, const ColorSpinorField &in) { ApplyGamma(out,in,4); }
/**
@brief Parameteter structure for driving the clover and twist-clover application kernels
@tparam Float Underlying storage precision
@tparam nSpin Number of spin components
@tparam nColor Number of colors
@tparam dynamic_clover Whether we are inverting the clover field on the fly
*/
template <typename Float, int nSpin, int nColor>
struct CloverArg {
static constexpr int length = (nSpin / (nSpin/2)) * 2 * nColor * nColor * (nSpin/2) * (nSpin/2) / 2;
static constexpr bool dynamic_clover = dynamic_clover_inverse();
typedef typename colorspinor_mapper<Float,nSpin,nColor>::type F;
typedef typename clover_mapper<Float,length>::type C;
typedef typename mapper<Float>::type RegType;
F out; // output vector field
const F in; // input vector field
const C clover; // clover field
const C cloverInv; // inverse clover field (only set if not dynamic clover and doing twisted clover)
const int nParity; // number of parities we're working on
const int parity; // which parity we're acting on (if nParity=1)
bool inverse; // whether we are applying the inverse
bool doublet; // whether we applying the operator to a doublet
const int volumeCB; // checkerboarded volume
RegType a; // chiral twist coefficient (singlet only)
RegType b; // overall scale (1 for direct, 1/(1+a^2) for inverse)
RegType c; // flavor twist (unused: doublet is rejected below)
QudaTwistGamma5Type twist;
// NOTE(review): the initializer list names `in` after `cloverInv`, but
// members are always initialized in declaration order (out, in, clover,
// cloverInv, ...), so this is harmless -- just a -Wreorder warning risk.
CloverArg(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
bool inverse, int parity, RegType kappa=0.0, RegType mu=0.0, RegType epsilon=0.0,
bool dagger = false, QudaTwistGamma5Type twist=QUDA_TWIST_GAMMA5_INVALID)
: out(out), clover(clover, twist == QUDA_TWIST_GAMMA5_INVALID ? inverse : false),
cloverInv(clover, (twist != QUDA_TWIST_GAMMA5_INVALID && !dynamic_clover) ? true : false),
in(in), nParity(in.SiteSubset()), parity(parity), inverse(inverse),
doublet(in.TwistFlavor() == QUDA_TWIST_DEG_DOUBLET || in.TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET),
volumeCB(doublet ? in.VolumeCB()/2 : in.VolumeCB()), a(0.0), b(0.0), c(0.0), twist(twist)
{
if (in.TwistFlavor() == QUDA_TWIST_SINGLET) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
a = 2.0 * kappa * mu;
b = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
a = -2.0 * kappa * mu;
b = 1.0 / (1.0 + a*a);
}
c = 0.0;
if (dagger) a *= -1.0;
} else if (doublet) {
errorQuda("ERROR: Non-degenerated twisted-mass not supported in this regularization\n");
}
}
};
// Apply the (block-diagonal) clover matrix to the spinor at one site.
// Works per chirality in the chiral basis: project, multiply by the
// Hermitian clover block A (or solve with A via Cholesky when the inverse
// is computed on the fly), then reconstruct and change basis back.
template <typename Float, int nSpin, int nColor, typename Arg>
__device__ __host__ inline void cloverApply(Arg &arg, int x_cb, int parity) {
using namespace linalg; // for Cholesky
typedef typename mapper<Float>::type RegType;
typedef ColorSpinor<RegType, nColor, nSpin> Spinor;
typedef ColorSpinor<RegType, nColor, nSpin / 2> HalfSpinor;
// single-parity fields store their data at parity index 0
int spinor_parity = arg.nParity == 2 ? parity : 0;
Spinor in = arg.in(x_cb, spinor_parity);
Spinor out;
in.toRel(); // change to chiral basis here
#pragma unroll
for (int chirality=0; chirality<2; chirality++) {
HMatrix<RegType,nColor*nSpin/2> A = arg.clover(x_cb, parity, chirality);
HalfSpinor chi = in.chiral_project(chirality);
if (arg.dynamic_clover) {
// invert on the fly: chi <- A^{-1} chi via Cholesky solve; the 0.25
// compensates the clover storage normalization
Cholesky<HMatrix, RegType, nColor * nSpin / 2> cholesky(A);
chi = static_cast<RegType>(0.25) * cholesky.backward(cholesky.forward(chi));
} else {
chi = A * chi;
}
out += chi.chiral_reconstruct(chirality);
}
out.toNonRel(); // change basis back
arg.out(x_cb, spinor_parity) = out;
}
// Host fallback: apply the clover matrix at every site. When nParity==1
// only the field's designated parity (arg.parity) is processed.
template <typename Float, int nSpin, int nColor, typename Arg>
void cloverCPU(Arg &arg) {
	for (int p = 0; p < arg.nParity; p++) {
		const int parity = (arg.nParity == 2) ? p : arg.parity;
		for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) cloverApply<Float,nSpin,nColor>(arg, x_cb, parity);
	}
}
// Device kernel: one thread per checkerboard site (x); thread y selects the
// parity for full fields, otherwise arg.parity is used.
template <typename Float, int nSpin, int nColor, typename Arg>
__global__ void cloverGPU(Arg arg) {
	int site = blockIdx.x*blockDim.x + threadIdx.x;
	if (site >= arg.volumeCB) return;
	int parity = (arg.nParity == 2) ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;
	cloverApply<Float,nSpin,nColor>(arg, site, parity);
}
// Autotuned driver for the plain clover application (cloverCPU/cloverGPU).
template <typename Float, int nSpin, int nColor, typename Arg>
class Clover : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta; // used for location/volume/aux metadata only
protected:
long long flops() const { return arg.nParity*arg.volumeCB*504ll; }
long long bytes() const { return arg.out.Bytes() + arg.in.Bytes() + arg.nParity*arg.volumeCB*arg.clover.Bytes(); }
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
Clover(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
}
virtual ~Clover() { }
// Apply the clover term on the given stream (tuned launch on the GPU path).
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
cloverCPU<Float,nSpin,nColor>(arg);
} else {
hipLaunchKernelGGL(( cloverGPU<Float,nSpin,nColor>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { if (arg.out.field == arg.in.field) arg.out.save(); } // Need to save the out field if it aliases the in field
void postTune() { if (arg.out.field == arg.in.field) arg.out.load(); } // Restore if the in and out fields alias
};
// Instantiate and run the clover application for a fixed precision/color
// count. The direct and inverse cases share the same driver -- the inverse
// flag is carried inside the arg (it selects which clover accessor is
// loaded) -- so the previous duplicated if/else branches, which were
// byte-identical, have been collapsed into a single path.
template <typename Float, int nColor>
void ApplyClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover, bool inverse, int parity)
{
	if (in.Nspin() != 4) errorQuda("Unsupported nSpin=%d", in.Nspin());
	constexpr int nSpin = 4;

	CloverArg<Float, nSpin, nColor> arg(out, in, clover, inverse, parity);
	Clover<Float, nSpin, nColor, decltype(arg)> worker(arg, in);
	worker.apply(streams[Nstream - 1]);
	checkCudaError();
}
// Dispatch on the number of colors (only nColor=3 is supported).
template <typename Float>
void ApplyClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover, bool inverse, int parity)
{
	switch (in.Ncolor()) {
	case 3: ApplyClover<Float, 3>(out, in, clover, inverse, parity); break;
	default: errorQuda("Unsupported number of colors %d\n", in.Ncolor());
	}
}
// Apply the clover matrix field to a colorspinor field: out(x) = clover*in.
// Public entry point: validates metadata and dispatches on precision.
// Requires the clover dslash to have been compiled in.
void ApplyClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover, bool inverse, int parity)
{
checkPrecision(out, clover, in); // check all precisions match
checkLocation(out, clover, in); // check all locations match
#ifdef GPU_CLOVER_DIRAC
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyClover<double>(out, in, clover, inverse, parity);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
ApplyClover<float>(out, in, clover, inverse, parity);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
ApplyClover<short>(out, in, clover, inverse, parity);
} else if (in.Precision() == QUDA_QUARTER_PRECISION) {
ApplyClover<char>(out, in, clover, inverse, parity);
} else {
errorQuda("Unsupported precision %d\n", in.Precision());
}
#else
errorQuda("Clover dslash has not been built");
#endif // GPU_CLOVER_DIRAC
}
// if (!inverse) apply (Clover + i*a*gamma_5) to the input spinor
// else apply (Clover + i*a*gamma_5)/(Clover^2 + a^2) to the input spinor
// Works per chirality in the chiral basis; the sign of the twist term
// alternates with chirality (the +/-0.5 in j), and the 0.5/2.0/0.25 factors
// compensate the clover storage normalization.
template <bool inverse, typename Float, int nSpin, int nColor, typename Arg>
__device__ __host__ inline void twistCloverApply(Arg &arg, int x_cb, int parity) {
using namespace linalg; // for Cholesky
constexpr int N = nColor*nSpin/2;
typedef typename mapper<Float>::type RegType;
typedef ColorSpinor<RegType,nColor,nSpin> Spinor;
typedef ColorSpinor<RegType,nColor,nSpin/2> HalfSpinor;
typedef HMatrix<RegType,N> Mat;
// single-parity fields store their data at parity index 0
int spinor_parity = arg.nParity == 2 ? parity : 0;
Spinor in = arg.in(x_cb, spinor_parity);
Spinor out;
in.toRel(); // change to chiral basis here
#pragma unroll
for (int chirality=0; chirality<2; chirality++) {
// factor of 2 comes from clover normalization we need to correct for
const complex<RegType> j(0.0, chirality == 0 ? static_cast<RegType>(0.5) : -static_cast<RegType>(0.5));
Mat A = arg.clover(x_cb, parity, chirality);
HalfSpinor in_chi = in.chiral_project(chirality);
// direct application: (A +/- i*a/2) chi
HalfSpinor out_chi = A*in_chi + j*arg.a*in_chi;
if (inverse) {
if (arg.dynamic_clover) {
// solve with (A^2 + (a/2)^2) on the fly via Cholesky
Mat A2 = A.square();
A2 += arg.a*arg.a*static_cast<RegType>(0.25);
Cholesky<HMatrix,RegType,N> cholesky(A2);
out_chi = static_cast<RegType>(0.25)*cholesky.backward(cholesky.forward(out_chi));
} else {
// use the precomputed inverse clover block
Mat Ainv = arg.cloverInv(x_cb, parity, chirality);
out_chi = static_cast<RegType>(2.0)*(Ainv*out_chi);
}
}
out += (out_chi).chiral_reconstruct(chirality);
}
out.toNonRel(); // change basis back
arg.out(x_cb, spinor_parity) = out;
}
// Host fallback: apply the twisted-clover operator at every site. When
// nParity==1 only the field's designated parity (arg.parity) is processed.
template <bool inverse, typename Float, int nSpin, int nColor, typename Arg>
void twistCloverCPU(Arg &arg) {
	for (int p = 0; p < arg.nParity; p++) {
		const int parity = (arg.nParity == 2) ? p : arg.parity;
		for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) twistCloverApply<inverse,Float,nSpin,nColor>(arg, x_cb, parity);
	}
}
// Device kernel: one thread per checkerboard site (x); thread y selects the
// parity for full fields, otherwise arg.parity is used.
template <bool inverse, typename Float, int nSpin, int nColor, typename Arg>
__global__ void twistCloverGPU(Arg arg) {
	int site = blockIdx.x*blockDim.x + threadIdx.x;
	if (site >= arg.volumeCB) return;
	int parity = (arg.nParity == 2) ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;
	twistCloverApply<inverse,Float,nSpin,nColor>(arg, site, parity);
}
// Autotuned driver for the twisted-clover application
// (twistCloverCPU/twistCloverGPU), selecting the direct or inverse variant.
template <typename Float, int nSpin, int nColor, typename Arg>
class TwistClover : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta; // used for location/volume/aux metadata only
protected:
long long flops() const { return (arg.inverse ? 1056ll : 552ll) * arg.nParity*arg.volumeCB; }
long long bytes() const {
long long rtn = arg.out.Bytes() + arg.in.Bytes() + arg.nParity*arg.volumeCB*arg.clover.Bytes();
// the precomputed inverse clover is only streamed for the non-dynamic inverse
if (arg.twist == QUDA_TWIST_GAMMA5_INVERSE && !arg.dynamic_clover)
rtn += arg.nParity*arg.volumeCB*arg.cloverInv.Bytes();
return rtn;
}
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
TwistClover(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
// direct vs inverse take different code paths, so tune them separately
strcat(aux, arg.inverse ? ",inverse" : ",direct");
}
virtual ~TwistClover() { }
// Apply the twisted-clover term on the given stream.
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
if (arg.inverse) twistCloverCPU<true,Float,nSpin,nColor>(arg);
else twistCloverCPU<false,Float,nSpin,nColor>(arg);
} else {
if (arg.inverse)hipLaunchKernelGGL(( twistCloverGPU<true,Float,nSpin,nColor>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
elsehipLaunchKernelGGL(( twistCloverGPU<false,Float,nSpin,nColor>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { if (arg.out.field == arg.in.field) arg.out.save(); } // Need to save the out field if it aliases the in field
void postTune() { if (arg.out.field == arg.in.field) arg.out.load(); } // Restore if the in and out fields alias
};
// Instantiate and run the twisted-clover application for a concrete
// precision / color-count pair. All twist types other than
// QUDA_TWIST_GAMMA5_DIRECT take the inverse path.
template <typename Float, int nColor>
void ApplyTwistClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
double kappa, double mu, double epsilon, int parity, int dagger, QudaTwistGamma5Type twist)
{
if (in.Nspin() != 4) errorQuda("Unsupported nSpin=%d", in.Nspin());
constexpr int nSpin = 4;
bool inverse = twist == QUDA_TWIST_GAMMA5_DIRECT ? false : true;
CloverArg<Float,nSpin,nColor> arg(out, in, clover, inverse, parity, kappa, mu, epsilon, dagger, twist);
TwistClover<Float,nSpin,nColor,decltype(arg)> worker(arg, in);
worker.apply(streams[Nstream-1]);
checkCudaError();
}
// template on the number of colors (only nColor == 3 is instantiated)
template <typename Float>
void ApplyTwistClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
double kappa, double mu, double epsilon, int parity, int dagger, QudaTwistGamma5Type twist)
{
if (in.Ncolor() == 3) {
ApplyTwistClover<Float,3>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else {
errorQuda("Unsupported number of colors %d\n", in.Ncolor());
}
}
// Apply the twisted-clover matrix field to a colorspinor field.
// Public entry point: validates precision/location agreement, then dispatches
// on the storage precision of the input field.
void ApplyTwistClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
double kappa, double mu, double epsilon, int parity, int dagger, QudaTwistGamma5Type twist)
{
checkPrecision(out, clover, in); // check all precisions match
checkLocation(out, clover, in); // check all locations match
#ifdef GPU_CLOVER_DIRAC
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyTwistClover<double>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
ApplyTwistClover<float>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
ApplyTwistClover<short>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else if (in.Precision() == QUDA_QUARTER_PRECISION) {
ApplyTwistClover<char>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else {
errorQuda("Unsupported precision %d\n", in.Precision());
}
#else
errorQuda("Clover dslash has not been built");
#endif // GPU_CLOVER_DIRAC
}
} // namespace quda
| 3e7f282922c46db5f92f8275fd1f0ab809abb9fe.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <stack>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash_quda.h>
#include <color_spinor_field_order.h>
#include <clover_field_order.h>
#include <index_helper.cuh>
#include <color_spinor.h>
#include <linalg.cuh>
#include <dslash_policy.cuh>
namespace quda {
// these should not be namespaced!!
// determines whether the temporal ghost zones are packed with a gather kernel,
// as opposed to multiple calls to cudaMemcpy()
// Global flag selecting whether temporal ghost zones are packed with a
// gather kernel (true) or via multiple cudaMemcpy calls (false).
static bool kernelPackT = false;
void setKernelPackT(bool packT) { kernelPackT = packT; }
bool getKernelPackT() { return kernelPackT; }
// Stack allowing callers to temporarily override kernelPackT and later
// restore the previous setting.
static std::stack<bool> kptstack;
// Save the current kernelPackT setting and install a new one.
void pushKernelPackT(bool packT)
{
kptstack.push(getKernelPackT());
setKernelPackT(packT);
// a deep stack suggests unbalanced push/pop usage — warn, don't fail
if (kptstack.size() > 10)
{
warningQuda("KernelPackT stack contains %u elements. Is there a missing popKernelPackT() somewhere?",
static_cast<unsigned int>(kptstack.size()));
}
}
// Restore the kernelPackT setting saved by the matching pushKernelPackT().
void popKernelPackT()
{
if (kptstack.empty())
{
errorQuda("popKernelPackT() called with empty stack");
}
setKernelPackT(kptstack.top());
kptstack.pop();
}
// Shared state used by the dslash policy machinery: per-stream events for
// pack/gather/scatter overlap, benchmarking toggles, and the enabled
// dslash/p2p policy lists. Initialized in createDslashEvents().
namespace dslash {
int it = 0;
cudaEvent_t packEnd[2];
cudaEvent_t gatherStart[Nstream];
cudaEvent_t gatherEnd[Nstream];
cudaEvent_t scatterStart[Nstream];
cudaEvent_t scatterEnd[Nstream];
cudaEvent_t dslashStart[2];
// these variables are used for benchmarking the dslash components in isolation
bool dslash_pack_compute;
bool dslash_interior_compute;
bool dslash_exterior_compute;
bool dslash_comms;
bool dslash_copy;
// whether the dslash policy tuner has been enabled
bool dslash_policy_init;
// used to keep track of which policy to start the autotuning
int first_active_policy;
int first_active_p2p_policy;
// list of dslash policies that are enabled
std::vector<QudaDslashPolicy> policies;
// list of p2p policies that are enabled
std::vector<QudaP2PPolicy> p2p_policies;
// string used as a tunekey to ensure we retune if the dslash policy env changes
char policy_string[TuneKey::aux_n];
// FIX this is a hack from hell
// Auxiliary work that can be done while waiting on comms to finish
Worker *aux_worker;
#if CUDA_VERSION >= 8000
// host-mapped flags used to signal comms completion to spin-waiting kernels
cuuint32_t *commsEnd_h;
CUdeviceptr commsEnd_d[Nstream];
#endif
}
// Allocate the CUDA events and policy state used by the dslash machinery.
// Must be called once before any dslash is applied; paired with
// destroyDslashEvents().
void createDslashEvents()
{
using namespace dslash;
// add cudaEventDisableTiming for lower sync overhead
for (int i=0; i<Nstream; i++) {
cudaEventCreateWithFlags(&gatherStart[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&gatherEnd[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&scatterStart[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&scatterEnd[i], cudaEventDisableTiming);
}
for (int i=0; i<2; i++) {
cudaEventCreateWithFlags(&packEnd[i], cudaEventDisableTiming);
cudaEventCreateWithFlags(&dslashStart[i], cudaEventDisableTiming);
}
aux_worker = NULL;
#if CUDA_VERSION >= 8000
// mapped (zero-copy) host flags so device code can observe comms completion
commsEnd_h = static_cast<cuuint32_t*>(mapped_malloc(Nstream*sizeof(int)));
for (int i=0; i<Nstream; i++) {
cudaHostGetDevicePointer((void**)&commsEnd_d[i], commsEnd_h+i, 0);
commsEnd_h[i] = 0;
}
#endif
checkCudaError();
dslash_pack_compute = true;
dslash_interior_compute = true;
dslash_exterior_compute = true;
dslash_comms = true;
dslash_copy = true;
dslash_policy_init = false;
first_active_policy = 0;
first_active_p2p_policy = 0;
// list of dslash policies that are enabled; sized by the DISABLED enumerator
// and filled with DISABLED until the policy tuner populates it
policies = std::vector<QudaDslashPolicy>(
static_cast<int>(QudaDslashPolicy::QUDA_DSLASH_POLICY_DISABLED), QudaDslashPolicy::QUDA_DSLASH_POLICY_DISABLED);
// list of p2p policies that are enabled
p2p_policies = std::vector<QudaP2PPolicy>(
static_cast<int>(QudaP2PPolicy::QUDA_P2P_POLICY_DISABLED), QudaP2PPolicy::QUDA_P2P_POLICY_DISABLED);
// policy_string has static storage so is zero-initialized; append the prefix
strcat(policy_string, ",pol=");
}
// Release the events and mapped host memory created by createDslashEvents().
void destroyDslashEvents()
{
using namespace dslash;
#if CUDA_VERSION >= 8000
host_free(commsEnd_h);
commsEnd_h = 0;
#endif
for (int i=0; i<Nstream; i++) {
cudaEventDestroy(gatherStart[i]);
cudaEventDestroy(gatherEnd[i]);
cudaEventDestroy(scatterStart[i]);
cudaEventDestroy(scatterEnd[i]);
}
for (int i=0; i<2; i++) {
cudaEventDestroy(packEnd[i]);
cudaEventDestroy(dslashStart[i]);
}
checkCudaError();
}
/**
@brief Parameter structure for driving the Gamma operator
*/
// Kernel argument struct for the (twisted-)gamma application kernels.
// Precomputes the scale factor a, chiral-twist coefficient b = ±2*kappa*mu
// and flavor-twist coefficient c = ∓2*kappa*epsilon from the requested
// twist type; for the inverse twist, a absorbs the normalization
// 1/(1 + b^2 [- c^2]).
template <typename Float, int nColor>
struct GammaArg {
typedef typename colorspinor_mapper<Float,4,nColor>::type F;
typedef typename mapper<Float>::type RegType;
F out; // output vector field
const F in; // input vector field
const int d; // which gamma matrix are we applying
const int nParity; // number of parities we're working on
bool doublet; // whether we applying the operator to a doublet
const int volumeCB; // checkerboarded volume
RegType a; // scale factor
RegType b; // chiral twist
RegType c; // flavor twist
GammaArg(ColorSpinorField &out, const ColorSpinorField &in, int d,
RegType kappa=0.0, RegType mu=0.0, RegType epsilon=0.0,
bool dagger=false, QudaTwistGamma5Type twist=QUDA_TWIST_GAMMA5_INVALID)
: out(out), in(in), d(d), nParity(in.SiteSubset()),
doublet(in.TwistFlavor() == QUDA_TWIST_DEG_DOUBLET || in.TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET),
volumeCB(doublet ? in.VolumeCB()/2 : in.VolumeCB()), a(0.0), b(0.0), c(0.0)
{
// validate the gamma index, spin count and field order up front
if (d < 0 || d > 4) errorQuda("Undefined gamma matrix %d", d);
if (in.Nspin() != 4) errorQuda("Cannot apply gamma5 to nSpin=%d field", in.Nspin());
if (!in.isNative() || !out.isNative()) errorQuda("Unsupported field order out=%d in=%d\n", out.FieldOrder(), in.FieldOrder());
if (in.TwistFlavor() == QUDA_TWIST_SINGLET) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
b = 2.0 * kappa * mu;
a = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
b = -2.0 * kappa * mu;
a = 1.0 / (1.0 + b * b);
}
c = 0.0;
if (dagger) b *= -1.0; // daggered operator flips the sign of the twist
} else if (doublet) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
b = 2.0 * kappa * mu;
c = -2.0 * kappa * epsilon;
a = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
b = -2.0 * kappa * mu;
c = 2.0 * kappa * epsilon;
a = 1.0 / (1.0 + b * b - c * c);
// the non-degenerate normalization can become singular for bad parameters
if (a <= 0) errorQuda("Invalid twisted mass parameters (kappa=%e, mu=%e, epsilon=%e)\n", kappa, mu, epsilon);
}
if (dagger) b *= -1.0;
}
}
};
// CPU kernel for applying the gamma matrix to a colorspinor
// CPU implementation: apply gamma_d site-by-site over all parities.
template <typename Float, int nColor, typename Arg>
void gammaCPU(Arg arg)
{
typedef typename mapper<Float>::type RegType;
for (int parity= 0; parity < arg.nParity; parity++) {
for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
ColorSpinor<RegType,nColor,4> in = arg.in(x_cb, parity);
arg.out(x_cb, parity) = in.gamma(arg.d);
} // 4-d volumeCB
} // parity
}
// GPU kernel: one thread per checkerboarded site in x, parity in y.
// The gamma index d is a compile-time template parameter here.
template <typename Float, int nColor, int d, typename Arg>
__global__ void gammaGPU(Arg arg)
{
typedef typename mapper<Float>::type RegType;
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
int parity = blockDim.y*blockIdx.y + threadIdx.y;
// guard both grid dimensions against the launch tail
if (x_cb >= arg.volumeCB) return;
if (parity >= arg.nParity) return;
ColorSpinor<RegType,nColor,4> in = arg.in(x_cb, parity);
arg.out(x_cb, parity) = in.gamma(d);
}
// Autotuned launcher for the plain gamma-matrix application. Only d == 4
// (gamma5) is instantiated on the GPU path.
template <typename Float, int nColor, typename Arg>
class Gamma : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
long long flops() const { return 0; }
long long bytes() const { return arg.out.Bytes() + arg.in.Bytes(); }
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
Gamma(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
}
virtual ~Gamma() { }
void apply(const cudaStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
gammaCPU<Float,nColor>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch (arg.d) {
case 4: gammaGPU<Float,nColor,4> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
default: errorQuda("%d not instantiated", arg.d);
}
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
// out is always saved/restored since tuning re-runs the kernel
void preTune() { arg.out.save(); }
void postTune() { arg.out.load(); }
};
// Instantiate and run the gamma application for a concrete precision and
// color count.
template <typename Float, int nColor>
void ApplyGamma(ColorSpinorField &out, const ColorSpinorField &in, int d)
{
GammaArg<Float,nColor> arg(out, in, d);
Gamma<Float,nColor,GammaArg<Float,nColor> > gamma(arg, in);
gamma.apply(streams[Nstream-1]);
checkCudaError(); // added for consistency with ApplyTwistGamma: surface launch errors here
}
// template on the number of colors (only nColor == 3 is instantiated)
template <typename Float>
void ApplyGamma(ColorSpinorField &out, const ColorSpinorField &in, int d)
{
if (in.Ncolor() == 3) {
ApplyGamma<Float,3>(out, in, d);
} else {
errorQuda("Unsupported number of colors %d\n", in.Ncolor());
}
}
//Apply the Gamma matrix to a colorspinor field
//out(x) = gamma_d*in
// Public entry point: out(x) = gamma_d * in(x). Validates that precision
// and location agree, then dispatches on storage precision.
void ApplyGamma(ColorSpinorField &out, const ColorSpinorField &in, int d)
{
checkPrecision(out, in); // check all precisions match
checkLocation(out, in); // check all locations match
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyGamma<double>(out, in, d);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
ApplyGamma<float>(out, in, d);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
ApplyGamma<short>(out, in, d);
} else if (in.Precision() == QUDA_QUARTER_PRECISION) {
ApplyGamma<char>(out, in, d);
} else {
errorQuda("Unsupported precision %d\n", in.Precision());
}
}
// CPU kernel for applying the gamma matrix to a colorspinor
// CPU implementation of the twisted gamma operator:
//   singlet: out = a * (in + i*b*gamma_d*in)
//   doublet: the two flavors live in consecutive volumeCB-sized sub-fields
//            and are additionally mixed by the flavor-twist term c.
template <bool doublet, typename Float, int nColor, typename Arg>
void twistGammaCPU(Arg arg)
{
typedef typename mapper<Float>::type RegType;
for (int parity= 0; parity < arg.nParity; parity++) {
for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
if (!doublet) {
ColorSpinor<RegType,nColor,4> in = arg.in(x_cb, parity);
arg.out(x_cb, parity) = arg.a * (in + arg.b * in.igamma(arg.d));
} else {
ColorSpinor<RegType,nColor,4> in_1 = arg.in(x_cb+0*arg.volumeCB, parity);
ColorSpinor<RegType,nColor,4> in_2 = arg.in(x_cb+1*arg.volumeCB, parity);
// note the opposite sign of the b term on the second flavor
arg.out(x_cb + 0 * arg.volumeCB, parity) = arg.a * (in_1 + arg.b * in_1.igamma(arg.d) + arg.c * in_2);
arg.out(x_cb + 1 * arg.volumeCB, parity) = arg.a * (in_2 - arg.b * in_2.igamma(arg.d) + arg.c * in_1);
}
} // 4-d volumeCB
} // parity
}
// GPU Kernel for applying the gamma matrix to a colorspinor
// GPU kernel for the twisted gamma operator; x indexes the checkerboarded
// 4-d volume, y indexes parity. For doublets the two flavors are stored as
// consecutive volumeCB-sized sub-fields and are mixed by the c (epsilon) term.
template <bool doublet, typename Float, int nColor, int d, typename Arg>
__global__ void twistGammaGPU(Arg arg)
{
typedef typename mapper<Float>::type RegType;
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
int parity = blockDim.y*blockIdx.y + threadIdx.y;
if (x_cb >= arg.volumeCB) return;
if (parity >= arg.nParity) return; // guard the y tail, consistent with gammaGPU
if (!doublet) {
ColorSpinor<RegType,nColor,4> in = arg.in(x_cb, parity);
arg.out(x_cb, parity) = arg.a * (in + arg.b * in.igamma(d));
} else {
ColorSpinor<RegType,nColor,4> in_1 = arg.in(x_cb+0*arg.volumeCB, parity);
ColorSpinor<RegType,nColor,4> in_2 = arg.in(x_cb+1*arg.volumeCB, parity);
// note the opposite sign of the b term on the second flavor
arg.out(x_cb + 0 * arg.volumeCB, parity) = arg.a * (in_1 + arg.b * in_1.igamma(d) + arg.c * in_2);
arg.out(x_cb + 1 * arg.volumeCB, parity) = arg.a * (in_2 - arg.b * in_2.igamma(d) + arg.c * in_1);
}
}
// Autotuned launcher for the twisted gamma application. Dispatches on field
// location and on whether the input is a flavor doublet.
template <typename Float, int nColor, typename Arg>
class TwistGamma : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
long long flops() const { return 0; }
long long bytes() const { return arg.out.Bytes() + arg.in.Bytes(); }
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
TwistGamma(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
}
virtual ~TwistGamma() { }
void apply(const cudaStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
// bug fix: the non-doublet CPU kernel was previously run unconditionally
// (missing "else"), clobbering the doublet result
if (arg.doublet) twistGammaCPU<true,Float,nColor>(arg);
else twistGammaCPU<false,Float,nColor>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (arg.doublet)
switch (arg.d) {
case 4: twistGammaGPU<true,Float,nColor,4> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
default: errorQuda("%d not instantiated", arg.d);
}
else
switch (arg.d) {
case 4: twistGammaGPU<false,Float,nColor,4> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
default: errorQuda("%d not instantiated", arg.d);
}
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { if (arg.out.field == arg.in.field) arg.out.save(); } // save out if it aliases in
void postTune() { if (arg.out.field == arg.in.field) arg.out.load(); } // restore aliased field
};
// Instantiate TwistGamma for a concrete precision/color combination and run it.
template <typename Float, int nColor>
void ApplyTwistGamma(ColorSpinorField &out, const ColorSpinorField &in, int d, double kappa, double mu, double epsilon, int dagger, QudaTwistGamma5Type type)
{
GammaArg<Float,nColor> arg(out, in, d, kappa, mu, epsilon, dagger, type);
TwistGamma<Float,nColor,decltype(arg)> gamma(arg, in);
gamma.apply(streams[Nstream-1]);
checkCudaError();
}
// template on the number of colors
template <typename Float>
void ApplyTwistGamma(ColorSpinorField &out, const ColorSpinorField &in, int d, double kappa, double mu, double epsilon, int dagger, QudaTwistGamma5Type type)
{
// only nColor == 3 is instantiated; reject anything else up front
if (in.Ncolor() != 3) errorQuda("Unsupported number of colors %d\n", in.Ncolor());
ApplyTwistGamma<Float,3>(out, in, d, kappa, mu, epsilon, dagger, type);
}
//Apply the Gamma matrix to a colorspinor field
//out(x) = gamma_d*in
// Public entry point for the twisted gamma operator; dispatches on the
// storage precision of the input field. Requires the twisted-mass dslash
// to have been built.
void ApplyTwistGamma(ColorSpinorField &out, const ColorSpinorField &in, int d, double kappa, double mu, double epsilon, int dagger, QudaTwistGamma5Type type)
{
checkPrecision(out, in); // check all precisions match
checkLocation(out, in); // check all locations match
#ifdef GPU_TWISTED_MASS_DIRAC
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyTwistGamma<double>(out, in, d, kappa, mu, epsilon, dagger, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
ApplyTwistGamma<float>(out, in, d, kappa, mu, epsilon, dagger, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
ApplyTwistGamma<short>(out, in, d, kappa, mu, epsilon, dagger, type);
} else if (in.Precision() == QUDA_QUARTER_PRECISION) {
ApplyTwistGamma<char>(out, in, d, kappa, mu, epsilon, dagger, type);
} else {
errorQuda("Unsupported precision %d\n", in.Precision());
}
#else
errorQuda("Twisted mass dslash has not been built");
#endif // GPU_TWISTED_MASS_DIRAC
}
// Applies a gamma5 matrix to a spinor (wrapper to ApplyGamma with d = 4)
void gamma5(ColorSpinorField &out, const ColorSpinorField &in) { ApplyGamma(out,in,4); }
/**
@brief Parameteter structure for driving the clover and twist-clover application kernels
@tparam Float Underlying storage precision
@tparam nSpin Number of spin components
@tparam nColor Number of colors
@tparam dynamic_clover Whether we are inverting the clover field on the fly
*/
// Kernel argument struct for the clover and twisted-clover application
// kernels. Holds accessors for the input/output spinors, the clover field
// and (when needed) its explicit inverse, plus the precomputed twist
// coefficients a = ±2*kappa*mu and normalization b.
template <typename Float, int nSpin, int nColor>
struct CloverArg {
// number of real elements in one chiral block of the clover matrix
static constexpr int length = (nSpin / (nSpin/2)) * 2 * nColor * nColor * (nSpin/2) * (nSpin/2) / 2;
static constexpr bool dynamic_clover = dynamic_clover_inverse();
typedef typename colorspinor_mapper<Float,nSpin,nColor>::type F;
typedef typename clover_mapper<Float,length>::type C;
typedef typename mapper<Float>::type RegType;
F out; // output vector field
const F in; // input vector field
const C clover; // clover field
const C cloverInv; // inverse clover field (only set if not dynamic clover and doing twisted clover)
const int nParity; // number of parities we're working on
const int parity; // which parity we're acting on (if nParity=1)
bool inverse; // whether we are applying the inverse
bool doublet; // whether we applying the operator to a doublet
const int volumeCB; // checkerboarded volume
RegType a;
RegType b;
RegType c;
QudaTwistGamma5Type twist;
CloverArg(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
bool inverse, int parity, RegType kappa=0.0, RegType mu=0.0, RegType epsilon=0.0,
bool dagger = false, QudaTwistGamma5Type twist=QUDA_TWIST_GAMMA5_INVALID)
: out(out), clover(clover, twist == QUDA_TWIST_GAMMA5_INVALID ? inverse : false),
cloverInv(clover, (twist != QUDA_TWIST_GAMMA5_INVALID && !dynamic_clover) ? true : false),
in(in), nParity(in.SiteSubset()), parity(parity), inverse(inverse),
doublet(in.TwistFlavor() == QUDA_TWIST_DEG_DOUBLET || in.TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET),
volumeCB(doublet ? in.VolumeCB()/2 : in.VolumeCB()), a(0.0), b(0.0), c(0.0), twist(twist)
{
if (in.TwistFlavor() == QUDA_TWIST_SINGLET) {
if (twist == QUDA_TWIST_GAMMA5_DIRECT) {
a = 2.0 * kappa * mu;
b = 1.0;
} else if (twist == QUDA_TWIST_GAMMA5_INVERSE) {
a = -2.0 * kappa * mu;
b = 1.0 / (1.0 + a*a);
}
c = 0.0;
if (dagger) a *= -1.0; // daggered operator flips the sign of the twist
} else if (doublet) {
errorQuda("ERROR: Non-degenerated twisted-mass not supported in this regularization\n");
}
}
};
// Apply the clover matrix (or its inverse) at one site. The spinor is
// rotated to the chiral basis, each chiral half is multiplied by the
// corresponding Hermitian clover block (with a Cholesky solve when the
// inverse is computed on the fly), then rotated back.
template <typename Float, int nSpin, int nColor, typename Arg>
__device__ __host__ inline void cloverApply(Arg &arg, int x_cb, int parity) {
using namespace linalg; // for Cholesky
typedef typename mapper<Float>::type RegType;
typedef ColorSpinor<RegType, nColor, nSpin> Spinor;
typedef ColorSpinor<RegType, nColor, nSpin / 2> HalfSpinor;
int spinor_parity = arg.nParity == 2 ? parity : 0;
Spinor in = arg.in(x_cb, spinor_parity);
Spinor out;
in.toRel(); // change to chiral basis here
#pragma unroll
for (int chirality=0; chirality<2; chirality++) {
HMatrix<RegType,nColor*nSpin/2> A = arg.clover(x_cb, parity, chirality);
HalfSpinor chi = in.chiral_project(chirality);
if (arg.dynamic_clover) {
// invert on the fly; the 0.25 compensates the factor-of-2 clover storage convention
Cholesky<HMatrix, RegType, nColor * nSpin / 2> cholesky(A);
chi = static_cast<RegType>(0.25) * cholesky.backward(cholesky.forward(chi));
} else {
chi = A * chi;
}
out += chi.chiral_reconstruct(chirality);
}
out.toNonRel(); // change basis back
arg.out(x_cb, spinor_parity) = out;
}
// CPU driver: apply cloverApply at every site for each resident parity.
// The in-loop reassignment selects arg.parity when only one parity is resident.
template <typename Float, int nSpin, int nColor, typename Arg>
void cloverCPU(Arg &arg) {
for (int parity=0; parity<arg.nParity; parity++) {
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb=0; x_cb<arg.volumeCB; x_cb++) cloverApply<Float,nSpin,nColor>(arg, x_cb, parity);
}
}
// GPU kernel: one thread per site in x, parity in y (or fixed from arg).
template <typename Float, int nSpin, int nColor, typename Arg>
__global__ void cloverGPU(Arg arg) {
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
int parity = (arg.nParity == 2) ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;
if (x_cb >= arg.volumeCB) return;
cloverApply<Float,nSpin,nColor>(arg, x_cb, parity);
}
// Autotuned launcher for the plain clover application.
template <typename Float, int nSpin, int nColor, typename Arg>
class Clover : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
protected:
long long flops() const { return arg.nParity*arg.volumeCB*504ll; }
long long bytes() const { return arg.out.Bytes() + arg.in.Bytes() + arg.nParity*arg.volumeCB*arg.clover.Bytes(); }
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
Clover(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
}
virtual ~Clover() { }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
cloverCPU<Float,nSpin,nColor>(arg);
} else {
cloverGPU<Float,nSpin,nColor> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { if (arg.out.field == arg.in.field) arg.out.save(); } // Need to save the out field if it aliases the in field
void postTune() { if (arg.out.field == arg.in.field) arg.out.load(); } // Restore if the in and out fields alias
};
// Instantiate and run the clover application for a concrete precision and
// color count. (The previous if (inverse)/else branches were byte-identical,
// so the duplication has been collapsed into a single path.)
template <typename Float, int nColor>
void ApplyClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover, bool inverse, int parity)
{
if (in.Nspin() != 4) errorQuda("Unsupported nSpin=%d", in.Nspin());
constexpr int nSpin = 4;
CloverArg<Float, nSpin, nColor> arg(out, in, clover, inverse, parity);
Clover<Float, nSpin, nColor, decltype(arg)> worker(arg, in);
worker.apply(streams[Nstream - 1]);
checkCudaError();
}
// template on the number of colors (only nColor == 3 is instantiated)
template <typename Float>
void ApplyClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover, bool inverse, int parity)
{
if (in.Ncolor() == 3) {
ApplyClover<Float,3>(out, in, clover, inverse, parity);
} else {
errorQuda("Unsupported number of colors %d\n", in.Ncolor());
}
}
// Apply the clover matrix field to a colorspinor field
//out(x) = clover*in
// Public entry point: out(x) = clover * in(x) (or clover^{-1} * in when
// inverse is set). Dispatches on the storage precision of the input field.
void ApplyClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover, bool inverse, int parity)
{
checkPrecision(out, clover, in); // check all precisions match
checkLocation(out, clover, in); // check all locations match
#ifdef GPU_CLOVER_DIRAC
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyClover<double>(out, in, clover, inverse, parity);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
ApplyClover<float>(out, in, clover, inverse, parity);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
ApplyClover<short>(out, in, clover, inverse, parity);
} else if (in.Precision() == QUDA_QUARTER_PRECISION) {
ApplyClover<char>(out, in, clover, inverse, parity);
} else {
errorQuda("Unsupported precision %d\n", in.Precision());
}
#else
errorQuda("Clover dslash has not been built");
#endif // GPU_CLOVER_DIRAC
}
// if (!inverse) apply (Clover + i*a*gamma_5) to the input spinor
// else apply (Clover + i*a*gamma_5)/(Clover^2 + a^2) to the input spinor
// Apply the twisted-clover operator at one site:
//   if (!inverse) out = (Clover + i*a*gamma_5) in
//   else          out = (Clover + i*a*gamma_5) / (Clover^2 + a^2) in
// The spinor is rotated to the chiral basis so gamma_5 is diagonal
// (+1/-1 per chirality), each chiral half is processed, then rotated back.
template <bool inverse, typename Float, int nSpin, int nColor, typename Arg>
__device__ __host__ inline void twistCloverApply(Arg &arg, int x_cb, int parity) {
using namespace linalg; // for Cholesky
constexpr int N = nColor*nSpin/2;
typedef typename mapper<Float>::type RegType;
typedef ColorSpinor<RegType,nColor,nSpin> Spinor;
typedef ColorSpinor<RegType,nColor,nSpin/2> HalfSpinor;
typedef HMatrix<RegType,N> Mat;
int spinor_parity = arg.nParity == 2 ? parity : 0;
Spinor in = arg.in(x_cb, spinor_parity);
Spinor out;
in.toRel(); // change to chiral basis here
#pragma unroll
for (int chirality=0; chirality<2; chirality++) {
// factor of 2 comes from clover normalization we need to correct for;
// the sign of the twist flips between the two chiralities (gamma_5 eigenvalue)
const complex<RegType> j(0.0, chirality == 0 ? static_cast<RegType>(0.5) : -static_cast<RegType>(0.5));
Mat A = arg.clover(x_cb, parity, chirality);
HalfSpinor in_chi = in.chiral_project(chirality);
HalfSpinor out_chi = A*in_chi + j*arg.a*in_chi;
if (inverse) {
if (arg.dynamic_clover) {
// solve (A^2 + (a/2)^2) x = out_chi via Cholesky; the 0.25 factors
// compensate the factor-of-2 clover storage convention
Mat A2 = A.square();
A2 += arg.a*arg.a*static_cast<RegType>(0.25);
Cholesky<HMatrix,RegType,N> cholesky(A2);
out_chi = static_cast<RegType>(0.25)*cholesky.backward(cholesky.forward(out_chi));
} else {
Mat Ainv = arg.cloverInv(x_cb, parity, chirality);
out_chi = static_cast<RegType>(2.0)*(Ainv*out_chi);
}
}
out += (out_chi).chiral_reconstruct(chirality);
}
out.toNonRel(); // change basis back
arg.out(x_cb, spinor_parity) = out;
}
// Host-side driver: visits every checkerboard site for each resident parity
// and applies the twisted-clover operator at that site.
template <bool inverse, typename Float, int nSpin, int nColor, typename Arg>
void twistCloverCPU(Arg &arg) {
for (int p = 0; p < arg.nParity; p++) {
const int parity = (arg.nParity == 2) ? p : arg.parity;
for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++)
twistCloverApply<inverse, Float, nSpin, nColor>(arg, x_cb, parity);
}
}
// Device kernel: one thread per checkerboarded site in x; parity comes from
// the y dimension when both parities are resident, else from arg.parity.
template <bool inverse, typename Float, int nSpin, int nColor, typename Arg>
__global__ void twistCloverGPU(Arg arg) {
const int x_cb = blockIdx.x * blockDim.x + threadIdx.x;
if (x_cb >= arg.volumeCB) return;
const int parity = (arg.nParity == 2) ? blockDim.y * blockIdx.y + threadIdx.y : arg.parity;
twistCloverApply<inverse, Float, nSpin, nColor>(arg, x_cb, parity);
}
// Autotuned launcher for the twisted-clover application (CUDA build).
// Dispatches to the CPU loop or GPU kernel, selecting the inverse/direct
// template instantiation at runtime.
template <typename Float, int nSpin, int nColor, typename Arg>
class TwistClover : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
protected:
// flop count differs for the inverse (extra Cholesky / Ainv multiply) path
long long flops() const { return (arg.inverse ? 1056ll : 552ll) * arg.nParity*arg.volumeCB; }
long long bytes() const {
long long rtn = arg.out.Bytes() + arg.in.Bytes() + arg.nParity*arg.volumeCB*arg.clover.Bytes();
// the explicit inverse clover is only read when not inverting on the fly
if (arg.twist == QUDA_TWIST_GAMMA5_INVERSE && !arg.dynamic_clover)
rtn += arg.nParity*arg.volumeCB*arg.cloverInv.Bytes();
return rtn;
}
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
TwistClover(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
// tuning key distinguishes direct and inverse application
strcpy(aux, meta.AuxString());
strcat(aux, arg.inverse ? ",inverse" : ",direct");
}
virtual ~TwistClover() { }
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
if (arg.inverse) twistCloverCPU<true,Float,nSpin,nColor>(arg);
else twistCloverCPU<false,Float,nSpin,nColor>(arg);
} else {
if (arg.inverse) twistCloverGPU<true,Float,nSpin,nColor> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
else twistCloverGPU<false,Float,nSpin,nColor> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { if (arg.out.field == arg.in.field) arg.out.save(); } // Need to save the out field if it aliases the in field
void postTune() { if (arg.out.field == arg.in.field) arg.out.load(); } // Restore if the in and out fields alias
};
// Instantiate and run the twisted-clover application for a concrete
// precision / color-count pair. Everything except the DIRECT twist takes
// the inverse path.
template <typename Float, int nColor>
void ApplyTwistClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
double kappa, double mu, double epsilon, int parity, int dagger, QudaTwistGamma5Type twist)
{
if (in.Nspin() != 4) errorQuda("Unsupported nSpin=%d", in.Nspin());
constexpr int nSpin = 4;
const bool inverse = (twist != QUDA_TWIST_GAMMA5_DIRECT);
CloverArg<Float,nSpin,nColor> arg(out, in, clover, inverse, parity, kappa, mu, epsilon, dagger, twist);
TwistClover<Float,nSpin,nColor,decltype(arg)> worker(arg, in);
worker.apply(streams[Nstream-1]);
checkCudaError();
}
// template on the number of colors
template <typename Float>
void ApplyTwistClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
double kappa, double mu, double epsilon, int parity, int dagger, QudaTwistGamma5Type twist)
{
// only nColor == 3 is instantiated; reject anything else up front
if (in.Ncolor() != 3) errorQuda("Unsupported number of colors %d\n", in.Ncolor());
ApplyTwistClover<Float,3>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
}
//Apply the twisted-clover matrix field to a colorspinor field
// Public entry point for the twisted-clover operator; validates precision
// and location agreement, then dispatches on storage precision.
void ApplyTwistClover(ColorSpinorField &out, const ColorSpinorField &in, const CloverField &clover,
double kappa, double mu, double epsilon, int parity, int dagger, QudaTwistGamma5Type twist)
{
checkPrecision(out, clover, in); // check all precisions match
checkLocation(out, clover, in); // check all locations match
#ifdef GPU_CLOVER_DIRAC
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyTwistClover<double>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
ApplyTwistClover<float>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
ApplyTwistClover<short>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else if (in.Precision() == QUDA_QUARTER_PRECISION) {
ApplyTwistClover<char>(out, in, clover, kappa, mu, epsilon, parity, dagger, twist);
} else {
errorQuda("Unsupported precision %d\n", in.Precision());
}
#else
errorQuda("Clover dslash has not been built");
#endif // GPU_CLOVER_DIRAC
}
} // namespace quda
|
467f820abee797bcc429bc5feaca8895a25f758e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* tetrahedra-based raytracer
* Copyright (C) 2015-2016 Christian Lehmann
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define GLEW_STATIC
#include "Util.h"
#include "mesh_io.h"
#include "tetgenio.h"
#include "Camera.h"
#include "device_launch_parameters.h"
#include "GLFW/glfw3.h"
#include <cuda_gl_interop.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "Sphere.h"
#define spp 1
#define gamma 2.2f
#define MAX_DEPTH 2
#define width 800
#define height 600
float3* finalimage;
float3* accumulatebuffer;
uint32_t frameNumber = 0;
bool bufferReset = false;
float deltaTime, lastFrame;
BBox box;
GLuint vbo;
mesh2 *mesh;
__managed__ bool edgeVisualization = false;
// Camera
InteractiveCamera* interactiveCamera = NULL;
Camera* hostRendercam = NULL;
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
bool buttonActive = false, enableMouseMovement = true, cursorFree = false;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = 0.0;
int lastX = width / 2, lastY = height / 2;
int theButtonState = 0;
int theModifierState = 0;
float scalefactor = 1.2f;
union Color // 4 bytes = 4 chars = 1 float
{
	float c;            // packed RGBA pixel reinterpreted as one float for the VBO colour attribute
	uchar4 components;  // the individual 8-bit r, g, b, a channels
};
// CUDA error checking
// Wrap every HIP runtime call in gpuErrchk(...) to get file/line diagnostics.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed HIP runtime call (readable error string plus call site),
// pauses the console so the message stays visible, and optionally terminates.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
	if (code == hipSuccess) return; // fast path: nothing to report
	fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
	system("PAUSE"); // Windows-style console pause so the message can be read
	if (abort) exit(code);
}
// Thomas Wang's integer finaliser hash; decorrelates consecutive frame
// numbers before they are used as RNG seeds.
// richiesams.blogspot.co.nz/2015/03/creating-randomness-and-acummulating.html
unsigned int WangHash(unsigned int a) {
	unsigned int h = a;
	h = (h ^ 61u) ^ (h >> 16);
	h += h << 3;
	h ^= h >> 4;
	h *= 0x27d4eb2du;
	h ^= h >> 15;
	return h;
}
// GLFW error callback: forward the human-readable message to stderr.
static void error_callback(int error, const char* description)
{
	(void)error; // numeric code unused; the description is self-contained
	fprintf(stderr, "%s", description);
}
// Keeps the global starting tetrahedron (_start_tet) in sync with the camera
// position after a move: clamp the camera into the mesh bounding box, test the
// current tet, then its four neighbours, and only fall back to a brute-force
// GPU search when all cheap checks fail.
void updateCamPos()
{
	// BUGFIX: clamp FIRST, then read the position. Previously 'pos' was copied
	// before ClampToBBox adjusted hostRendercam->position, so the tet search
	// below could run on a point outside the tetrahedralization.
	ClampToBBox(&box, hostRendercam->position);
	float4 pos = hostRendercam->position;
	// the four tetrahedra adjacent to the current starting tet
	int32_t adjtets[4] = { mesh->t_adjtet1[_start_tet], mesh->t_adjtet2[_start_tet], mesh->t_adjtet3[_start_tet], mesh->t_adjtet4[_start_tet] };
	if (!IsPointInThisTetCPU(mesh, pos, _start_tet))
	{
		// camera left the current tet: try the neighbours on the CPU first...
		if (IsPointInThisTetCPU(mesh, pos, adjtets[0])) _start_tet = adjtets[0];
		else if (IsPointInThisTetCPU(mesh, pos, adjtets[1])) _start_tet = adjtets[1];
		else if (IsPointInThisTetCPU(mesh, pos, adjtets[2])) _start_tet = adjtets[2];
		else if (IsPointInThisTetCPU(mesh, pos, adjtets[3])) _start_tet = adjtets[3];
		else
		{
			// ...and only then search all tetrahedra on the GPU
			fprintf(stderr, "Fallback to CUDA search for starting tet\n");
			uint32_t _dim = 2 + pow(mesh->tetnum, 0.25);
			dim3 Block(_dim, _dim, 1);
			dim3 Grid(_dim, _dim, 1);
			GetTetrahedraFromPoint << <Grid, Block >> >(mesh, pos);
			gpuErrchk(hipDeviceSynchronize());
		}
	}
}
// GLFW keyboard handler: WASD/R/F translate the camera (revalidating the
// containing tetrahedron), arrows rotate, G/H/T/Z adjust depth of field,
// M toggles edge visualization, C toggles cursor capture, ESC quits.
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
	const float dist = 0.3f; // movement step; skips tetrahedra if set too high
	// track whether a key is currently held (stays true across key repeats)
	if (action == GLFW_PRESS) buttonActive = true;
	if (action == GLFW_RELEASE) buttonActive = false;
	switch (key)
	{
	case GLFW_KEY_ESCAPE:
		if (action == GLFW_PRESS) glfwSetWindowShouldClose(window, GL_TRUE);
		break;
	// --- translation: every move revalidates the containing tet ---
	case GLFW_KEY_A:
		if (buttonActive) { interactiveCamera->strafe(-dist); updateCamPos(); }
		break;
	case GLFW_KEY_D:
		if (buttonActive) { interactiveCamera->strafe(dist); updateCamPos(); }
		break;
	case GLFW_KEY_W:
		if (buttonActive) { interactiveCamera->goForward(dist); updateCamPos(); }
		break;
	case GLFW_KEY_S:
		if (buttonActive) { interactiveCamera->goForward(-dist); updateCamPos(); }
		break;
	case GLFW_KEY_R:
		if (buttonActive) { interactiveCamera->changeAltitude(dist); updateCamPos(); }
		break;
	case GLFW_KEY_F:
		if (buttonActive) { interactiveCamera->changeAltitude(-dist); updateCamPos(); }
		break;
	// --- depth-of-field controls ---
	case GLFW_KEY_G:
		if (buttonActive) interactiveCamera->changeApertureDiameter(0.1);
		break;
	case GLFW_KEY_H:
		if (buttonActive) interactiveCamera->changeApertureDiameter(-0.1);
		break;
	case GLFW_KEY_T:
		if (buttonActive) interactiveCamera->changeFocalDistance(0.1);
		break;
	case GLFW_KEY_Z:
		if (buttonActive) interactiveCamera->changeFocalDistance(-0.1);
		break;
	// --- rotation ---
	case GLFW_KEY_UP:
		if (buttonActive) interactiveCamera->changePitch(0.02f);
		break;
	case GLFW_KEY_DOWN:
		if (buttonActive) interactiveCamera->changePitch(-0.02f);
		break;
	case GLFW_KEY_LEFT:
		if (buttonActive) interactiveCamera->changeYaw(0.02f);
		break;
	case GLFW_KEY_RIGHT:
		if (buttonActive) interactiveCamera->changeYaw(-0.02f);
		break;
	case GLFW_KEY_B:
		if (buttonActive) updateCamPos(); // debug: force tet revalidation
		break;
	case GLFW_KEY_M:
		if (action == GLFW_PRESS) edgeVisualization = !edgeVisualization; // toggle edge overlay
		break;
	case GLFW_KEY_C:
		// toggle between captured cursor (mouse-look) and a free cursor
		if (action == GLFW_PRESS)
		{
			if (cursorFree == false) { glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL); cursorFree = true; enableMouseMovement = false; }
			else { glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); cursorFree = false; enableMouseMovement = true; }
		}
		break;
	}
	// any key event invalidates the progressive accumulation buffer
	bufferReset = true;
}
// Records which mouse button was last pressed:
// 0 = left (rotate), 1 = middle (zoom), 2 = right (move).
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
	if (action != GLFW_PRESS) return; // releases do not change the mode
	switch (button)
	{
	case GLFW_MOUSE_BUTTON_LEFT:   theButtonState = 0; break;
	case GLFW_MOUSE_BUTTON_MIDDLE: theButtonState = 1; break;
	case GLFW_MOUSE_BUTTON_RIGHT:  theButtonState = 2; break;
	}
}
// GLFW cursor-move handler: turns mouse deltas into camera motion depending
// on the last pressed button, then restarts progressive accumulation.
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
	int deltaX = lastX - xpos;
	int deltaY = lastY - ypos;
	if (!enableMouseMovement) return;       // cursor was freed with 'C'
	if (deltaX == 0 && deltaY == 0) return; // no motion: keep accumulating
	if (theButtonState == 0) // left button: rotate
	{
		interactiveCamera->changeYaw(deltaX * 0.01);
		interactiveCamera->changePitch(-deltaY * 0.01);
	}
	else if (theButtonState == 1) // middle button: zoom (altitude)
	{
		interactiveCamera->changeAltitude(-deltaY * 0.01);
		updateCamPos();
	}
	if (theButtonState == 2) // right button: camera move (radius)
	{
		interactiveCamera->changeRadius(-deltaY * 0.01);
		updateCamPos();
	}
	lastX = xpos;
	lastY = ypos;
	bufferReset = true; // camera changed: restart progressive accumulation
}
// Iterative path-tracing core (materials adapted from smallpt by Kevin
// Beason). Traces one ray through the tetrahedral mesh for up to MAX_DEPTH
// bounces and returns the accumulated radiance for that path.
//   mesh      - tetrahedral scene mesh (unified memory)
//   start     - index of the tetrahedron containing the ray origin
//   ray       - primary ray, origin/direction in world space
//   oldpos    - previous camera position; kept for interface compatibility (unused)
//   randState - per-thread RNG state for all sampling decisions
__device__ RGB radiance(mesh2 *mesh, int32_t start, Ray &ray, float4 oldpos, hiprandState_t* randState)
{
	float4 mask = make_float4(1.0f, 1.0f, 1.0f, 1.0f); // colour mask (accumulated reflectance)
	float4 accucolor = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // accumulated colour
	float4 originInWorldSpace = ray.o;
	float4 rayInWorldSpace = ray.d;
	int32_t newstart = start; // tetrahedron the current path segment starts in
	for (int bounces = 0; bounces < MAX_DEPTH; bounces++)
	{
		float4 f = make_float4(0, 0, 0, 0); // primitive colour
		float4 emit = make_float4(0, 0, 0, 0); // primitive emission colour
		float4 x; // intersection point
		float4 n; // normal
		float4 nl; // normal oriented against the incoming ray
		float4 dw; // ray direction of next path segment
		float4 pointHitInWorldSpace;
		float3 rayorig = make_float3(originInWorldSpace.x, originInWorldSpace.y, originInWorldSpace.z);
		float3 raydir = make_float3(rayInWorldSpace.x, rayInWorldSpace.y, rayInWorldSpace.z);
		bool isEdge = false;
		double dist; // set by traverse_ray: distance to the first surface hit
		rayhit firsthit;
		Geometry geom;
		// ---------------- TRIANGLE intersection: walk tet-to-tet until a face is hit ----------------
		traverse_ray(mesh, originInWorldSpace, rayInWorldSpace, newstart, firsthit, dist, edgeVisualization, isEdge, n);
		pointHitInWorldSpace = originInWorldSpace + rayInWorldSpace * dist;
		// ---------------- SPHERE intersection (analytic debug sphere, off by default) ----------------
		float4 spherePos = make_float4(10, 10, 10, 0);
		float sphereRad = 10, sphereDist = 0;
		bool spheresEnabled = false;
		if (spheresEnabled) { sphereDist = sphIntersect(originInWorldSpace, rayInWorldSpace, spherePos, sphereRad); }
		if (sphereDist > 0.0) { geom = SPHERE; traverse_until_point(mesh, originInWorldSpace, rayInWorldSpace, newstart, originInWorldSpace + rayInWorldSpace * sphereDist, firsthit); }
		else { geom = TRIANGLE; }
		if (geom == SPHERE)
		{
			// the debug sphere is rendered as clear glass
			emit = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
			f = make_float4(1.0f, 1.0f, 1.0f, 0.0f);
			firsthit.refl_t = REFR;
			x = originInWorldSpace + rayInWorldSpace * sphereDist;
			n = normalize((x - spherePos));
			nl = Dot(n, rayInWorldSpace) < 0 ? n : n * -1;
		}
		if (geom == TRIANGLE)
		{
			x = pointHitInWorldSpace;
			n = normalize(n);
			nl = Dot(n, rayInWorldSpace) < 0 ? n : n * -1;
			if (firsthit.constrained == true) { emit = make_float4(0.0f, 0.0f, 0.0f, 0.0f); f = make_float4(0.0f, 0.0f, 0.75f, 0.0f); } // constrained faces are blue
			if (firsthit.wall == true)
			{
				emit = make_float4(1.0f, 0.0f, 0.0f, 0.0f); // a wall face was detected
				f = make_float4(0.3f, 0.1f, 0.4f, 0.0f);
			}
			// "dark" faces act as white area lights
			if (firsthit.dark == true) { emit = make_float4(12.0f, 12.0f, 7.0f, 0.0f); f = make_float4(0.0f, 1.0f, 0.0f, 0.0f); }
			// all tagged faces are shaded as diffuse surfaces
			if (firsthit.constrained == true) { firsthit.refl_t = DIFF; }
			if (firsthit.wall == true) { firsthit.refl_t = DIFF; }
			if (firsthit.dark == true) { firsthit.refl_t = DIFF; }
			if (edgeVisualization && isEdge) { emit = make_float4(1.0f, 1.0f, 0.0f, 0.0f); f = make_float4(1.0f, 0.0f, 0.0f, 0.0f); } // visualize wall/constrained edges
		}
		// basic material system, all parameters are hard-coded (phong exponent, index of refraction)
		accucolor += (mask * emit);
		// diffuse material, based on smallpt by Kevin Beason
		if (firsthit.refl_t == DIFF){
			// pick two random numbers
			float phi = 2 * _PI_ * hiprand_uniform(randState);
			float r2 = hiprand_uniform(randState);
			float r2s = sqrtf(r2);
			// compute orthonormal coordinate frame uvw with hitpoint as origin
			float4 w = nl; w = normalize(w);
			float4 u = Cross((fabs(w.x) > .1 ? make_float4(0, 1, 0, 0) : make_float4(1, 0, 0, 0)), w); u = normalize(u);
			float4 v = Cross(w, u);
			// compute cosine weighted random ray direction on hemisphere
			dw = u*cosf(phi)*r2s + v*sinf(phi)*r2s + w*sqrtf(1 - r2);
			dw = normalize(dw);
			// offset origin of next path segment to prevent self intersection
			pointHitInWorldSpace = x + w * 0.01; // scene size dependent
			// multiply mask with colour of object
			mask *= f;
		}
		// Phong metal material from "Realistic Ray Tracing", P. Shirley
		if (firsthit.refl_t == METAL){
			// compute random perturbation of ideal reflection vector; the higher
			// the phong exponent, the closer the perturbed vector is to the
			// ideal reflection direction
			float phi = 2 * _PI_ * hiprand_uniform(randState);
			float r2 = hiprand_uniform(randState);
			float phongexponent = 20;
			float cosTheta = powf(1 - r2, 1.0f / (phongexponent + 1));
			float sinTheta = sqrtf(1 - cosTheta * cosTheta);
			// create orthonormal basis uvw around reflection vector with hitpoint
			// as origin; w is ray direction for ideal reflection
			float4 w = rayInWorldSpace - n * 2.0f * Dot(n, rayInWorldSpace); w = normalize(w);
			float4 u = Cross((fabs(w.x) > .1 ? make_float4(0, 1, 0, 0) : make_float4(1, 0, 0, 0)), w); u = normalize(u);
			float4 v = Cross(w, u); // v is normalised by default
			// compute cosine weighted random ray direction on hemisphere
			dw = u * cosf(phi) * sinTheta + v * sinf(phi) * sinTheta + w * cosTheta;
			dw = normalize(dw);
			// offset origin of next path segment to prevent self intersection
			pointHitInWorldSpace = x + w * 0.01; // scene size dependent
			// multiply mask with colour of object
			mask *= f;
		}
		// specular material (perfect mirror)
		if (firsthit.refl_t == SPEC){
			// compute reflected ray direction according to Snell's law
			dw = rayInWorldSpace - n * 2.0f * Dot(n, rayInWorldSpace);
			// offset origin of next path segment to prevent self intersection
			pointHitInWorldSpace = x + nl * 0.01; // scene size dependent
			// multiply mask with colour of object
			mask *= f;
		}
		// perfectly refractive material (glass, water)
		if (firsthit.refl_t == REFR){
			bool into = Dot(n, nl) > 0; // is ray entering or leaving refractive material?
			float nc = 1.0f; // Index of Refraction air
			float nt = 1.5f; // Index of Refraction glass/water
			float nnt = into ? nc / nt : nt / nc; // IOR ratio of refractive materials
			float ddn = Dot(rayInWorldSpace, nl);
			float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);
			if (cos2t < 0.0f) // total internal reflection
			{
				dw = rayInWorldSpace;
				dw -= n * 2.0f * Dot(n, rayInWorldSpace);
				// offset origin of next path segment to prevent self intersection
				pointHitInWorldSpace = x + nl * 0.01; // scene size dependent
			}
			else // cos2t > 0
			{
				// compute direction of transmission ray
				float4 tdir = rayInWorldSpace * nnt;
				tdir -= n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t)));
				tdir = normalize(tdir);
				// BUGFIX: Fresnel reflectance at normal incidence needs the squared
				// denominator; the old "(nt-nc)*(nt-nc) / (nt+nc)*(nt+nc)" parsed as
				// ((nt-nc)^2 / (nt+nc)) * (nt+nc) == (nt-nc)^2 (cf. smallpt).
				float R0 = ((nt - nc)*(nt - nc)) / ((nt + nc)*(nt + nc));
				float c = 1.f - (into ? -ddn : Dot(tdir, n));
				float Re = R0 + (1.f - R0) * c * c * c * c * c; // Schlick's approximation
				float Tr = 1 - Re; // Transmission
				float P = .25f + .5f * Re; // Russian-roulette probability of reflection
				float RP = Re / P;
				float TP = Tr / (1.f - P);
				// randomly choose reflection or transmission ray.
				// BUGFIX: compare against P (the probability the weights RP/TP are
				// normalised by), not a fixed 0.25 - otherwise the estimator is
				// biased (cf. smallpt).
				if (hiprand_uniform(randState) < P) // reflection ray
				{
					mask *= RP;
					dw = rayInWorldSpace;
					dw -= n * 2.0f * Dot(n, rayInWorldSpace);
					pointHitInWorldSpace = x + nl * 0.01; // scene size dependent
				}
				else // transmission ray
				{
					mask *= TP;
					dw = tdir; //r = Ray(x, tdir);
					pointHitInWorldSpace = x + nl * 0.001f; // epsilon must be small to avoid artefacts
				}
			}
		}
		// set up origin and direction of next path segment
		originInWorldSpace = pointHitInWorldSpace;
		rayInWorldSpace = dw;
		newstart = firsthit.tet; // continue the tet walk from the hit tetrahedron
	}
	// return accumulated ray colour after all bounces are computed
	return RGB(accucolor.x, accucolor.y, accucolor.z);
}
// Progressive path-tracing kernel. Launch expectation: a 2D grid of 2D blocks
// covering the width x height image, one thread per pixel. Each thread traces
// spp jittered camera rays (with optional depth of field), adds the result to
// 'accumbuffer', and writes the gamma-corrected running average into the
// VBO-backed buffer 'c' as (x, y, packed RGBA colour).
__global__ void renderKernel(mesh2 *tetmesh, int32_t start, float3 *accumbuffer, float3 *c, unsigned int hashedframenumber, unsigned int framenumber, float4 position, float4 view, float4 up, float fovx, float fovy, float focalDistance, float apertureRadius)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// BUGFIX: bounds guard - without it any launch whose grid overshoots the
	// image (e.g. a ceil-division grid) would compute a wrapped-around unsigned
	// index below and write out of bounds.
	if (x >= width || y >= height) return;
	unsigned int i = (height - y - 1)*width + x; // buffer index (image is flipped vertically)
	int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
	hiprandState_t randState;
	hiprand_init(hashedframenumber + threadId, 0, 0, &randState); // unique per-pixel, per-frame seed
	int pixelx = x; // pixel x-coordinate on screen
	int pixely = height - y - 1; // pixel y-coordinate on screen
	float4 rendercampos = make_float4(position.x, position.y, position.z, 0);
	RGB finalcol(0);
	for (int s = 0; s < spp; s++)
	{
		// build the camera basis; inputs should already be unit length but are
		// normalized defensively
		float4 rendercamview = make_float4(view.x, view.y, view.z, 0); rendercamview = normalize(rendercamview);
		float4 rendercamup = make_float4(up.x, up.y, up.z, 0); rendercamup = normalize(rendercamup);
		float4 horizontalAxis = Cross(rendercamview, rendercamup); horizontalAxis = normalize(horizontalAxis); // important to normalize!
		float4 verticalAxis = Cross(horizontalAxis, rendercamview); verticalAxis = normalize(verticalAxis);
		float4 middle = rendercampos + rendercamview;
		// fovx/fovy are full field-of-view angles in degrees
		float4 horizontal = horizontalAxis * tanf(fovx * 0.5 * (_PI_ / 180));
		float4 vertical = verticalAxis * tanf(-fovy * 0.5 * (_PI_ / 180));
		// jitter the sample position inside the pixel for anti-aliasing
		float jitterValueX = hiprand_uniform(&randState) - 0.5;
		float jitterValueY = hiprand_uniform(&randState) - 0.5;
		float sx = (jitterValueX + pixelx) / (width - 1);
		float sy = (jitterValueY + pixely) / (height - 1);
		float4 pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1));
		float4 pointOnImagePlane = rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * focalDistance); // important for depth of field!
		// sample a point on the lens aperture for depth of field
		float4 aperturePoint;
		if (apertureRadius > 0.00001)
		{
			float random1 = hiprand_uniform(&randState);
			float random2 = hiprand_uniform(&randState);
			float angle = 2 * _PI_ * random1;
			float distance = apertureRadius * sqrtf(random2);
			float apertureX = cos(angle) * distance;
			float apertureY = sin(angle) * distance;
			aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY);
		}
		else { aperturePoint = rendercampos; }
		// calculate ray direction of next ray in path
		float4 apertureToImagePlane = pointOnImagePlane - aperturePoint;
		apertureToImagePlane = normalize(apertureToImagePlane); // ray direction needs to be normalised
		float4 rayInWorldSpace = apertureToImagePlane;
		rayInWorldSpace = normalize(rayInWorldSpace);
		float4 originInWorldSpace = aperturePoint;
		finalcol += radiance(tetmesh, start, Ray(originInWorldSpace, rayInWorldSpace), rendercampos, &randState) * (1.0f / spp);
	}
	// progressive accumulation: average over all frames rendered so far
	accumbuffer[i] += finalcol;
	float3 tempcol = accumbuffer[i] / framenumber;
	Color fcolour;
	float3 colour = make_float3(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f));
	// gamma-correct and pack into a single float for the GL colour attribute
	fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / gamma) * 255), (unsigned char)(powf(colour.y, 1 / gamma) * 255), (unsigned char)(powf(colour.z, 1 / gamma) * 255), 1);
	c[i] = make_float3(x, y, fcolour.c);
}
// Main render loop: creates the GLFW window and GL state, registers the pixel
// VBO with the GPU runtime, then repeatedly launches the path-tracing kernel,
// draws the VBO contents as points and overlays runtime statistics.
void render()
{
	GLFWwindow* window;
	if (!glfwInit()) exit(EXIT_FAILURE);
	window = glfwCreateWindow(width, height, "", NULL, NULL);
	glfwMakeContextCurrent(window);
	glfwSetErrorCallback(error_callback);
	glfwSetKeyCallback(window, key_callback);
	glfwSetCursorPosCallback(window, mouse_callback);
	glfwSetMouseButtonCallback(window, mouse_button_callback);
	glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); // capture cursor for mouse-look
	glewExperimental = GL_TRUE;
	glewInit();
	if (!glewIsSupported("GL_VERSION_2_0 "))
	{
		fprintf(stderr, "GLEW not supported.");
		fflush(stderr);
		exit(0);
	}
	fprintf(stderr, "GLEW successfully initialized \n");
	glClearColor(0.0, 0.0, 0.0, 0.0);
	glMatrixMode(GL_PROJECTION);
	glOrtho(0.0, width, 0.0, height, 0, 1);
	// pixel buffer shared between GL (drawing) and the render kernel (writing)
	glGenBuffers(1, &vbo);
	glBindBuffer(GL_ARRAY_BUFFER, vbo);
	glBufferData(GL_ARRAY_BUFFER, width * height * sizeof(float3), 0, GL_DYNAMIC_DRAW);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	cudaGraphicsResource_t _cgr;
	size_t num_bytes;
	hipGraphicsGLRegisterBuffer(&_cgr, vbo, hipGraphicsRegisterFlagsNone);
	fprintf(stderr, "VBO created \n");
	fprintf(stderr, "Entering glutMainLoop... \n");
	my_stbtt_initfont(); // font initialization
	while (!glfwWindowShouldClose(window))
	{
		glfwPollEvents();
		if (bufferReset)
		{
			// restart progressive accumulation after any camera change.
			// BUGFIX: clear with byte value 0 - the previous value 1 filled every
			// float with the denormal bit pattern 0x01010101 instead of 0.0f.
			frameNumber = 0;
			hipMemset(accumulatebuffer, 0, width * height * sizeof(float3));
		}
		bufferReset = false;
		frameNumber++;
		interactiveCamera->buildRenderCamera(hostRendercam);
		// calculate deltatime of current frame
		GLfloat currentFrame = glfwGetTime();
		deltaTime = currentFrame - lastFrame;
		lastFrame = currentFrame;
		std::stringstream title;
		title << "Tetrahedral pathtracing with node-based tetrahedral mesh (2016) by Christian Lehmann";
		glfwSetWindowTitle(window, title.str().c_str());
		// GPU interop: map the VBO and let the kernel write into it
		hipGraphicsMapResources(1, &_cgr, 0);
		hipGraphicsResourceGetMappedPointer((void**)&finalimage, &num_bytes, _cgr);
		glClear(GL_COLOR_BUFFER_BIT);
		dim3 block(16, 16, 1);
		// NOTE(review): integer division drops remainder rows when height (600)
		// is not a multiple of 16, so the bottom 600 % 16 rows are never
		// rendered; a ceil-division grid (the kernel is bounds-guarded) would
		// cover them - confirm intent.
		dim3 grid(width / block.x, height / block.y, 1);
		// NOTE(review): fov.x is passed for both the fovx and fovy parameters - confirm.
		renderKernel << <grid, block >> >(mesh, _start_tet, accumulatebuffer, finalimage, WangHash(frameNumber), frameNumber,
			hostRendercam->position, hostRendercam->view, hostRendercam->up, hostRendercam->fov.x, hostRendercam->fov.x,
			hostRendercam->focalDistance, hostRendercam->apertureRadius);
		gpuErrchk(hipDeviceSynchronize());
		hipGraphicsUnmapResources(1, &_cgr, 0);
		// draw the kernel output: each float3 holds (x, y, packed RGBA colour)
		glBindBuffer(GL_ARRAY_BUFFER, vbo);
		glVertexPointer(2, GL_FLOAT, 12, 0);
		glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
		glEnableClientState(GL_VERTEX_ARRAY);
		glEnableClientState(GL_COLOR_ARRAY);
		glDrawArrays(GL_POINTS, 0, width * height);
		glDisableClientState(GL_VERTEX_ARRAY);
		// stats overlay (Mray/s, camera state, current tet, frame time)
		float mrays = width*height*MAX_DEPTH*0.000001 / deltaTime;
		std::string a = "Currently " + std::to_string(mrays) + " Mray/s";
		my_stbtt_print(100, 200, a, make_float3(1, 1, 1));
		std::string str_orig = std::to_string(hostRendercam->position.x) + " " + std::to_string(hostRendercam->position.y) + " " + std::to_string(hostRendercam->position.z);
		std::string str_dir = std::to_string(hostRendercam->view.x) + " " + std::to_string(hostRendercam->view.y) + " " + std::to_string(hostRendercam->view.z);
		my_stbtt_print(100, 150, "Ray origin: " + str_orig, make_float3(1, 1, 1));
		my_stbtt_print(100, 100, "Ray direction: " + str_dir, make_float3(1, 1, 1));
		my_stbtt_print(100, 50, "Current tet: " + std::to_string(_start_tet), make_float3(1, 1, 1));
		my_stbtt_print(100, 10, "Current ms/frame: " + std::to_string(deltaTime*1000), make_float3(1, 1, 1));
		glfwSwapBuffers(window);
	}
}
// Entry point: sets up the camera, selects a GPU, loads and tetrahedralizes
// the scene, mirrors it into unified-memory structure-of-arrays buffers,
// finds the tetrahedron containing the camera, and enters the render loop.
int main(int argc, char *argv[])
{
	// --- camera setup (deleting the initially-NULL pointer is harmless) ---
	delete interactiveCamera;
	interactiveCamera = new InteractiveCamera();
	interactiveCamera->setResolution(width, height);
	interactiveCamera->setFOVX(45);
	hostRendercam = new Camera();
	interactiveCamera->buildRenderCamera(hostRendercam);
	// --- device selection: any device with compute capability >= 1.0 ---
	hipDeviceProp_t prop;
	int dev;
	memset(&prop, 0, sizeof(hipDeviceProp_t));
	prop.major = 1;
	prop.minor = 0;
	hipChooseDevice(&dev, &prop);
	// ===========================
	// mesh2
	// ===========================
	// Load the OBJ scene and copy the tetrahedralization into a
	// structure-of-arrays mesh in unified (managed) memory so host and device
	// share the same pointers.
	tetrahedral_mesh tetmesh;
	tetmesh.loadobj("primitives.obj");
	gpuErrchk(hipMallocManaged(&mesh, sizeof(mesh2)));
	// INDICES
	mesh->oldfacenum = tetmesh.oldfacenum;
	mesh->oldnodenum = tetmesh.oldnodenum;
	mesh->facenum = tetmesh.facenum;
	mesh->nodenum = tetmesh.nodenum;
	mesh->tetnum = tetmesh.tetnum;
	// NODES - GEOMETRY MESH
	hipMallocManaged(&mesh->ng_index, mesh->oldnodenum*sizeof(uint32_t));
	for (auto i : tetmesh.oldnodes) mesh->ng_index[i.index] = i.index;
	hipMallocManaged(&mesh->ng_x, mesh->oldnodenum*sizeof(float));
	hipMallocManaged(&mesh->ng_y, mesh->oldnodenum*sizeof(float));
	hipMallocManaged(&mesh->ng_z, mesh->oldnodenum*sizeof(float));
	for (auto i : tetmesh.oldnodes) mesh->ng_x[i.index] = i.x;
	for (auto i : tetmesh.oldnodes) mesh->ng_y[i.index] = i.y;
	for (auto i : tetmesh.oldnodes) mesh->ng_z[i.index] = i.z;
	// FACES - GEOMETRY MESH
	hipMallocManaged(&mesh->fg_index, mesh->oldfacenum*sizeof(uint32_t));
	for (auto i : tetmesh.oldfaces) mesh->fg_index[i.index] = i.index;
	hipMallocManaged(&mesh->fg_node_a, mesh->oldfacenum*sizeof(uint32_t));
	hipMallocManaged(&mesh->fg_node_b, mesh->oldfacenum*sizeof(uint32_t));
	hipMallocManaged(&mesh->fg_node_c, mesh->oldfacenum*sizeof(uint32_t));
	for (auto i : tetmesh.oldfaces) mesh->fg_node_a[i.index] = i.node_a;
	for (auto i : tetmesh.oldfaces) mesh->fg_node_b[i.index] = i.node_b;
	for (auto i : tetmesh.oldfaces) mesh->fg_node_c[i.index] = i.node_c;
	// NODES
	hipMallocManaged(&mesh->n_index, mesh->nodenum*sizeof(uint32_t));
	for (auto i : tetmesh.nodes) mesh->n_index[i.index] = i.index;
	hipMallocManaged(&mesh->n_x, mesh->nodenum*sizeof(float));
	hipMallocManaged(&mesh->n_y, mesh->nodenum*sizeof(float));
	hipMallocManaged(&mesh->n_z, mesh->nodenum*sizeof(float));
	for (auto i : tetmesh.nodes) mesh->n_x[i.index] = i.x;
	for (auto i : tetmesh.nodes) mesh->n_y[i.index] = i.y;
	for (auto i : tetmesh.nodes) mesh->n_z[i.index] = i.z;
	// FACES
	hipMallocManaged(&mesh->f_index, mesh->facenum*sizeof(uint32_t));
	for (auto i : tetmesh.faces) mesh->f_index[i.index] = i.index;
	hipMallocManaged(&mesh->f_node_a, mesh->facenum*sizeof(uint32_t));
	hipMallocManaged(&mesh->f_node_b, mesh->facenum*sizeof(uint32_t));
	hipMallocManaged(&mesh->f_node_c, mesh->facenum*sizeof(uint32_t));
	for (auto i : tetmesh.faces) mesh->f_node_a[i.index] = i.node_a;
	for (auto i : tetmesh.faces) mesh->f_node_b[i.index] = i.node_b;
	for (auto i : tetmesh.faces) mesh->f_node_c[i.index] = i.node_c;
	// TETRAHEDRA - NEW (per-tet adjacent-face lists, flattened)
	hipMallocManaged(&mesh->adjfaces_num, tetmesh.adjfaces_num.size()*sizeof(uint32_t));
	hipMallocManaged(&mesh->adjfaces_numlist, tetmesh.adjfaces_numlist.size()*sizeof(uint32_t));
	for (int i = 0; i < tetmesh.adjfaces_num.size(); i++) { mesh->adjfaces_num[i] = tetmesh.adjfaces_num.at(i); }
	for (int i = 0; i < tetmesh.adjfaces_numlist.size(); i++) { mesh->adjfaces_numlist[i] = tetmesh.adjfaces_numlist.at(i); }
	hipMallocManaged(&mesh->hasfaces, mesh->tetnum*sizeof(bool));
	for (auto i : tetmesh.tetrahedras) mesh->hasfaces[i.number] = i.hasfaces;
	// TETRAHEDRA (face, node and neighbour-tet indices per tetrahedron)
	hipMallocManaged(&mesh->t_index, mesh->tetnum*sizeof(uint32_t));
	for (auto i : tetmesh.tetrahedras) mesh->t_index[i.number] = i.number;
	hipMallocManaged(&mesh->t_findex1, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_findex2, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_findex3, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_findex4, mesh->tetnum*sizeof(int32_t));
	for (auto i : tetmesh.tetrahedras) mesh->t_findex1[i.number] = i.findex1;
	for (auto i : tetmesh.tetrahedras) mesh->t_findex2[i.number] = i.findex2;
	for (auto i : tetmesh.tetrahedras) mesh->t_findex3[i.number] = i.findex3;
	for (auto i : tetmesh.tetrahedras) mesh->t_findex4[i.number] = i.findex4;
	hipMallocManaged(&mesh->t_nindex1, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_nindex2, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_nindex3, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_nindex4, mesh->tetnum*sizeof(int32_t));
	for (auto i : tetmesh.tetrahedras) mesh->t_nindex1[i.number] = i.nindex1;
	for (auto i : tetmesh.tetrahedras) mesh->t_nindex2[i.number] = i.nindex2;
	for (auto i : tetmesh.tetrahedras) mesh->t_nindex3[i.number] = i.nindex3;
	for (auto i : tetmesh.tetrahedras) mesh->t_nindex4[i.number] = i.nindex4;
	hipMallocManaged(&mesh->t_adjtet1, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_adjtet2, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_adjtet3, mesh->tetnum*sizeof(int32_t));
	hipMallocManaged(&mesh->t_adjtet4, mesh->tetnum*sizeof(int32_t));
	for (auto i : tetmesh.tetrahedras) mesh->t_adjtet1[i.number] = i.adjtet1;
	for (auto i : tetmesh.tetrahedras) mesh->t_adjtet2[i.number] = i.adjtet2;
	for (auto i : tetmesh.tetrahedras) mesh->t_adjtet3[i.number] = i.adjtet3;
	for (auto i : tetmesh.tetrahedras) mesh->t_adjtet4[i.number] = i.adjtet4;
	// ===========================
	// mesh end
	// ===========================
	// Get bounding box
	box = init_BBox(mesh);
	fprintf_s(stderr, "\nBounding box:MIN xyz - %f %f %f \n", box.min.x, box.min.y, box.min.z);
	fprintf_s(stderr, "             MAX xyz - %f %f %f \n\n", box.max.x, box.max.y, box.max.z);
	// Allocate unified memory
	gpuErrchk(hipMallocManaged(&finalimage, width * height * sizeof(float3)));
	gpuErrchk(hipMallocManaged(&accumulatebuffer, width * height * sizeof(float3)));
	// find starting tetrahedra (brute-force GPU search over all tets)
	uint32_t _dim = 2 + pow(mesh->tetnum, 0.25);
	dim3 Block(_dim, _dim, 1);
	dim3 Grid(_dim, _dim, 1);
	GetTetrahedraFromPoint << <Grid, Block >> >(mesh, hostRendercam->position);
	gpuErrchk(hipDeviceSynchronize());
	// NOTE(review): tet index 0 doubles as the "not found" sentinel here -
	// confirm that a real tetrahedron never has index 0.
	if (_start_tet == 0)
	{
		fprintf(stderr, "Starting point outside tetrahedra! Aborting ... \n");
		system("PAUSE");
		exit(0);
	}
	else fprintf(stderr, "Starting tetrahedra - camera: %lu \n", _start_tet);
	// main render function
	render();
	gpuErrchk(hipDeviceReset());
	glfwTerminate();
	return 0;
}
| 467f820abee797bcc429bc5feaca8895a25f758e.cu | /*
* tetrahedra-based raytracer
* Copyright (C) 2015-2016 Christian Lehmann
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define GLEW_STATIC
#include "Util.h"
#include "mesh_io.h"
#include "tetgenio.h"
#include "Camera.h"
#include "device_launch_parameters.h"
#include "GLFW/glfw3.h"
#include <cuda_gl_interop.h>
#include <curand.h>
#include <curand_kernel.h>
#include "Sphere.h"
#define spp 1
#define gamma 2.2f
#define MAX_DEPTH 2
#define width 800
#define height 600
float3* finalimage;
float3* accumulatebuffer;
uint32_t frameNumber = 0;
bool bufferReset = false;
float deltaTime, lastFrame;
BBox box;
GLuint vbo;
mesh2 *mesh;
__managed__ bool edgeVisualization = false;
// Camera
InteractiveCamera* interactiveCamera = NULL;
Camera* hostRendercam = NULL;
int mouse_old_x, mouse_old_y;
int mouse_buttons = 0;
bool buttonActive = false, enableMouseMovement = true, cursorFree = false;
float rotate_x = 0.0, rotate_y = 0.0;
float translate_z = 0.0;
int lastX = width / 2, lastY = height / 2;
int theButtonState = 0;
int theModifierState = 0;
float scalefactor = 1.2f;
union Color // 4 bytes = 4 chars = 1 float
{
	float c;            // packed RGBA pixel reinterpreted as one float for the VBO colour attribute
	uchar4 components;  // the individual 8-bit r, g, b, a channels
};
// CUDA error checking
// Wrap every CUDA runtime call in gpuErrchk(...) to get file/line diagnostics.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA runtime call (readable error string plus call site),
// pauses the console so the message stays visible, and optionally terminates.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
	if (code == cudaSuccess) return; // fast path: nothing to report
	fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
	system("PAUSE"); // Windows-style console pause so the message can be read
	if (abort) exit(code);
}
// Thomas Wang's integer finaliser hash; decorrelates consecutive frame
// numbers before they are used as RNG seeds.
// richiesams.blogspot.co.nz/2015/03/creating-randomness-and-acummulating.html
unsigned int WangHash(unsigned int a) {
	unsigned int h = a;
	h = (h ^ 61u) ^ (h >> 16);
	h += h << 3;
	h ^= h >> 4;
	h *= 0x27d4eb2du;
	h ^= h >> 15;
	return h;
}
// GLFW error callback: forward the human-readable message to stderr.
static void error_callback(int error, const char* description)
{
	(void)error; // numeric code unused; the description is self-contained
	fprintf(stderr, "%s", description);
}
// Keeps the global starting tetrahedron (_start_tet) in sync with the camera
// position after a move: clamp the camera into the mesh bounding box, test the
// current tet, then its four neighbours, and only fall back to a brute-force
// GPU search when all cheap checks fail.
void updateCamPos()
{
	// BUGFIX: clamp FIRST, then read the position. Previously 'pos' was copied
	// before ClampToBBox adjusted hostRendercam->position, so the tet search
	// below could run on a point outside the tetrahedralization.
	ClampToBBox(&box, hostRendercam->position);
	float4 pos = hostRendercam->position;
	// the four tetrahedra adjacent to the current starting tet
	int32_t adjtets[4] = { mesh->t_adjtet1[_start_tet], mesh->t_adjtet2[_start_tet], mesh->t_adjtet3[_start_tet], mesh->t_adjtet4[_start_tet] };
	if (!IsPointInThisTetCPU(mesh, pos, _start_tet))
	{
		// camera left the current tet: try the neighbours on the CPU first...
		if (IsPointInThisTetCPU(mesh, pos, adjtets[0])) _start_tet = adjtets[0];
		else if (IsPointInThisTetCPU(mesh, pos, adjtets[1])) _start_tet = adjtets[1];
		else if (IsPointInThisTetCPU(mesh, pos, adjtets[2])) _start_tet = adjtets[2];
		else if (IsPointInThisTetCPU(mesh, pos, adjtets[3])) _start_tet = adjtets[3];
		else
		{
			// ...and only then search all tetrahedra on the GPU
			fprintf(stderr, "Fallback to CUDA search for starting tet\n");
			uint32_t _dim = 2 + pow(mesh->tetnum, 0.25);
			dim3 Block(_dim, _dim, 1);
			dim3 Grid(_dim, _dim, 1);
			GetTetrahedraFromPoint << <Grid, Block >> >(mesh, pos);
			gpuErrchk(cudaDeviceSynchronize());
		}
	}
}
// GLFW keyboard handler: WASD/R/F translate the camera (revalidating the
// containing tetrahedron), arrows rotate, G/H/T/Z adjust depth of field,
// M toggles edge visualization, C toggles cursor capture, ESC quits.
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
	const float dist = 0.3f; // movement step; skips tetrahedra if set too high
	// track whether a key is currently held (stays true across key repeats)
	if (action == GLFW_PRESS) buttonActive = true;
	if (action == GLFW_RELEASE) buttonActive = false;
	switch (key)
	{
	case GLFW_KEY_ESCAPE:
		if (action == GLFW_PRESS) glfwSetWindowShouldClose(window, GL_TRUE);
		break;
	// --- translation: every move revalidates the containing tet ---
	case GLFW_KEY_A:
		if (buttonActive) { interactiveCamera->strafe(-dist); updateCamPos(); }
		break;
	case GLFW_KEY_D:
		if (buttonActive) { interactiveCamera->strafe(dist); updateCamPos(); }
		break;
	case GLFW_KEY_W:
		if (buttonActive) { interactiveCamera->goForward(dist); updateCamPos(); }
		break;
	case GLFW_KEY_S:
		if (buttonActive) { interactiveCamera->goForward(-dist); updateCamPos(); }
		break;
	case GLFW_KEY_R:
		if (buttonActive) { interactiveCamera->changeAltitude(dist); updateCamPos(); }
		break;
	case GLFW_KEY_F:
		if (buttonActive) { interactiveCamera->changeAltitude(-dist); updateCamPos(); }
		break;
	// --- depth-of-field controls ---
	case GLFW_KEY_G:
		if (buttonActive) interactiveCamera->changeApertureDiameter(0.1);
		break;
	case GLFW_KEY_H:
		if (buttonActive) interactiveCamera->changeApertureDiameter(-0.1);
		break;
	case GLFW_KEY_T:
		if (buttonActive) interactiveCamera->changeFocalDistance(0.1);
		break;
	case GLFW_KEY_Z:
		if (buttonActive) interactiveCamera->changeFocalDistance(-0.1);
		break;
	// --- rotation ---
	case GLFW_KEY_UP:
		if (buttonActive) interactiveCamera->changePitch(0.02f);
		break;
	case GLFW_KEY_DOWN:
		if (buttonActive) interactiveCamera->changePitch(-0.02f);
		break;
	case GLFW_KEY_LEFT:
		if (buttonActive) interactiveCamera->changeYaw(0.02f);
		break;
	case GLFW_KEY_RIGHT:
		if (buttonActive) interactiveCamera->changeYaw(-0.02f);
		break;
	case GLFW_KEY_B:
		if (buttonActive) updateCamPos(); // debug: force tet revalidation
		break;
	case GLFW_KEY_M:
		if (action == GLFW_PRESS) edgeVisualization = !edgeVisualization; // toggle edge overlay
		break;
	case GLFW_KEY_C:
		// toggle between captured cursor (mouse-look) and a free cursor
		if (action == GLFW_PRESS)
		{
			if (cursorFree == false) { glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL); cursorFree = true; enableMouseMovement = false; }
			else { glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); cursorFree = false; enableMouseMovement = true; }
		}
		break;
	}
	// any key event invalidates the progressive accumulation buffer
	bufferReset = true;
}
// Record which mouse button was last pressed; mouse_callback() interprets
// the state as 0 = left (rotate), 1 = middle (zoom), 2 = right (camera move).
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
	if (action != GLFW_PRESS) return;
	switch (button)
	{
	case GLFW_MOUSE_BUTTON_LEFT:   theButtonState = 0; break;
	case GLFW_MOUSE_BUTTON_MIDDLE: theButtonState = 1; break;
	case GLFW_MOUSE_BUTTON_RIGHT:  theButtonState = 2; break;
	default: break;
	}
}
// Cursor-move handler: applies whole-pixel mouse deltas to the camera
// depending on which button is held (see mouse_button_callback). Deltas
// are only consumed — and lastX/lastY only advanced — when mouse movement
// is enabled and the cursor actually moved.
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
	int dx = lastX - xpos; // truncated to whole pixels, as before
	int dy = lastY - ypos;
	if (!enableMouseMovement) return;
	if (dx == 0 && dy == 0) return;
	switch (theButtonState)
	{
	case 0: // Rotate
		interactiveCamera->changeYaw(dx * 0.01);
		interactiveCamera->changePitch(-dy * 0.01);
		break;
	case 1: // Zoom
		interactiveCamera->changeAltitude(-dy * 0.01);
		updateCamPos();
		break;
	}
	if (theButtonState == 2) // camera move
	{
		interactiveCamera->changeRadius(-dy * 0.01);
		updateCamPos();
	}
	lastX = xpos;
	lastY = ypos;
	bufferReset = true; // restart progressive accumulation
}
// Path-trace one ray through the tetrahedral mesh and return its radiance.
//   mesh      - tetrahedralized scene (SoA layout, unified memory)
//   start     - index of the tetrahedron containing the ray origin
//   ray       - primary ray (world-space origin/direction in .o/.d)
//   oldpos    - previous position (currently unused in this body)
//   randState - per-thread cuRAND state for all stochastic decisions
// Up to MAX_DEPTH bounces; emission is accumulated, reflectance multiplies
// into a running mask (smallpt-style).
__device__ RGB radiance(mesh2 *mesh, int32_t start, Ray &ray, float4 oldpos, curandState* randState)
{
	float4 mask = make_float4(1.0f, 1.0f, 1.0f, 1.0f); // colour mask (accumulated reflectance)
	float4 accucolor = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // accumulated colour
	float4 originInWorldSpace = ray.o;
	float4 rayInWorldSpace = ray.d;
	int32_t newstart = start; // tet the current path segment starts in
	for (int bounces = 0; bounces < MAX_DEPTH; bounces++)
	{
		float4 f = make_float4(0, 0, 0, 0); // primitive colour
		float4 emit = make_float4(0, 0, 0, 0); // primitive emission colour
		float4 x; // intersection point
		float4 n; // normal
		float4 nl; // oriented normal (faces against the incoming ray)
		float4 dw; // ray direction of next path segment
		float4 pointHitInWorldSpace;
		float3 rayorig = make_float3(originInWorldSpace.x, originInWorldSpace.y, originInWorldSpace.z);
		float3 raydir = make_float3(rayInWorldSpace.x, rayInWorldSpace.y, rayInWorldSpace.z);
		bool isEdge = false;
		double dist;
		rayhit firsthit;
		Geometry geom;
		// ------------------------------ TRIANGLE intersection --------------------------------------------
		// Walk the tet mesh from the current tet until the ray leaves through a face.
		traverse_ray(mesh, originInWorldSpace, rayInWorldSpace, newstart, firsthit, dist, edgeVisualization, isEdge, n);
		pointHitInWorldSpace = originInWorldSpace + rayInWorldSpace * dist;
		// ------------------------------ SPHERE intersection --------------------------------------------
		// Optional analytic test sphere (disabled by default).
		float4 spherePos = make_float4(10, 10, 10, 0);
		float sphereRad = 10, sphereDist = 0;
		bool spheresEnabled = false;
		if (spheresEnabled) { sphereDist = sphIntersect(originInWorldSpace, rayInWorldSpace, spherePos, sphereRad); }
		// If the sphere is hit first, advance the tet walk to the sphere hit point
		// so newstart stays consistent for the next segment.
		if (sphereDist > 0.0) { geom = SPHERE; traverse_until_point(mesh, originInWorldSpace, rayInWorldSpace, newstart, originInWorldSpace + rayInWorldSpace * sphereDist, firsthit); }
		else { geom = TRIANGLE; }
		if (geom == SPHERE)
		{
			emit = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
			f = make_float4(1.0f, 1.0f, 1.0f, 0.0f);
			firsthit.refl_t = REFR; // test sphere is always refractive
			x = originInWorldSpace + rayInWorldSpace * sphereDist;
			n = normalize((x - spherePos));
			nl = Dot(n, rayInWorldSpace) < 0 ? n : n * -1;
		}
		if (geom == TRIANGLE)
		{
			x = pointHitInWorldSpace;
			n = normalize(n);
			nl = Dot(n, rayInWorldSpace) < 0 ? n : n * -1;
			// Face-type driven shading: constrained faces render blue.
			if (firsthit.constrained == true) { emit = make_float4(0.0f, 0.0f, 0.0f, 0.0f); f = make_float4(0.0f, 0.0f, 0.75f, 0.0f); } // blue is constrained
			if (firsthit.wall == true)
			{
				emit = make_float4(1.0f, 0.0f, 0.0f, 0.0f); // wall faces emit red
				f = make_float4(0.3f, 0.1f, 0.4f, 0.0f);
				/*float4 color1 = make_float4(0, 0, 0, 0);
				float4 color2 = make_float4(0.0f, 1.0f, 1.0f, 0);
				float percent = (((rayInWorldSpace.y + 1) * (1 - 0)) / (1 + 1)) + 0;
				float red = color1.x + percent * (color2.x - color1.x);
				float green = color1.y + percent * (color2.y - color1.y);
				float blue = color1.z + percent * (color2.z - color1.z);
				f = make_float4(red, green, blue, 0);*/
			}
			// "dark" faces act as the bright light source (renders white).
			if (firsthit.dark == true) { emit = make_float4(12.0f, 12.0f, 7.0f, 0.0f); f = make_float4(0.0f, 1.0f, 0.0f, 0.0f); /*printf("Encountered dark state\n");*/ }
			//if (firsthit.face == 3 || firsthit.face == 6) { emit = make_float4(12, 12, 12, 0); f = make_float4(0.0f, 0.0f, 0.0f, 0.0f); }
			// All special face types use diffuse reflection.
			if (firsthit.constrained == true) { firsthit.refl_t = DIFF; }
			if (firsthit.wall == true) { firsthit.refl_t = DIFF; }
			if (firsthit.dark == true) { firsthit.refl_t = DIFF; }
			if (edgeVisualization && isEdge) { emit = make_float4(1.0f, 1.0f, 0.0f, 0.0f); f = make_float4(1.0f, 0.0f, 0.0f, 0.0f); } // visualize wall/constrained edges
		}
		// basic material system, all parameters are hard-coded (such as phong exponent, index of refraction)
		accucolor += (mask * emit);
		// diffuse material, based on smallpt by Kevin Beason
		if (firsthit.refl_t == DIFF){
			// pick two random numbers
			float phi = 2 * _PI_ * curand_uniform(randState);
			float r2 = curand_uniform(randState);
			float r2s = sqrtf(r2);
			// compute orthonormal coordinate frame uvw with hitpoint as origin
			float4 w = nl; w = normalize(w);
			float4 u = Cross((fabs(w.x) > .1 ? make_float4(0, 1, 0, 0) : make_float4(1, 0, 0, 0)), w); u = normalize(u);
			float4 v = Cross(w, u);
			// compute cosine weighted random ray direction on hemisphere
			dw = u*cosf(phi)*r2s + v*sinf(phi)*r2s + w*sqrtf(1 - r2);
			dw = normalize(dw);
			// offset origin next path segment to prevent self intersection
			pointHitInWorldSpace = x + w * 0.01; // scene size dependent
			// multiply mask with colour of object
			mask *= f;
		}
		// Phong metal material from "Realistic Ray Tracing", P. Shirley
		if (firsthit.refl_t == METAL){
			// compute random perturbation of ideal reflection vector
			// the higher the phong exponent, the closer the perturbed vector is to the ideal reflection direction
			float phi = 2 * _PI_ * curand_uniform(randState);
			float r2 = curand_uniform(randState);
			float phongexponent = 20;
			float cosTheta = powf(1 - r2, 1.0f / (phongexponent + 1));
			float sinTheta = sqrtf(1 - cosTheta * cosTheta);
			// create orthonormal basis uvw around reflection vector with hitpoint as origin
			// w is ray direction for ideal reflection
			float4 w = rayInWorldSpace - n * 2.0f * Dot(n, rayInWorldSpace); w = normalize(w);
			float4 u = Cross((fabs(w.x) > .1 ? make_float4(0, 1, 0, 0) : make_float4(1, 0, 0, 0)), w); u = normalize(u);
			float4 v = Cross(w, u); // v is normalised by default
			// compute cosine weighted random ray direction on hemisphere
			dw = u * cosf(phi) * sinTheta + v * sinf(phi) * sinTheta + w * cosTheta;
			dw = normalize(dw);
			// offset origin next path segment to prevent self intersection
			pointHitInWorldSpace = x + w * 0.01; // scene size dependent
			// multiply mask with colour of object
			mask *= f;
		}
		// specular material (perfect mirror)
		if (firsthit.refl_t == SPEC){
			// compute reflected ray direction according to Snell's law
			dw = rayInWorldSpace - n * 2.0f * Dot(n, rayInWorldSpace);
			// offset origin next path segment to prevent self intersection
			pointHitInWorldSpace = x + nl * 0.01; // scene size dependent
			// multiply mask with colour of object
			mask *= f;
		}
		// perfectly refractive material (glass, water)
		if (firsthit.refl_t == REFR){
			bool into = Dot(n, nl) > 0; // is ray entering or leaving refractive material?
			float nc = 1.0f; // Index of Refraction air
			float nt = 1.5f; // Index of Refraction glass/water
			float nnt = into ? nc / nt : nt / nc; // IOR ratio of refractive materials
			float ddn = Dot(rayInWorldSpace, nl);
			float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);
			if (cos2t < 0.0f) // total internal reflection
			{
				dw = rayInWorldSpace;
				dw -= n * 2.0f * Dot(n, rayInWorldSpace);
				// offset origin next path segment to prevent self intersection
				pointHitInWorldSpace = x + nl * 0.01; // scene size dependent
			}
			else // cos2t > 0
			{
				// compute direction of transmission ray
				float4 tdir = rayInWorldSpace * nnt;
				tdir -= n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t)));
				tdir = normalize(tdir);
				// Schlick's approximation: R0 = ((nt-nc)/(nt+nc))^2.
				// BUGFIX: previous code wrote (nt-nc)*(nt-nc)/(nt+nc)*(nt+nc),
				// where operator precedence cancels the denominator entirely.
				float R0 = ((nt - nc) * (nt - nc)) / ((nt + nc) * (nt + nc));
				float c = 1.f - (into ? -ddn : Dot(tdir, n));
				float Re = R0 + (1.f - R0) * c * c * c * c * c; // Fresnel reflectance
				float Tr = 1 - Re; // Transmission
				float P = .25f + .5f * Re; // Russian-roulette reflection probability
				float RP = Re / P;
				float TP = Tr / (1.f - P);
				// randomly choose reflection or transmission ray
				// BUGFIX: sample against P (not a fixed 0.25) — the RP/TP
				// weights above only unbias the estimator when the branch is
				// taken with probability P (cf. smallpt).
				if (curand_uniform(randState) < P) // reflection ray
				{
					mask *= RP;
					dw = rayInWorldSpace;
					dw -= n * 2.0f * Dot(n, rayInWorldSpace);
					pointHitInWorldSpace = x + nl * 0.01; // scene size dependent
				}
				else // transmission ray
				{
					mask *= TP;
					dw = tdir; //r = Ray(x, tdir);
					pointHitInWorldSpace = x + nl * 0.001f; // epsilon must be small to avoid artefacts
				}
			}
		}
		// set up origin and direction of next path segment
		originInWorldSpace = pointHitInWorldSpace;
		rayInWorldSpace = dw;
		newstart = firsthit.tet; // new tet origin
	}
	// add radiance up to a certain ray depth
	// return accumulated ray colour after all bounces are computed
	return RGB(accucolor.x, accucolor.y, accucolor.z);
}
// Render one pixel per thread: builds a jittered primary ray with optional
// depth of field, path-traces it through the tet mesh (spp samples), adds the
// result into the accumulation buffer and writes the gamma-corrected running
// average into the interop VBO as (x, y, packed-RGBA).
// Expects a 2D launch covering width x height; excess threads exit early.
__global__ void renderKernel(mesh2 *tetmesh, int32_t start, float3 *accumbuffer, float3 *c, unsigned int hashedframenumber, unsigned int framenumber, float4 position, float4 view, float4 up, float fovx, float fovy, float focalDistance, float apertureRadius)
{
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// Bounds guard: the launch configuration no longer has to divide
	// width/height exactly (previously out-of-range threads would have
	// indexed outside the framebuffer).
	if (x >= width || y >= height) return;
	unsigned int i = (height - y - 1)*width + x; // framebuffer index, flipped vertically
	int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
	// Per-thread RNG, reseeded every frame via the hashed frame number.
	curandState randState;
	curand_init(hashedframenumber + threadId, 0, 0, &randState);
	int pixelx = x; // pixel x-coordinate on screen
	int pixely = height - y - 1; // pixel y-coordintate on screen
	float4 rendercampos = make_float4(position.x, position.y, position.z, 0);
	RGB finalcol(0);
	for (int s = 0; s < spp; s++)
	{
		// Camera basis: view direction plus orthonormal horizontal/vertical axes.
		float4 rendercamview = make_float4(view.x, view.y, view.z, 0); rendercamview = normalize(rendercamview); // view is already supposed to be normalized, but normalize it explicitly just in case.
		float4 rendercamup = make_float4(up.x, up.y, up.z, 0); rendercamup = normalize(rendercamup);
		float4 horizontalAxis = Cross(rendercamview, rendercamup); horizontalAxis = normalize(horizontalAxis); // Important to normalize!
		float4 verticalAxis = Cross(horizontalAxis, rendercamview); verticalAxis = normalize(verticalAxis); // verticalAxis is normalized by default, but normalize it explicitly just for good measure.
		float4 middle = rendercampos + rendercamview;
		// fovx/fovy are full field-of-view angles in degrees, hence the 0.5 factor.
		float4 horizontal = horizontalAxis * tanf(fovx * 0.5 * (_PI_ / 180));
		float4 vertical = verticalAxis * tanf(-fovy * 0.5 * (_PI_ / 180));
		// Sub-pixel jitter for anti-aliasing.
		float jitterValueX = curand_uniform(&randState) - 0.5;
		float jitterValueY = curand_uniform(&randState) - 0.5;
		float sx = (jitterValueX + pixelx) / (width - 1);
		float sy = (jitterValueY + pixely) / (height - 1);
		float4 pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1));
		float4 pointOnImagePlane = rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * focalDistance); // Important for depth of field!
		// Depth of field: sample the ray origin uniformly on the aperture disk.
		float4 aperturePoint;
		if (apertureRadius > 0.00001)
		{
			float random1 = curand_uniform(&randState);
			float random2 = curand_uniform(&randState);
			float angle = 2 * _PI_ * random1;
			float distance = apertureRadius * sqrtf(random2);
			float apertureX = cos(angle) * distance;
			float apertureY = sin(angle) * distance;
			aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY);
		}
		else { aperturePoint = rendercampos; }
		// calculate ray direction of next ray in path
		float4 apertureToImagePlane = pointOnImagePlane - aperturePoint;
		apertureToImagePlane = normalize(apertureToImagePlane); // ray direction, needs to be normalised
		float4 rayInWorldSpace = apertureToImagePlane;
		rayInWorldSpace = normalize(rayInWorldSpace);
		float4 originInWorldSpace = aperturePoint;
		// Trace and average over spp samples.
		finalcol += radiance(tetmesh, start, Ray(originInWorldSpace, rayInWorldSpace), rendercampos, &randState) * (1.0f / spp);
	}
	// Progressive accumulation: average over all frames rendered so far.
	accumbuffer[i] += finalcol;
	float3 tempcol = accumbuffer[i] / framenumber;
	Color fcolour;
	float3 colour = make_float3(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f));
	// Gamma-correct and pack into a uchar4 for the GL vertex/colour buffer.
	fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / gamma) * 255), (unsigned char)(powf(colour.y, 1 / gamma) * 255), (unsigned char)(powf(colour.z, 1 / gamma) * 255), 1);
	c[i] = make_float3(x, y, fcolour.c);
}
// Create the GLFW window, register input callbacks, set up the CUDA/GL
// interop VBO and run the progressive-rendering main loop. Each frame maps
// the VBO, launches renderKernel to fill it with (x, y, colour) points, then
// draws it as GL_POINTS with an stb_truetype HUD on top.
void render()
{
	GLFWwindow* window;
	if (!glfwInit()) exit(EXIT_FAILURE);
	window = glfwCreateWindow(width, height, "", NULL, NULL);
	glfwMakeContextCurrent(window);
	glfwSetErrorCallback(error_callback);
	glfwSetKeyCallback(window, key_callback);
	glfwSetCursorPosCallback(window, mouse_callback);
	glfwSetMouseButtonCallback(window, mouse_button_callback);
	glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
	glewExperimental = GL_TRUE;
	glewInit();
	if (!glewIsSupported("GL_VERSION_2_0 "))
	{
		fprintf(stderr, "GLEW not supported.");
		fflush(stderr);
		exit(0);
	}
	fprintf(stderr, "GLEW successfully initialized \n");
	// 2D orthographic projection; the image is drawn as width*height points.
	glClearColor(0.0, 0.0, 0.0, 0.0);
	glMatrixMode(GL_PROJECTION);
	glOrtho(0.0, width, 0.0, height, 0, 1);
	// VBO shared between CUDA (written by renderKernel) and GL (point drawing).
	glGenBuffers(1, &vbo);
	glBindBuffer(GL_ARRAY_BUFFER, vbo);
	glBufferData(GL_ARRAY_BUFFER, width * height * sizeof(float3), 0, GL_DYNAMIC_DRAW);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	cudaGraphicsResource_t _cgr;
	size_t num_bytes;
	cudaGraphicsGLRegisterBuffer(&_cgr, vbo, cudaGraphicsRegisterFlagsNone);
	fprintf(stderr, "VBO created \n");
	fprintf(stderr, "Entering glutMainLoop... \n");
	my_stbtt_initfont(); // font initialization
	while (!glfwWindowShouldClose(window))
	{
		glfwPollEvents();
		if (bufferReset)
		{
			// Camera changed: restart progressive accumulation.
			// BUGFIX: clear with 0, not 1 — cudaMemset fills BYTES, so a fill
			// value of 1 left tiny non-zero floats in the accumulator.
			frameNumber = 0;
			cudaMemset(accumulatebuffer, 0, width * height * sizeof(float3));
		}
		bufferReset = false;
		frameNumber++;
		interactiveCamera->buildRenderCamera(hostRendercam);
		// Calculate deltatime of current frame
		GLfloat currentFrame = glfwGetTime();
		deltaTime = currentFrame - lastFrame;
		lastFrame = currentFrame;
		std::stringstream title;
		title << "Tetrahedral pathtracing with node-based tetrahedral mesh (2016) by Christian Lehmann";
		glfwSetWindowTitle(window, title.str().c_str());
		// CUDA interop: map the VBO and let the kernel write pixels into it.
		cudaGraphicsMapResources(1, &_cgr, 0);
		cudaGraphicsResourceGetMappedPointer((void**)&finalimage, &num_bytes, _cgr);
		glClear(GL_COLOR_BUFFER_BIT);
		dim3 block(16, 16, 1);
		dim3 grid(width / block.x, height / block.y, 1);
		// BUGFIX: pass fov.y as the vertical FOV (fov.x was passed twice,
		// squashing the image for non-square aspect ratios).
		renderKernel << <grid, block >> >(mesh, _start_tet, accumulatebuffer, finalimage, WangHash(frameNumber), frameNumber,
			hostRendercam->position, hostRendercam->view, hostRendercam->up, hostRendercam->fov.x, hostRendercam->fov.y,
			hostRendercam->focalDistance, hostRendercam->apertureRadius);
		gpuErrchk(cudaDeviceSynchronize());
		cudaGraphicsUnmapResources(1, &_cgr, 0);
		// Draw the image: each vertex is (x, y, packed RGBA colour), stride 12 bytes.
		glBindBuffer(GL_ARRAY_BUFFER, vbo);
		glVertexPointer(2, GL_FLOAT, 12, 0);
		glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
		glEnableClientState(GL_VERTEX_ARRAY);
		glEnableClientState(GL_COLOR_ARRAY);
		glDrawArrays(GL_POINTS, 0, width * height);
		glDisableClientState(GL_VERTEX_ARRAY);
		glDisableClientState(GL_COLOR_ARRAY); // keep client state symmetric (was left enabled)
		// HUD overlay.
		float mrays = width*height*MAX_DEPTH*0.000001 / deltaTime;
		std::string a = "Currently " + std::to_string(mrays) + " Mray/s";
		my_stbtt_print(100, 200, a, make_float3(1, 1, 1));
		std::string str_orig = std::to_string(hostRendercam->position.x) + " " + std::to_string(hostRendercam->position.y) + " " + std::to_string(hostRendercam->position.z);
		std::string str_dir = std::to_string(hostRendercam->view.x) + " " + std::to_string(hostRendercam->view.y) + " " + std::to_string(hostRendercam->view.z);
		my_stbtt_print(100, 150, "Ray origin: " + str_orig, make_float3(1, 1, 1));
		my_stbtt_print(100, 100, "Ray direction: " + str_dir, make_float3(1, 1, 1));
		my_stbtt_print(100, 50, "Current tet: " + std::to_string(_start_tet), make_float3(1, 1, 1));
		my_stbtt_print(100, 10, "Current ms/frame: " + std::to_string(deltaTime*1000), make_float3(1, 1, 1));
		glfwSwapBuffers(window);
	}
}
// Program entry point: builds the interactive camera, selects a CUDA device,
// loads the tetrahedral mesh into unified memory (SoA layout), locates the
// tetrahedron containing the camera and enters the render loop.
int main(int argc, char *argv[])
{
// (Re)create the interactive camera; interactiveCamera is a global, so the
// initial delete frees any previous instance (deleting null is a no-op).
delete interactiveCamera;
interactiveCamera = new InteractiveCamera();
interactiveCamera->setResolution(width, height);
interactiveCamera->setFOVX(45);
hostRendercam = new Camera();
interactiveCamera->buildRenderCamera(hostRendercam);
// Pick any CUDA device with at least compute capability 1.0.
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1;
prop.minor = 0;
cudaChooseDevice(&dev, &prop);
// ===========================
// mesh2
// ===========================
// Load the OBJ scene and copy it field-by-field into a unified-memory,
// structure-of-arrays mesh2 so both host and device can read it.
tetrahedral_mesh tetmesh;
tetmesh.loadobj("primitives.obj");
gpuErrchk(cudaMallocManaged(&mesh, sizeof(mesh2)));
// INDICES
mesh->oldfacenum = tetmesh.oldfacenum;
mesh->oldnodenum = tetmesh.oldnodenum;
mesh->facenum = tetmesh.facenum;
mesh->nodenum = tetmesh.nodenum;
mesh->tetnum = tetmesh.tetnum;
// NODES - GEOMETRY MESH (original, pre-tetrahedralization geometry)
cudaMallocManaged(&mesh->ng_index, mesh->oldnodenum*sizeof(uint32_t));
for (auto i : tetmesh.oldnodes) mesh->ng_index[i.index] = i.index;
cudaMallocManaged(&mesh->ng_x, mesh->oldnodenum*sizeof(float));
cudaMallocManaged(&mesh->ng_y, mesh->oldnodenum*sizeof(float));
cudaMallocManaged(&mesh->ng_z, mesh->oldnodenum*sizeof(float));
for (auto i : tetmesh.oldnodes) mesh->ng_x[i.index] = i.x;
for (auto i : tetmesh.oldnodes) mesh->ng_y[i.index] = i.y;
for (auto i : tetmesh.oldnodes) mesh->ng_z[i.index] = i.z;
// FACES - GEOMETRY MESH
cudaMallocManaged(&mesh->fg_index, mesh->oldfacenum*sizeof(uint32_t));
for (auto i : tetmesh.oldfaces) mesh->fg_index[i.index] = i.index;
cudaMallocManaged(&mesh->fg_node_a, mesh->oldfacenum*sizeof(uint32_t));
cudaMallocManaged(&mesh->fg_node_b, mesh->oldfacenum*sizeof(uint32_t));
cudaMallocManaged(&mesh->fg_node_c, mesh->oldfacenum*sizeof(uint32_t));
for (auto i : tetmesh.oldfaces) mesh->fg_node_a[i.index] = i.node_a;
for (auto i : tetmesh.oldfaces) mesh->fg_node_b[i.index] = i.node_b;
for (auto i : tetmesh.oldfaces) mesh->fg_node_c[i.index] = i.node_c;
// NODES (tetrahedralized mesh)
cudaMallocManaged(&mesh->n_index, mesh->nodenum*sizeof(uint32_t));
for (auto i : tetmesh.nodes) mesh->n_index[i.index] = i.index;
cudaMallocManaged(&mesh->n_x, mesh->nodenum*sizeof(float));
cudaMallocManaged(&mesh->n_y, mesh->nodenum*sizeof(float));
cudaMallocManaged(&mesh->n_z, mesh->nodenum*sizeof(float));
for (auto i : tetmesh.nodes) mesh->n_x[i.index] = i.x;
for (auto i : tetmesh.nodes) mesh->n_y[i.index] = i.y;
for (auto i : tetmesh.nodes) mesh->n_z[i.index] = i.z;
// FACES (tetrahedralized mesh)
cudaMallocManaged(&mesh->f_index, mesh->facenum*sizeof(uint32_t));
for (auto i : tetmesh.faces) mesh->f_index[i.index] = i.index;
cudaMallocManaged(&mesh->f_node_a, mesh->facenum*sizeof(uint32_t));
cudaMallocManaged(&mesh->f_node_b, mesh->facenum*sizeof(uint32_t));
cudaMallocManaged(&mesh->f_node_c, mesh->facenum*sizeof(uint32_t));
for (auto i : tetmesh.faces) mesh->f_node_a[i.index] = i.node_a;
for (auto i : tetmesh.faces) mesh->f_node_b[i.index] = i.node_b;
for (auto i : tetmesh.faces) mesh->f_node_c[i.index] = i.node_c;
// TETRAHEDRA - NEW (flattened adjacent-face lists per tet)
cudaMallocManaged(&mesh->adjfaces_num, tetmesh.adjfaces_num.size()*sizeof(uint32_t));
cudaMallocManaged(&mesh->adjfaces_numlist, tetmesh.adjfaces_numlist.size()*sizeof(uint32_t));
for (int i = 0; i < tetmesh.adjfaces_num.size(); i++) { mesh->adjfaces_num[i] = tetmesh.adjfaces_num.at(i); }
for (int i = 0; i < tetmesh.adjfaces_numlist.size(); i++) { mesh->adjfaces_numlist[i] = tetmesh.adjfaces_numlist.at(i); }
cudaMallocManaged(&mesh->hasfaces, mesh->tetnum*sizeof(bool));
for (auto i : tetmesh.tetrahedras) mesh->hasfaces[i.number] = i.hasfaces;
// TETRAHEDRA (four face indices, four node indices, four adjacent tets each)
cudaMallocManaged(&mesh->t_index, mesh->tetnum*sizeof(uint32_t));
for (auto i : tetmesh.tetrahedras) mesh->t_index[i.number] = i.number;
cudaMallocManaged(&mesh->t_findex1, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_findex2, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_findex3, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_findex4, mesh->tetnum*sizeof(int32_t));
for (auto i : tetmesh.tetrahedras) mesh->t_findex1[i.number] = i.findex1;
for (auto i : tetmesh.tetrahedras) mesh->t_findex2[i.number] = i.findex2;
for (auto i : tetmesh.tetrahedras) mesh->t_findex3[i.number] = i.findex3;
for (auto i : tetmesh.tetrahedras) mesh->t_findex4[i.number] = i.findex4;
cudaMallocManaged(&mesh->t_nindex1, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_nindex2, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_nindex3, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_nindex4, mesh->tetnum*sizeof(int32_t));
for (auto i : tetmesh.tetrahedras) mesh->t_nindex1[i.number] = i.nindex1;
for (auto i : tetmesh.tetrahedras) mesh->t_nindex2[i.number] = i.nindex2;
for (auto i : tetmesh.tetrahedras) mesh->t_nindex3[i.number] = i.nindex3;
for (auto i : tetmesh.tetrahedras) mesh->t_nindex4[i.number] = i.nindex4;
cudaMallocManaged(&mesh->t_adjtet1, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_adjtet2, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_adjtet3, mesh->tetnum*sizeof(int32_t));
cudaMallocManaged(&mesh->t_adjtet4, mesh->tetnum*sizeof(int32_t));
for (auto i : tetmesh.tetrahedras) mesh->t_adjtet1[i.number] = i.adjtet1;
for (auto i : tetmesh.tetrahedras) mesh->t_adjtet2[i.number] = i.adjtet2;
for (auto i : tetmesh.tetrahedras) mesh->t_adjtet3[i.number] = i.adjtet3;
for (auto i : tetmesh.tetrahedras) mesh->t_adjtet4[i.number] = i.adjtet4;
// ===========================
// mesh end
// ===========================
// Get bounding box
box = init_BBox(mesh);
fprintf_s(stderr, "\nBounding box:MIN xyz - %f %f %f \n", box.min.x, box.min.y, box.min.z);
fprintf_s(stderr, " MAX xyz - %f %f %f \n\n", box.max.x, box.max.y, box.max.z);
// Allocate unified memory for the interop image and the accumulation buffer.
gpuErrchk(cudaMallocManaged(&finalimage, width * height * sizeof(float3)));
gpuErrchk(cudaMallocManaged(&accumulatebuffer, width * height * sizeof(float3)));
// find starting tetrahedra
// Brute-force GPU search: grid/block sized so that the total thread count
// covers tetnum (roughly (2 + tetnum^0.25)^4 threads).
uint32_t _dim = 2 + pow(mesh->tetnum, 0.25);
dim3 Block(_dim, _dim, 1);
dim3 Grid(_dim, _dim, 1);
GetTetrahedraFromPoint << <Grid, Block >> >(mesh, hostRendercam->position);
gpuErrchk(cudaDeviceSynchronize());
// NOTE(review): _start_tet == 0 doubles as the "not found" sentinel — a
// camera genuinely located inside tet 0 would also abort here; confirm.
if (_start_tet == 0)
{
fprintf(stderr, "Starting point outside tetrahedra! Aborting ... \n");
system("PAUSE");
exit(0);
}
else fprintf(stderr, "Starting tetrahedra - camera: %lu \n", _start_tet);
// main render function
render();
gpuErrchk(cudaDeviceReset());
glfwTerminate();
return 0;
}
|
33ec5c4c2e6cac8ee154e57ca9c5bf96e53478a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "reduce.h"

#include <stdio.h>

#include <cmath>
#include <iostream>
#include <vector>

#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "utils.h"
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(38.716 * 10^-3 s)
// 13.867 GB/s = 96.297% -> excellent memory bandwidth
// Reasonable point to stop working on this implementation's optimization
// Algorithm is not compute-intensive, so achieving >75% of theoretical bandwidth is the goal
// Main strategies used:
// - Process as much data as possible (in terms of algorithm correctness) in shared memory
// - Use sequential addressing to get rid of bank conflicts
// Reassemble a 4-byte int from a (possibly unaligned) record buffer by
// copying it byte-for-byte in native order.
__device__
int sint4korr(const char *record_ptr) {
    int value = 0;
    char *dst = (char *) &value;
    unsigned long n = sizeof(int);
    while (n-- > 0) {
        dst[n] = record_ptr[n];
    }
    return value;
}
// Tree-reduce one block's window of the input (2 * blockDim.x elements) into
// d_block_sums[blockIdx.x]. Sequential addressing (bank-conflict free) with
// two loads per thread, as in reduce3().
// Launch with dynamic shared memory of (2 * blockDim.x) * sizeof(unsigned int)
// and gridDim.x = ceil(d_in_len / (2 * blockDim.x)).
__global__
void block_sum_reduce(unsigned int* const d_block_sums,
	const unsigned int* const d_in,
	const unsigned int d_in_len)
{
	extern __shared__ unsigned int s_out[];
	unsigned int tid = threadIdx.x;
	// BUGFIX: each block consumes 2 * blockDim.x consecutive elements, so its
	// window must start at blockIdx.x * (2 * blockDim.x). The previous base of
	// blockDim.x * blockIdx.x made adjacent blocks overlap by half a window
	// (double-counting) and left the tail of the input unread.
	unsigned int glbl_tid = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
	// Zero out shared memory
	// Especially important when padding shmem for
	// non-power of 2 sized input
	s_out[threadIdx.x] = 0;
	s_out[threadIdx.x + blockDim.x] = 0;
	__syncthreads();
	// Copy this block's window of d_in into shared memory, two elements per
	// thread, guarding both loads against the end of the input.
	if (glbl_tid < d_in_len)
	{
		s_out[threadIdx.x] = d_in[glbl_tid];
		if (glbl_tid + blockDim.x < d_in_len)
			s_out[threadIdx.x + blockDim.x] = d_in[glbl_tid + blockDim.x];
	}
	__syncthreads();
	// Sequential-addressing tree reduction in shared memory.
	for (unsigned int s = blockDim.x; s > 0; s >>= 1) {
		if (tid < s) {
			s_out[tid] += s_out[tid + s];
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0)
		d_block_sums[blockIdx.x] = s_out[0];
}
// Variant of block_sum_reduce (identical algorithm): reduce one block's
// window of 2 * blockDim.x input elements into d_block_sums[blockIdx.x].
// Launch with dynamic shared memory of (2 * blockDim.x) * sizeof(unsigned int)
// and gridDim.x = ceil(d_in_len / (2 * blockDim.x)).
__global__
void block_sum_reduce_dma(unsigned int* const d_block_sums,
	const unsigned int* const d_in,
	const unsigned int d_in_len)
{
	extern __shared__ unsigned int s_out[];
	unsigned int tid = threadIdx.x;
	// BUGFIX: window base must advance by 2 * blockDim.x per block (each block
	// loads two elements per thread); the old blockDim.x * blockIdx.x base
	// overlapped adjacent blocks and skipped the second half of the input.
	unsigned int glbl_tid = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
	// Zero out shared memory
	// Especially important when padding shmem for
	// non-power of 2 sized input
	s_out[threadIdx.x] = 0;
	s_out[threadIdx.x + blockDim.x] = 0;
	__syncthreads();
	// Copy this block's window of d_in into shared memory, guarding both loads.
	if (glbl_tid < d_in_len)
	{
		s_out[threadIdx.x] = d_in[glbl_tid];
		if (glbl_tid + blockDim.x < d_in_len)
			s_out[threadIdx.x + blockDim.x] = d_in[glbl_tid + blockDim.x];
	}
	__syncthreads();
	// Sequential-addressing tree reduction in shared memory.
	for (unsigned int s = blockDim.x; s > 0; s >>= 1) {
		if (tid < s) {
			s_out[tid] += s_out[tid + s];
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0)
		d_block_sums[blockIdx.x] = s_out[0];
}
// On my current laptop with GTX 850M, theoretical peak bandwidth is 14.4 GB/s
// Shared memory of GTX 850M has 32 memory banks
// Succeeding measurements are for the Release build
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(173.444 * 10^-3 s)
// 3.095 GB/s = 21.493% -> bad kernel memory bandwidth
// Baseline reduction: interleaved addressing keyed off thread IDs, which
// diverges every warp (pedagogical worst case). One element per thread;
// launch with dynamic shared memory of blockDim.x * sizeof(unsigned int).
__global__ void reduce0(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
	extern __shared__ unsigned int sdata[];
	// each thread loads one element from global to shared mem
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
	sdata[tid] = 0;
	if (i < len)
	{
		sdata[tid] = g_idata[i];
	}
	__syncthreads();
	// do reduction in shared mem
	// Interleaved addressing, which causes huge thread divergence
	// because threads are active/inactive according to their thread IDs
	// being powers of two. The if conditional here is guaranteed to diverge
	// threads within a warp.
	// BUGFIX: the stride must double each step (s <<= 1) and stop at
	// blockDim.x. The previous "s <<= 2" quadrupled the stride, dropping
	// the partial sums at offsets 2 and 3 mod 4, and the hard-coded 2048
	// bound silently assumed a fixed block size.
	for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
		if (tid % (2 * s) == 0) {
			sdata[tid] += sdata[tid + s];
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0)
		g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(81.687 * 10^-3 s)
// 6.572 GB/s = 45.639% -> bad kernel memory bandwidth, but better than last time
// Reduction step 2: interleaved addressing with contiguous active threads.
// One element per thread; launch with dynamic shared memory of
// blockDim.x * sizeof(unsigned int). Assumes blockDim.x is a power of
// two — TODO confirm against the host launch code.
__global__ void reduce1(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
	extern __shared__ unsigned int sdata[];
	// each thread loads one element from global to shared mem
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
	// Zero-fill so out-of-range threads contribute an identity element.
	sdata[tid] = 0;
	if (i < len)
	{
		sdata[tid] = g_idata[i];
	}
	__syncthreads();
	// do reduction in shared mem
	// Interleaved addressing, but threads being active/inactive
	// is no longer based on thread IDs being powers of two. Consecutive
	// threadIDs now run, and thus solves the thread diverging issue within
	// a warp
	// However, this introduces shared memory bank conflicts, as threads start
	// out addressing with a stride of two 32-bit words (unsigned ints),
	// and further increase the stride as the current power of two grows larger
	// (which can worsen or lessen bank conflicts, depending on the amount
	// of stride)
	for (unsigned int s = 1; s < blockDim.x; s *= 2) {
		unsigned int index = 2 * s * tid;
		if (index < blockDim.x) {
			sdata[index] += sdata[index + s];
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0)
		g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(67.699 * 10^-3 s)
// 7.930 GB/s = 55.069% -> good kernel memory bandwidth
// Reduction step 3: sequential addressing — each step halves the active
// thread range, and adjacent threads touch adjacent shared-memory words, so
// there are no bank conflicts. One element per thread; launch with dynamic
// shared memory of blockDim.x * sizeof(unsigned int). Assumes blockDim.x is
// a power of two — TODO confirm against the host launch code.
__global__ void reduce2(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
	extern __shared__ unsigned int sdata[];
	// each thread loads one element from global to shared mem
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
	// Zero-fill so out-of-range threads contribute an identity element.
	sdata[tid] = 0;
	if (i < len)
	{
		sdata[tid] = g_idata[i];
	}
	__syncthreads();
	// do reduction in shared mem
	// Sequential addressing. This solves the bank conflicts as
	// the threads now access shared memory with a stride of one
	// 32-bit word (unsigned int) now, which does not cause bank
	// conflicts
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
		if (tid < s) {
			sdata[tid] += sdata[tid + s];
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0)
		g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(58.505 * 10^-3 s)
// 9.176 GB/s = 63.722% -> good kernel memory bandwidth, better than last time
// Reduction step 4: first add during the global-to-shared load, so each
// thread handles TWO input elements and all threads are active from the
// start. Launch with gridDim.x = ceil(len / (2 * blockDim.x)) and dynamic
// shared memory of blockDim.x * sizeof(unsigned int).
__global__ void reduce3(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
	extern __shared__ unsigned int sdata[];
	// each thread loads one element from global to shared mem
	// Do the first stage of the reduction on the global-to-shared load step
	// This reduces the previous inefficiency of having half of the threads being
	// inactive on the first for-loop iteration below (previous first step of reduction)
	// Previously, only less than or equal to 512 out of 1024 threads in a block are active.
	// Now, all 512 threads in a block are active from the start
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
	sdata[tid] = 0;
	if (i < len)
	{
		// BUGFIX: the second element must be guarded separately — when the
		// input tail falls between i and i + blockDim.x, the old unguarded
		// read of g_idata[i + blockDim.x] went out of bounds.
		sdata[tid] = g_idata[i];
		if (i + blockDim.x < len)
			sdata[tid] += g_idata[i + blockDim.x];
	}
	__syncthreads();
	// do reduction in shared mem
	// this loop now starts with s = 512 / 2 = 256
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
		if (tid < s) {
			sdata[tid] += sdata[tid + s];
		}
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0)
		g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(37.255 * 10^-3 s)
// 14.411 GB/s = 100% -> perfect bandwidth? is this even possible?
// ***In my laptop, measurements are wrong since the release version oddly outputs an incorrect value***
// Reduction step 5: like reduce3 but the last warp is fully unrolled with no
// __syncthreads() between steps. Launch with gridDim.x =
// ceil(len / (2 * blockDim.x)) and dynamic shared memory of
// blockDim.x * sizeof(unsigned int); requires blockDim.x >= 64.
__global__ void reduce4(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
	extern __shared__ unsigned int sdata[];
	// First add during the global-to-shared load: each thread sums two input
	// elements so all threads are active from the start.
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
	sdata[tid] = 0;
	if (i < len)
	{
		// BUGFIX: guard the second load separately to avoid reading past the
		// end of g_idata when the input tail falls inside this block's window.
		sdata[tid] = g_idata[i];
		if (i + blockDim.x < len)
			sdata[tid] += g_idata[i + blockDim.x];
	}
	__syncthreads();
	// do reduction in shared mem
	// this loop now starts with s = 512 / 2 = 256 and stops once only the
	// final warp's worth of partial sums remains
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
		if (tid < s) {
			sdata[tid] += sdata[tid + s];
		}
		__syncthreads();
	}
	if (tid < 32)
	{
		// BUGFIX: the unrolled last-warp steps have no barriers, so shared
		// memory must be accessed through a volatile pointer to force each
		// += to be an actual load/store. Without it the compiler may keep
		// values in registers, which is the documented cause of wrong results
		// in optimized (release) builds — matching the note above.
		volatile unsigned int* vsmem = sdata;
		vsmem[tid] += vsmem[tid + 32];
		vsmem[tid] += vsmem[tid + 16];
		vsmem[tid] += vsmem[tid + 8];
		vsmem[tid] += vsmem[tid + 4];
		vsmem[tid] += vsmem[tid + 2];
		vsmem[tid] += vsmem[tid + 1];
	}
	// write result for this block to global mem
	if (tid == 0)
		g_odata[blockIdx.x] = sdata[0];
}
// Debug helper: copy a device array to the host and print "index : value"
// lines. Uses std::vector for exception-safe host storage (the previous
// new[]/delete[] pair leaked on a failed hipMemcpy check).
void print_d_array(unsigned int* d_array, unsigned int len)
{
	if (len == 0) return; // nothing to copy or print
	std::vector<unsigned int> h_array(len);
	checkCudaErrors(hipMemcpy(h_array.data(), d_array, sizeof(unsigned int) * len, hipMemcpyDeviceToHost));
	for (unsigned int i = 0; i < len; ++i)
	{
		std::cout << i << " : " << h_array[i] << std::endl;
	}
}
unsigned int gpu_sum_reduce(unsigned int* d_in, unsigned int d_in_len)
{
    // Sums the device array d_in (d_in_len elements) and returns the total on
    // the host. Each block reduces up to 2 * MAX_BLOCK_SZ elements into one
    // partial sum; the per-block partials are then reduced again, recursively
    // when they do not fit in a single block.
    unsigned int total_sum = 0;
    // Empty input: avoid a zero-sized allocation and a zero-block launch.
    if (d_in_len == 0)
        return total_sum;
    unsigned int block_sz = MAX_BLOCK_SZ;
    // Each block consumes 2 * block_sz elements because the first reduction
    // stage is folded into the global-to-shared load.
    unsigned int max_elems_per_block = block_sz * 2;
    // Least number of blocks covering the input; a partial tail still needs
    // a whole block.
    unsigned int grid_sz = d_in_len / max_elems_per_block;
    if (d_in_len % max_elems_per_block != 0)
        grid_sz++;
    // One partial-sum slot per block, zeroed so unused slots are harmless.
    unsigned int* d_block_sums;
    checkCudaErrors(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
    checkCudaErrors(hipMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
    // Reduce each block's window of the input to a partial sum.
    hipLaunchKernelGGL(( block_sum_reduce), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * max_elems_per_block, 0, d_block_sums, d_in, d_in_len);
    if (grid_sz <= max_elems_per_block)
    {
        // Few enough partials left for a single block to finish the job.
        unsigned int* d_total_sum;
        checkCudaErrors(hipMalloc(&d_total_sum, sizeof(unsigned int)));
        checkCudaErrors(hipMemset(d_total_sum, 0, sizeof(unsigned int)));
        hipLaunchKernelGGL(( block_sum_reduce), dim3(1), dim3(block_sz), sizeof(unsigned int) * max_elems_per_block, 0, d_total_sum, d_block_sums, grid_sz);
        checkCudaErrors(hipMemcpy(&total_sum, d_total_sum, sizeof(unsigned int), hipMemcpyDeviceToHost));
        checkCudaErrors(hipFree(d_total_sum));
    }
    else
    {
        // Too many partials: recurse directly on d_block_sums. The callee
        // never frees its input, so the former allocate-and-copy round trip
        // into a scratch buffer was unnecessary work.
        total_sum = gpu_sum_reduce(d_block_sums, grid_sz);
    }
    checkCudaErrors(hipFree(d_block_sums));
    return total_sum;
}
unsigned int gpu_sum_reduce_dma(unsigned int* d_in, unsigned int d_in_len)
{
    // Same contract as gpu_sum_reduce(), but driving the *_dma kernel variant:
    // sums d_in (d_in_len device elements) and returns the total on the host.
    unsigned int total_sum = 0;
    // Empty input: avoid a zero-sized allocation and a zero-block launch.
    if (d_in_len == 0)
        return total_sum;
    unsigned int block_sz = MAX_BLOCK_SZ;
    // Each block consumes 2 * block_sz elements (first add folded into load).
    unsigned int max_elems_per_block = block_sz * 2;
    // Least number of blocks covering the input; a partial tail still needs
    // a whole block.
    unsigned int grid_sz = d_in_len / max_elems_per_block;
    if (d_in_len % max_elems_per_block != 0)
        grid_sz++;
    // One partial-sum slot per block, zeroed so unused slots are harmless.
    unsigned int* d_block_sums;
    checkCudaErrors(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
    checkCudaErrors(hipMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
    // Reduce each block's window of the input to a partial sum.
    hipLaunchKernelGGL(( block_sum_reduce_dma), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * max_elems_per_block, 0, d_block_sums, d_in, d_in_len);
    if (grid_sz <= max_elems_per_block)
    {
        // Few enough partials left for a single block to finish the job.
        unsigned int* d_total_sum;
        checkCudaErrors(hipMalloc(&d_total_sum, sizeof(unsigned int)));
        checkCudaErrors(hipMemset(d_total_sum, 0, sizeof(unsigned int)));
        hipLaunchKernelGGL(( block_sum_reduce_dma), dim3(1), dim3(block_sz), sizeof(unsigned int) * max_elems_per_block, 0, d_total_sum, d_block_sums, grid_sz);
        checkCudaErrors(hipMemcpy(&total_sum, d_total_sum, sizeof(unsigned int), hipMemcpyDeviceToHost));
        checkCudaErrors(hipFree(d_total_sum));
    }
    else
    {
        // Too many partials: recurse directly on d_block_sums. The callee
        // never frees its input, so the former allocate-and-copy round trip
        // into a scratch buffer was unnecessary work.
        total_sum = gpu_sum_reduce_dma(d_block_sums, grid_sz);
    }
    checkCudaErrors(hipFree(d_block_sums));
    return total_sum;
}
| 33ec5c4c2e6cac8ee154e57ca9c5bf96e53478a9.cu | #include "reduce.h"
#include <stdio.h>
#include <cmath>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "utils.h"
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(38.716 * 10^-3 s)
// 13.867 GB/s = 96.297% -> excellent memory bandwidth
// Reasonable point to stop working on this implementation's optimization
// Algorithm is not compute-intensive, so acheiving >75% of theoretical bandwidth is goal
// Main strategies used:
// - Process as much data as possible (in terms of algorithm correctness) in shared memory
// - Use sequential addressing to get rid of bank conflicts
__device__
int sint4korr(const char *record_ptr) {
    // Copies sizeof(int) raw bytes from a (possibly unaligned) record pointer
    // into a properly aligned local int, preserving native byte order.
    int value;
    char *out = (char *) &value;
    unsigned long n = sizeof(int);
    while (n-- > 0) {
        out[n] = record_ptr[n];
    }
    return value;
}
__global__
void block_sum_reduce(unsigned int* const d_block_sums,
    const unsigned int* const d_in,
    const unsigned int d_in_len)
{
    // Reduces up to 2 * blockDim.x elements of d_in into a single partial sum
    // per block, written to d_block_sums[blockIdx.x]. Requires
    // 2 * blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
    extern __shared__ unsigned int s_out[];
    unsigned int max_elems_per_block = blockDim.x * 2;
    // Each block owns a contiguous window of max_elems_per_block input
    // elements, matching the host-side grid sizing of ceil(len / (2*block_sz)).
    // The window base must therefore advance by max_elems_per_block per block:
    // the previous "blockDim.x * blockIdx.x" made consecutive blocks overlap
    // by half a window, double-counting those elements and never reading the
    // tail of the input.
    unsigned int glbl_tid = max_elems_per_block * blockIdx.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    // Zero out shared memory
    // Especially important when padding shmem for
    // non-power of 2 sized input
    s_out[threadIdx.x] = 0;
    s_out[threadIdx.x + blockDim.x] = 0;
    __syncthreads();
    // Copy this block's window of d_in to shared memory (both loads bounds-checked)
    if (glbl_tid < d_in_len)
    {
        s_out[threadIdx.x] = d_in[glbl_tid];
        if (glbl_tid + blockDim.x < d_in_len)
            s_out[threadIdx.x + blockDim.x] = d_in[glbl_tid + blockDim.x];
    }
    __syncthreads();
    // Sequential-addressing tree reduction over the 2 * blockDim.x slots
    for (unsigned int s = blockDim.x; s > 0; s >>= 1) {
        if (tid < s) {
            s_out[tid] += s_out[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0)
        d_block_sums[blockIdx.x] = s_out[0];
}
__global__
void block_sum_reduce_dma(unsigned int* const d_block_sums,
    const unsigned int* const d_in,
    const unsigned int d_in_len)
{
    // Same contract as block_sum_reduce(): reduces up to 2 * blockDim.x
    // elements of d_in into d_block_sums[blockIdx.x]. Requires
    // 2 * blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
    extern __shared__ unsigned int s_out[];
    unsigned int max_elems_per_block = blockDim.x * 2;
    // Window base advances by max_elems_per_block per block (see
    // block_sum_reduce): the previous "blockDim.x * blockIdx.x" made
    // consecutive blocks overlap by half a window, double-counting those
    // elements and never reading the tail of the input.
    unsigned int glbl_tid = max_elems_per_block * blockIdx.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    // Zero out shared memory
    // Especially important when padding shmem for
    // non-power of 2 sized input
    s_out[threadIdx.x] = 0;
    s_out[threadIdx.x + blockDim.x] = 0;
    __syncthreads();
    // Copy this block's window of d_in to shared memory (both loads bounds-checked)
    if (glbl_tid < d_in_len)
    {
        s_out[threadIdx.x] = d_in[glbl_tid];
        if (glbl_tid + blockDim.x < d_in_len)
            s_out[threadIdx.x + blockDim.x] = d_in[glbl_tid + blockDim.x];
    }
    __syncthreads();
    // Sequential-addressing tree reduction over the 2 * blockDim.x slots
    for (unsigned int s = blockDim.x; s > 0; s >>= 1) {
        if (tid < s) {
            s_out[tid] += s_out[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0)
        d_block_sums[blockIdx.x] = s_out[0];
}
// On my current laptop with GTX 850M, theoretical peak bandwidth is 14.4 GB/s
// Shared memory of GTX 850M has 32 memory banks
// Succeeding measurements are for the Release build
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(173.444 * 10^-3 s)
// 3.095 GB/s = 21.493% -> bad kernel memory bandwidth
__global__ void reduce0(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
    // Baseline reduction: interleaved addressing with a modulo test, one
    // element per thread, one partial sum per block in g_odata[blockIdx.x].
    extern __shared__ unsigned int sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = 0;
    if (i < len)
    {
        sdata[tid] = g_idata[i];
    }
    __syncthreads();
    // do reduction in shared mem
    // Interleaved addressing, which causes huge thread divergence
    // because threads are active/inactive according to their thread IDs
    // being powers of two. The if conditional here is guaranteed to diverge
    // threads within a warp.
    // The stride must double each step (s <<= 1) and run up to blockDim.x:
    // the previous "s <<= 2" quadrupled the stride, skipping every other
    // reduction level so some partials (e.g. sdata[2]) were never folded in,
    // and the hard-coded 2048 bound tied the kernel to one block size.
    for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(81.687 * 10^-3 s)
// 6.572 GB/s = 45.639% -> bad kernel memory bandwidth, but better than last time
__global__ void reduce1(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
    // Interleaved-addressing reduction restructured so that *consecutive*
    // thread IDs do the work: this removes the intra-warp divergence of the
    // modulo test in reduce0. The growing stride into shared memory does,
    // however, introduce shared-memory bank conflicts.
    extern __shared__ unsigned int sdata[];
    const unsigned int lane = threadIdx.x;
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage one element per thread into shared memory (0 when out of range).
    sdata[lane] = (gid < len) ? g_idata[gid] : 0;
    __syncthreads();
    // Tree reduction: at stride s, thread k folds slot 2*s*k + s into 2*s*k.
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        const unsigned int slot = 2 * stride * lane;
        if (slot < blockDim.x)
            sdata[slot] += sdata[slot + stride];
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(67.699 * 10^-3 s)
// 7.930 GB/s = 55.069% -> good kernel memory bandwidth
__global__ void reduce2(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
    // Sequential-addressing reduction: active threads access shared memory
    // with unit stride, so there are no bank conflicts from the addressing
    // scheme. One partial sum per block is written to g_odata[blockIdx.x].
    extern __shared__ unsigned int sdata[];
    const unsigned int lane = threadIdx.x;
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage one element per thread into shared memory (0 when out of range).
    sdata[lane] = (gid < len) ? g_idata[gid] : 0;
    __syncthreads();
    // Halve the active range each step; each active thread adds the element
    // from the upper half of the range onto its own slot in the lower half.
    for (unsigned int active = blockDim.x / 2; active != 0; active >>= 1) {
        if (lane < active)
            sdata[lane] += sdata[lane + active];
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(58.505 * 10^-3 s)
// 9.176 GB/s = 63.722% -> good kernel memory bandwidth, better than last time
__global__ void reduce3(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
    // Each block reduces 2 * blockDim.x input elements into g_odata[blockIdx.x].
    // The first reduction stage is folded into the global-to-shared load step,
    // so all blockDim.x threads are active from the start (previously half of
    // them idled on the first tree iteration).
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    sdata[tid] = 0;
    if (i < len)
    {
        // Guard the second load separately: the tail block may own fewer than
        // 2 * blockDim.x elements, so the old unconditional read of
        // g_idata[i + blockDim.x] ran past the end of the input array
        // (the benchmarks above use 2^27 + 1 elements, so a partial tail
        // block is the common case).
        sdata[tid] = g_idata[i];
        if (i + blockDim.x < len)
            sdata[tid] += g_idata[i + blockDim.x];
    }
    __syncthreads();
    // Sequential-addressing tree reduction; now starts at s = blockDim.x / 2.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
// Bandwidth: (((2^27) + 1) unsigned ints * 4 bytes/unsigned int)/(37.255 * 10^-3 s)
// 14.411 GB/s = 100% -> perfect bandwidth? is this even possible?
// ***In my laptop, measurements are wrong since the release version oddly outputs an incorrect value***
__global__ void reduce4(unsigned int* g_odata, unsigned int* g_idata, unsigned int len) {
    // Each block reduces 2 * blockDim.x input elements into g_odata[blockIdx.x].
    // The first reduction stage is folded into the global-to-shared load so all
    // blockDim.x threads are active from the start; the last 6 stages are
    // unrolled for the final warp.
    // Requires blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory
    // and blockDim.x >= 64 (the unrolled tail reads sdata[tid + 32]).
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    sdata[tid] = 0;
    if (i < len)
    {
        // Guard the second load separately: the tail block may own fewer than
        // 2 * blockDim.x elements, so i + blockDim.x can point past the input.
        sdata[tid] = g_idata[i];
        if (i + blockDim.x < len)
            sdata[tid] += g_idata[i + blockDim.x];
    }
    __syncthreads();
    // Sequential-addressing tree reduction, stopping once only one warp is left.
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid < 32)
    {
        // Warp-synchronous unrolled tail. The shared-memory traffic must go
        // through a volatile pointer: without it the compiler may keep sdata
        // values cached in registers between steps, which silently breaks the
        // result in optimized builds -- the exact "release version outputs an
        // incorrect value" symptom noted in the comment above this kernel.
        volatile unsigned int* vsmem = sdata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
void print_d_array(unsigned int* d_array, unsigned int len)
{
    // Debug helper: pull a device buffer back to the host and dump it as
    // "index : value" lines on stdout.
    unsigned int* host_copy = new unsigned int[len];
    checkCudaErrors(cudaMemcpy(host_copy, d_array, sizeof(unsigned int) * len, cudaMemcpyDeviceToHost));
    for (unsigned int idx = 0; idx < len; ++idx)
        std::cout << idx << " : " << host_copy[idx] << std::endl;
    delete[] host_copy;
}
unsigned int gpu_sum_reduce(unsigned int* d_in, unsigned int d_in_len)
{
    // Sums the device array d_in (d_in_len elements) and returns the total on
    // the host. Each block reduces up to 2 * MAX_BLOCK_SZ elements into one
    // partial sum; the per-block partials are then reduced again, recursively
    // when they do not fit in a single block.
    unsigned int total_sum = 0;
    // Empty input: avoid a zero-sized allocation and a zero-block launch.
    if (d_in_len == 0)
        return total_sum;
    unsigned int block_sz = MAX_BLOCK_SZ;
    // Each block consumes 2 * block_sz elements because the first reduction
    // stage is folded into the global-to-shared load.
    unsigned int max_elems_per_block = block_sz * 2;
    // Least number of blocks covering the input; a partial tail still needs
    // a whole block.
    unsigned int grid_sz = d_in_len / max_elems_per_block;
    if (d_in_len % max_elems_per_block != 0)
        grid_sz++;
    // One partial-sum slot per block, zeroed so unused slots are harmless.
    unsigned int* d_block_sums;
    checkCudaErrors(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
    checkCudaErrors(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
    // Reduce each block's window of the input to a partial sum.
    block_sum_reduce<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_in, d_in_len);
    if (grid_sz <= max_elems_per_block)
    {
        // Few enough partials left for a single block to finish the job.
        unsigned int* d_total_sum;
        checkCudaErrors(cudaMalloc(&d_total_sum, sizeof(unsigned int)));
        checkCudaErrors(cudaMemset(d_total_sum, 0, sizeof(unsigned int)));
        block_sum_reduce<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_total_sum, d_block_sums, grid_sz);
        checkCudaErrors(cudaMemcpy(&total_sum, d_total_sum, sizeof(unsigned int), cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaFree(d_total_sum));
    }
    else
    {
        // Too many partials: recurse directly on d_block_sums. The callee
        // never frees its input, so the former allocate-and-copy round trip
        // into a scratch buffer was unnecessary work.
        total_sum = gpu_sum_reduce(d_block_sums, grid_sz);
    }
    checkCudaErrors(cudaFree(d_block_sums));
    return total_sum;
}
unsigned int gpu_sum_reduce_dma(unsigned int* d_in, unsigned int d_in_len)
{
    // Same contract as gpu_sum_reduce(), but driving the *_dma kernel variant:
    // sums d_in (d_in_len device elements) and returns the total on the host.
    unsigned int total_sum = 0;
    // Empty input: avoid a zero-sized allocation and a zero-block launch.
    if (d_in_len == 0)
        return total_sum;
    unsigned int block_sz = MAX_BLOCK_SZ;
    // Each block consumes 2 * block_sz elements (first add folded into load).
    unsigned int max_elems_per_block = block_sz * 2;
    // Least number of blocks covering the input; a partial tail still needs
    // a whole block.
    unsigned int grid_sz = d_in_len / max_elems_per_block;
    if (d_in_len % max_elems_per_block != 0)
        grid_sz++;
    // One partial-sum slot per block, zeroed so unused slots are harmless.
    unsigned int* d_block_sums;
    checkCudaErrors(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
    checkCudaErrors(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
    // Reduce each block's window of the input to a partial sum.
    block_sum_reduce_dma<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_in, d_in_len);
    if (grid_sz <= max_elems_per_block)
    {
        // Few enough partials left for a single block to finish the job.
        unsigned int* d_total_sum;
        checkCudaErrors(cudaMalloc(&d_total_sum, sizeof(unsigned int)));
        checkCudaErrors(cudaMemset(d_total_sum, 0, sizeof(unsigned int)));
        block_sum_reduce_dma<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_total_sum, d_block_sums, grid_sz);
        checkCudaErrors(cudaMemcpy(&total_sum, d_total_sum, sizeof(unsigned int), cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaFree(d_total_sum));
    }
    else
    {
        // Too many partials: recurse directly on d_block_sums. The callee
        // never frees its input, so the former allocate-and-copy round trip
        // into a scratch buffer was unnecessary work.
        total_sum = gpu_sum_reduce_dma(d_block_sums, grid_sz);
    }
    checkCudaErrors(cudaFree(d_block_sums));
    return total_sum;
}
|
267c66e973aed835ceabd34011ebc4e423cdbeb8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
// Accessor for the timer shared by this implementation's entry points.
PerformanceTimer& timer()
{
    // Function-local static: constructed on first call, reused afterwards.
    static PerformanceTimer timer;
    return timer;
}
// TODO: __global__
__global__ void kernNaiveScan(int n, int offset,
    int* odata, const int* idata) {
    // One Hillis-Steele pass of the naive scan: every element at position
    // >= offset picks up the partial sum sitting `offset` slots behind it;
    // elements before the stride are carried over unchanged.
    int self = threadIdx.x + blockIdx.x * blockDim.x;
    if (self < offset) {
        odata[self] = idata[self];
    }
    else if (self < n) {
        odata[self] = idata[self - offset] + idata[self];
    }
}
__global__ void kernRightShift(int n, int* odata, int* idata) {
    // Shift the array right by one to turn an inclusive scan into an
    // exclusive one; slot 0 receives the additive identity.
    int self = threadIdx.x + blockIdx.x * blockDim.x;
    if (self == 0) {
        odata[0] = 0;
    }
    else if (self < n) {
        odata[self] = idata[self - 1];
    }
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Naive (Hillis-Steele) exclusive scan: d = ceil(log2 n) passes over a
    // pair of ping-pong device buffers, then a right shift converts the
    // inclusive result into an exclusive one.
    int* dev_data1 = nullptr;
    int* dev_data2 = nullptr;
    // Allocate the ping-pong buffers on the device
    hipMalloc((void**)&dev_data1, n * sizeof(int));
    checkCUDAError("hipMalloc dev_data1 failed!");
    hipMalloc((void**)&dev_data2, n * sizeof(int));
    checkCUDAError("hipMalloc dev_data2 failed!");
    // Copy data from host to device (checked, consistent with the mallocs above)
    hipMemcpy(dev_data1, idata, n * sizeof(int), hipMemcpyHostToDevice);
    checkCUDAError("hipMemcpy to device failed!");
    timer().startGpuTimer();
    if (n > 0) {
        dim3 threadsPerBlock(128);
        int d = ilog2ceil(n);
        int offset = 1;
        dim3 blocks(n / threadsPerBlock.x + 1);
        // One pass per bit of the scan depth; the stride doubles each pass
        // and the buffers swap roles (ping-pong).
        for (int i = 1; i <= d; i++) {
            kernNaiveScan << <blocks, threadsPerBlock >> >
                (n, offset, dev_data2, dev_data1);
            std::swap(dev_data1, dev_data2);
            offset <<= 1;
        }
        // Right shift to get the exclusive prefix sum
        kernRightShift << <blocks, threadsPerBlock >> >
            (n, dev_data2, dev_data1);
    }
    timer().endGpuTimer();
    // Copy data back from device to host (checked)
    hipMemcpy(odata, dev_data2, n * sizeof(int), hipMemcpyDeviceToHost);
    checkCUDAError("hipMemcpy to host failed!");
    // Free device memory
    hipFree(dev_data1);
    hipFree(dev_data2);
}
}
}
| 267c66e973aed835ceabd34011ebc4e423cdbeb8.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
// Accessor for the timer shared by this implementation's entry points.
PerformanceTimer& timer()
{
    // Function-local static: constructed on first call, reused afterwards.
    static PerformanceTimer timer;
    return timer;
}
// TODO: __global__
__global__ void kernNaiveScan(int n, int offset,
    int* odata, const int* idata) {
    // One Hillis-Steele pass of the naive scan: every element at position
    // >= offset picks up the partial sum sitting `offset` slots behind it;
    // elements before the stride are carried over unchanged.
    int self = threadIdx.x + blockIdx.x * blockDim.x;
    if (self < offset) {
        odata[self] = idata[self];
    }
    else if (self < n) {
        odata[self] = idata[self - offset] + idata[self];
    }
}
__global__ void kernRightShift(int n, int* odata, int* idata) {
    // Shift the array right by one to turn an inclusive scan into an
    // exclusive one; slot 0 receives the additive identity.
    int self = threadIdx.x + blockIdx.x * blockDim.x;
    if (self == 0) {
        odata[0] = 0;
    }
    else if (self < n) {
        odata[self] = idata[self - 1];
    }
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Naive (Hillis-Steele) exclusive scan: d = ceil(log2 n) passes over a
    // pair of ping-pong device buffers, then a right shift converts the
    // inclusive result into an exclusive one.
    int* dev_data1 = nullptr;
    int* dev_data2 = nullptr;
    // Allocate the ping-pong buffers on the device
    cudaMalloc((void**)&dev_data1, n * sizeof(int));
    checkCUDAError("cudaMalloc dev_data1 failed!");
    cudaMalloc((void**)&dev_data2, n * sizeof(int));
    checkCUDAError("cudaMalloc dev_data2 failed!");
    // Copy data from host to device (checked, consistent with the mallocs above)
    cudaMemcpy(dev_data1, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy to device failed!");
    timer().startGpuTimer();
    if (n > 0) {
        dim3 threadsPerBlock(128);
        int d = ilog2ceil(n);
        int offset = 1;
        dim3 blocks(n / threadsPerBlock.x + 1);
        // One pass per bit of the scan depth; the stride doubles each pass
        // and the buffers swap roles (ping-pong).
        for (int i = 1; i <= d; i++) {
            kernNaiveScan << <blocks, threadsPerBlock >> >
                (n, offset, dev_data2, dev_data1);
            std::swap(dev_data1, dev_data2);
            offset <<= 1;
        }
        // Right shift to get the exclusive prefix sum
        kernRightShift << <blocks, threadsPerBlock >> >
            (n, dev_data2, dev_data1);
    }
    timer().endGpuTimer();
    // Copy data back from device to host (checked)
    cudaMemcpy(odata, dev_data2, n * sizeof(int), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy to host failed!");
    // Free device memory
    cudaFree(dev_data1);
    cudaFree(dev_data2);
}
}
}
|
fef86de50ec25318da2c0a8991d60e513a67ca5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "Integrator.cuh"
#include <assert.h>
/*! \file Integrator.cu
\brief Defines methods and data structures used by the Integrator class on the GPU
*/
//! helper to add a given force/virial pointer pair
template< unsigned int compute_virial >
__device__ void add_force_total(Scalar4& net_force, Scalar *net_virial, Scalar4& net_torque, Scalar4* d_f, Scalar* d_v, const unsigned int virial_pitch, Scalar4* d_t, int idx)
{
    // Accumulate one force compute's per-particle force, torque and
    // (optionally) virial into the running totals. A NULL pointer in any of
    // the triple means this slot of the force list is unused: contribute nothing.
    if (d_f == NULL || d_v == NULL || d_t == NULL)
        return;

    const Scalar4 f = d_f[idx];
    net_force.x += f.x;
    net_force.y += f.y;
    net_force.z += f.z;
    net_force.w += f.w;

    // Virial accumulation is compiled out entirely when compute_virial == 0.
    if (compute_virial)
    {
        for (int j = 0; j < 6; j++)
            net_virial[j] += d_v[j * virial_pitch + idx];
    }

    const Scalar4 t = d_t[idx];
    net_torque.x += t.x;
    net_torque.y += t.y;
    net_torque.z += t.z;
    net_torque.w += t.w;
}
//! Kernel for summing forces on the GPU
/*! The specified forces and virials are summed for every particle into \a d_net_force and \a d_net_virial
\param d_net_force Output device array to hold the computed net force
\param d_net_virial Output device array to hold the computed net virial
\param net_virial_pitch The pitch of the 2D net_virial array
\param d_net_torque Output device array to hold the computed net torque
\param force_list List of pointers to force data to sum
\param nwork Number of particles this GPU processes
\param clear When true, initializes the sums to 0 before adding. When false, reads in the current \a d_net_force
and \a d_net_virial and adds to that
\param offset of this GPU in ptls array
\tparam compute_virial When set to 0, the virial sum is not computed
*/
template< unsigned int compute_virial >
__global__ void gpu_integrator_sum_net_force_kernel(Scalar4 *d_net_force,
    Scalar *d_net_virial,
    const unsigned int net_virial_pitch,
    Scalar4 *d_net_torque,
    const gpu_force_list force_list,
    unsigned int nwork,
    bool clear,
    unsigned int offset)
{
    // calculate the index we will be handling (one thread per particle of this GPU's range)
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < nwork)
    {
        // shift into this GPU's slice of the global particle arrays
        idx += offset;
        // set the initial net_force and net_virial to sum into
        Scalar4 net_force;
        Scalar net_virial[6];
        Scalar4 net_torque;
        if (clear)
        {
            // start the sums from zero
            net_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
            if (compute_virial)
            {
                for (int i=0; i<6; i++)
                    net_virial[i] = Scalar(0.0);
            }
            net_torque = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
        }
        else
        {
            // if clear is false, initialize to the current d_net_force and d_net_virial
            net_force = d_net_force[idx];
            if (compute_virial)
            {
                for (int i=0; i<6; i++)
                    net_virial[i] = d_net_virial[i*net_virial_pitch+idx];
            }
            net_torque = d_net_torque[idx];
        }
        // sum up the totals from each of the six force-list slots;
        // add_force_total skips slots whose pointers are NULL
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f0, force_list.v0, force_list.vpitch0, force_list.t0, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f1, force_list.v1, force_list.vpitch1, force_list.t1, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f2, force_list.v2, force_list.vpitch2, force_list.t2, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f3, force_list.v3, force_list.vpitch3, force_list.t3, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f4, force_list.v4, force_list.vpitch4, force_list.t4, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f5, force_list.v5, force_list.vpitch5, force_list.t5, idx);
        // write out the final result
        d_net_force[idx] = net_force;
        if (compute_virial)
        {
            for (int i=0; i < 6; i++)
                d_net_virial[i*net_virial_pitch+idx] = net_virial[i];
        }
        d_net_torque[idx] = net_torque;
    }
}
//! Host driver: launches gpu_integrator_sum_net_force_kernel on every active GPU
/*! \param d_net_force Output device array for the summed net forces
    \param d_net_virial Output device array for the summed net virials (2D, pitch net_virial_pitch)
    \param net_virial_pitch Pitch of the 2D net_virial array
    \param d_net_torque Output device array for the summed net torques
    \param force_list Up to six force/virial/torque pointer triples to sum
    \param nparticles Total particle count (not referenced here; the per-GPU
           ranges come from \a gpu_partition instead)
    \param clear When true the sums start from zero; otherwise they accumulate
           onto the current contents of the output arrays
    \param compute_virial Selects the virial-accumulating kernel instantiation
    \param gpu_partition Maps each active GPU to its particle index range
*/
hipError_t gpu_integrator_sum_net_force(Scalar4 *d_net_force,
    Scalar *d_net_virial,
    const unsigned int net_virial_pitch,
    Scalar4 *d_net_torque,
    const gpu_force_list& force_list,
    unsigned int nparticles,
    bool clear,
    bool compute_virial,
    const GPUPartition& gpu_partition)
{
    // sanity check
    assert(d_net_force);
    assert(d_net_virial);
    assert(d_net_torque);
    const int block_size = 256;
    // iterate over active GPUs in reverse, to end up on first GPU when returning from this function
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
    {
        auto range = gpu_partition.getRangeAndSetGPU(idev);
        // number of particles this GPU handles
        unsigned int nwork = range.second - range.first;
        // compute_virial is a runtime flag here, but a compile-time template
        // parameter of the kernel, hence the two launch branches
        if (compute_virial)
        {
            hipLaunchKernelGGL(( gpu_integrator_sum_net_force_kernel<1>), dim3(nwork/block_size+1), dim3(block_size) , 0, 0, d_net_force,
                d_net_virial,
                net_virial_pitch,
                d_net_torque,
                force_list,
                nwork,
                clear,
                range.first);
        }
        else
        {
            hipLaunchKernelGGL(( gpu_integrator_sum_net_force_kernel<0>), dim3(nwork/block_size+1), dim3(block_size) , 0, 0, d_net_force,
                d_net_virial,
                net_virial_pitch,
                d_net_torque,
                force_list,
                nwork,
                clear,
                range.first);
        }
    }
    return hipSuccess;
}
| fef86de50ec25318da2c0a8991d60e513a67ca5f.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "Integrator.cuh"
#include <assert.h>
/*! \file Integrator.cu
\brief Defines methods and data structures used by the Integrator class on the GPU
*/
//! helper to add a given force/virial pointer pair
template< unsigned int compute_virial >
__device__ void add_force_total(Scalar4& net_force, Scalar *net_virial, Scalar4& net_torque, Scalar4* d_f, Scalar* d_v, const unsigned int virial_pitch, Scalar4* d_t, int idx)
{
    // Accumulate one force compute's per-particle force, torque and
    // (optionally) virial into the running totals. A NULL pointer in any of
    // the triple means this slot of the force list is unused: contribute nothing.
    if (d_f == NULL || d_v == NULL || d_t == NULL)
        return;

    const Scalar4 f = d_f[idx];
    net_force.x += f.x;
    net_force.y += f.y;
    net_force.z += f.z;
    net_force.w += f.w;

    // Virial accumulation is compiled out entirely when compute_virial == 0.
    if (compute_virial)
    {
        for (int j = 0; j < 6; j++)
            net_virial[j] += d_v[j * virial_pitch + idx];
    }

    const Scalar4 t = d_t[idx];
    net_torque.x += t.x;
    net_torque.y += t.y;
    net_torque.z += t.z;
    net_torque.w += t.w;
}
//! Kernel for summing forces on the GPU
/*! The specified forces and virials are summed for every particle into \a d_net_force and \a d_net_virial
\param d_net_force Output device array to hold the computed net force
\param d_net_virial Output device array to hold the computed net virial
\param net_virial_pitch The pitch of the 2D net_virial array
\param d_net_torque Output device array to hold the computed net torque
\param force_list List of pointers to force data to sum
\param nwork Number of particles this GPU processes
\param clear When true, initializes the sums to 0 before adding. When false, reads in the current \a d_net_force
and \a d_net_virial and adds to that
\param offset of this GPU in ptls array
\tparam compute_virial When set to 0, the virial sum is not computed
*/
template< unsigned int compute_virial >
__global__ void gpu_integrator_sum_net_force_kernel(Scalar4 *d_net_force,
    Scalar *d_net_virial,
    const unsigned int net_virial_pitch,
    Scalar4 *d_net_torque,
    const gpu_force_list force_list,
    unsigned int nwork,
    bool clear,
    unsigned int offset)
{
    // calculate the index we will be handling (one thread per particle of this GPU's range)
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < nwork)
    {
        // shift into this GPU's slice of the global particle arrays
        idx += offset;
        // set the initial net_force and net_virial to sum into
        Scalar4 net_force;
        Scalar net_virial[6];
        Scalar4 net_torque;
        if (clear)
        {
            // start the sums from zero
            net_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
            if (compute_virial)
            {
                for (int i=0; i<6; i++)
                    net_virial[i] = Scalar(0.0);
            }
            net_torque = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
        }
        else
        {
            // if clear is false, initialize to the current d_net_force and d_net_virial
            net_force = d_net_force[idx];
            if (compute_virial)
            {
                for (int i=0; i<6; i++)
                    net_virial[i] = d_net_virial[i*net_virial_pitch+idx];
            }
            net_torque = d_net_torque[idx];
        }
        // sum up the totals from each of the six force-list slots;
        // add_force_total skips slots whose pointers are NULL
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f0, force_list.v0, force_list.vpitch0, force_list.t0, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f1, force_list.v1, force_list.vpitch1, force_list.t1, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f2, force_list.v2, force_list.vpitch2, force_list.t2, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f3, force_list.v3, force_list.vpitch3, force_list.t3, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f4, force_list.v4, force_list.vpitch4, force_list.t4, idx);
        add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f5, force_list.v5, force_list.vpitch5, force_list.t5, idx);
        // write out the final result
        d_net_force[idx] = net_force;
        if (compute_virial)
        {
            for (int i=0; i < 6; i++)
                d_net_virial[i*net_virial_pitch+idx] = net_virial[i];
        }
        d_net_torque[idx] = net_torque;
    }
}
//! Sums the individual force, virial and torque arrays into the net arrays on the GPU.
/*! \param d_net_force Output device array to hold the computed net force
    \param d_net_virial Output device array to hold the computed net virial
    \param net_virial_pitch The pitch of the 2D net_virial array
    \param d_net_torque Output device array to hold the computed net torque
    \param force_list List of pointers to force data to sum
    \param nparticles Total number of particles (unused here; each GPU range comes from \a gpu_partition)
    \param clear When true, the sums start from 0; when false, they accumulate onto the current values
    \param compute_virial When false, the virial sum is skipped entirely
    \param gpu_partition Partition of the particle range over the active GPUs
*/
cudaError_t gpu_integrator_sum_net_force(Scalar4 *d_net_force,
                                         Scalar *d_net_virial,
                                         const unsigned int net_virial_pitch,
                                         Scalar4 *d_net_torque,
                                         const gpu_force_list& force_list,
                                         unsigned int nparticles,
                                         bool clear,
                                         bool compute_virial,
                                         const GPUPartition& gpu_partition)
    {
    // sanity check
    assert(d_net_force);
    assert(d_net_virial);
    assert(d_net_torque);

    const unsigned int block_size = 256;

    // iterate over active GPUs in reverse, to end up on first GPU when returning from this function
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        auto range = gpu_partition.getRangeAndSetGPU(idev);
        unsigned int nwork = range.second - range.first;

        // nothing to sum on this GPU, skip the launch
        if (nwork == 0)
            continue;

        // ceil(nwork / block_size) blocks cover the range with no extra all-idle block
        unsigned int n_blocks = (nwork + block_size - 1) / block_size;

        if (compute_virial)
            {
            gpu_integrator_sum_net_force_kernel<1><<< n_blocks, block_size >>>(d_net_force,
                                                                               d_net_virial,
                                                                               net_virial_pitch,
                                                                               d_net_torque,
                                                                               force_list,
                                                                               nwork,
                                                                               clear,
                                                                               range.first);
            }
        else
            {
            gpu_integrator_sum_net_force_kernel<0><<< n_blocks, block_size >>>(d_net_force,
                                                                               d_net_virial,
                                                                               net_virial_pitch,
                                                                               d_net_torque,
                                                                               force_list,
                                                                               nwork,
                                                                               clear,
                                                                               range.first);
            }
        }

    return cudaSuccess;
    }
|
03d09cd678dec03736d66135d97d0bc8ead4798d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
#include <iostream>
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
// Launch configuration and staging constants for the extraction kernels below.
enum
{
  CTA_SIZE_X = 32,                     // block width: exactly one warp
  CTA_SIZE_Y = 6,                      // block height
  CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,  // threads per block
  MAX_LOCAL_POINTS = 3                 // max points one thread can emit per voxel (one per axis)
};

// Device-global counters shared by all blocks of a single extraction launch.
__device__ int global_count = 0;          // running total of points reserved in the output buffer
__device__ int output_xyz_count = 0;      // final point count, copied back to the host after the launch
__device__ unsigned int blocks_done = 0;  // finished-block counter; the last block resets the globals

// Per-block staging buffers: each warp compacts its points here before the global write.
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_I[CTA_SIZE * MAX_LOCAL_POINTS];
//! Scans the TSDF volume and extracts its zero-level isosurface as a point cloud.
/** Launched with one thread per (x, y) column of the volume; each thread walks its
 *  column along z looking for TSDF sign changes between neighbouring voxels.
 *  Points produced by a warp are compacted into the file-scope storage_* shared
 *  buffers, then flushed to global memory with one atomicAdd reservation per warp.
 */
struct FullScan6
{
  PtrStep<short2> volume;                 //!< packed TSDF volume: (tsdf, weight) per voxel
  float3 cell_size;                       //!< metric size of a single voxel
  mutable PtrSz<PointType> output;        //!< not referenced by the operators in this struct
  mutable PtrSz<PointType> output_xyz;    //!< destination buffer for extracted points
  mutable PtrSz<float> output_intensity;  //!< destination buffer for per-point TSDF values

  //! Reads the TSDF value and weight at voxel (x, y, z), translating the address
  //! through the cyclic (rolling) tsdf_buffer.
  __device__ __forceinline__ float
  fetch (pcl::gpu::kinfuLS::tsdf_buffer buffer, int x, int y, int z, int& weight) const
  {
    float tsdf;
    const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
    short2* pos = const_cast<short2*> (tmp_pos);
    shift_tsdf_pointer (&pos, buffer);  // wrap the pointer into the rolling buffer
    unpack_tsdf (*pos, tsdf, weight);
    return tsdf;
  }

  //! Reads the TSDF value and weight at voxel (x, y, z) with plain (non-rolling) addressing.
  __device__ __forceinline__ float
  fetch (int x, int y, int z, int& weight) const
  {
    float tsdf;
    unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
    return tsdf;
  }

  //! Full-volume extraction: finds zero crossings along +x, +y and +z of every voxel
  //! and writes interpolated surface points (in meters) to output_xyz.
  __device__ __forceinline__ void
  operator () () const
  {
    int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
    int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;

#if CUDART_VERSION >= 9000
    if (__all_sync (__activemask (), x >= VOLUME_X)
        || __all_sync (__activemask (), y >= VOLUME_Y))
      return;
#else
    if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
      return;
#endif

    float3 V;
    V.x = (x + 0.5f) * cell_size.x;   // voxel center in meters
    V.y = (y + 0.5f) * cell_size.y;

    int ftid = Block::flattenedThreadId ();

    for (int z = 0; z < VOLUME_Z - 1; ++z)
    {
      // up to MAX_LOCAL_POINTS crossings can be produced per voxel (one per axis)
      float3 points[MAX_LOCAL_POINTS];
      int local_count = 0;

      if (x < VOLUME_X && y < VOLUME_Y)
      {
        int W;
        float F = fetch (x, y, z, W);

        // only consider observed voxels that are not free space
        if (W != 0 && F != 1.f)
        {
          V.z = (z + 0.5f) * cell_size.z;

          // process dx
          if (x + 1 < VOLUME_X)
          {
            int Wn;
            float Fn = fetch (x + 1, y, z, Wn);

            if (Wn != 0 && Fn != 1.f)
              if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
              {
                // linearly interpolate the zero crossing between the two voxel centers
                float3 p;
                p.y = V.y;
                p.z = V.z;

                float Vnx = V.x + cell_size.x;

                float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
                p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;

                points[local_count++] = p;
              }
          } /* if (x + 1 < VOLUME_X) */

          // process dy
          if (y + 1 < VOLUME_Y)
          {
            int Wn;
            float Fn = fetch (x, y + 1, z, Wn);

            if (Wn != 0 && Fn != 1.f)
              if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
              {
                float3 p;
                p.x = V.x;
                p.z = V.z;

                float Vny = V.y + cell_size.y;

                float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
                p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;

                points[local_count++] = p;
              }
          } /* if (y + 1 < VOLUME_Y) */

          // process dz
          // if (z + 1 < VOLUME_Z) // guaranteed by loop
          {
            int Wn;
            float Fn = fetch (x, y, z + 1, Wn);

            if (Wn != 0 && Fn != 1.f)
              if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
              {
                float3 p;
                p.x = V.x;
                p.y = V.y;

                float Vnz = V.z + cell_size.z;

                float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
                p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;

                points[local_count++] = p;
              }
          }/* if (z + 1 < VOLUME_Z) */
        }/* if (W != 0 && F != 1.f) */
      }/* if (x < VOLUME_X && y < VOLUME_Y) */

      // note: we filled the points array at the current iteration;
      // count how many points the whole warp produced
#if CUDART_VERSION >= 9000
      int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
                     + __popc (__ballot_sync (__activemask (), local_count > 1))
                     + __popc (__ballot_sync (__activemask (), local_count > 2));
#else
      int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif

      if (total_warp > 0)
      {
        int lane = Warp::laneId ();
        int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;

        // reuse the shared staging area as an int scratch buffer for the warp scan
        volatile int* cta_buffer = (int*)(storage_X + storage_index);

        cta_buffer[lane] = local_count;
        // exclusive prefix sum: how many points lower lanes produced
        int offset = scan_warp<exclusive>(cta_buffer, lane);

        if (lane == 0)
        {
          // one global reservation per warp
          int old_global_count = atomicAdd (&global_count, total_warp);
          cta_buffer[0] = old_global_count;
        }
        int old_global_count = cta_buffer[0];

        // compact this thread's points into the warp's shared staging area
        for (int l = 0; l < local_count; ++l)
        {
          storage_X[storage_index + offset + l] = points[l].x;
          storage_Y[storage_index + offset + l] = points[l].y;
          storage_Z[storage_index + offset + l] = points[l].z;
        }

        // cooperatively copy the staged points out to global memory
        int offset_storage = old_global_count + lane;
        for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
        {
          if (offset_storage >= output_xyz.size) break;
          float x = storage_X[storage_index + idx];
          float y = storage_Y[storage_index + idx];
          float z = storage_Z[storage_index + idx];
          store_point_type (x, y, z, output_xyz.data, offset_storage);
        }

        // stop scanning this column once the output buffer is exhausted
        bool full = (old_global_count + total_warp) >= output_xyz.size;

        if (full)
          break;
      }

    }/* for(int z = 0; z < VOLUME_Z - 1; ++z) */

    ///////////////////////////
    // Prepare for future scans: the last block to finish resets the device counters
    if (ftid == 0)
    {
      unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
      unsigned int value = atomicInc (&blocks_done, total_blocks);

      // Last block
      if (value == total_blocks - 1)
      {
        output_xyz_count = min ((int)output_xyz.size, global_count);
        blocks_done = 0;
        global_count = 0;
      }
    }
  } /* operator() */

  // OPERATOR USED BY EXTRACT_SLICE_AS_CLOUD.
  // This operator extracts the cloud as TSDF values and X,Y,Z indices.
  // The previous operator generates a regular point cloud in meters.
  // This one generates a TSDF Point Cloud in grid indices.
  __device__ __forceinline__ void
  operator () (pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) const
  {
    int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
    int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;

    int ftid = Block::flattenedThreadId ();

    int minimum_Z = 0;
    int maximum_Z = VOLUME_Z - 1;

    for (int z = minimum_Z; z < maximum_Z; ++z)
    {
      // The black zone is the name given to the subvolume within the TSDF Volume grid that is shifted out.
      // In other words, the set of points in the TSDF grid that we want to extract in order to add it to the world model being built in CPU.
      // NOTE(review): the per-axis ranges are combined with ||, so a voxel qualifies when ANY
      // coordinate lies inside its axis range — confirm this matches the intended slice shape.
      bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ;
      float4 points[MAX_LOCAL_POINTS];
      int local_count = 0;

      if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone)
      {
        int W;
        float F = fetch (buffer, x, y, z, W);

        // keep voxels carrying a valid, informative TSDF sample
        if (W != 0.0f && F != 1.f && F < 0.98 && F != 0.0f && F > -1.0f)
        {
          // store grid indices plus the TSDF value in w
          float4 p;
          p.x = x;
          p.y = y;
          p.z = z;
          p.w = F;
          points[local_count++] = p;
        }
      }/* if (x < VOLUME_X && y < VOLUME_Y) */

      // local_count counts the number of zero crossing for the current thread. Now we need to merge this knowledge with the other threads
      // note: we filled the points array at the current iteration
#if CUDART_VERSION >= 9000
      int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
                     + __popc (__ballot_sync (__activemask (), local_count > 1))
                     + __popc (__ballot_sync (__activemask (), local_count > 2));
#else
      int total_warp = __popc (__ballot (local_count > 0))
                     + __popc (__ballot (local_count > 1))
                     + __popc (__ballot (local_count > 2));
#endif

      if (total_warp > 0)  ///more than 0 zero-crossings
      {
        int lane = Warp::laneId ();  ///index of thread within warp [0-31]
        int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;

        // Pointer to the beginning of the current warp buffer
        volatile int* cta_buffer = (int*)(storage_X + storage_index);

        // Compute offset of current warp
        // Call in place scanning (see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html)
        cta_buffer[lane] = local_count;
        int offset = scan_warp<exclusive>(cta_buffer, lane);  //How many crossings did we have before index "lane" ?

        // We want to do only 1 operation per warp (not thread) -> because it is faster
        if (lane == 0)
        {
          int old_global_count = atomicAdd (&global_count, total_warp);  ///We use atomicAdd, so that threads do not collide
          cta_buffer[0] = old_global_count;
        }
        int old_global_count = cta_buffer[0];

        // Perform compaction (dump all current crossings)
        for (int l = 0; l < local_count; ++l)
        {
          storage_X[storage_index + offset + l] = points[l].x;  // x coordinates of the points we found in STORAGE_X
          storage_Y[storage_index + offset + l] = points[l].y;  // y coordinates of the points we found in STORAGE_Y
          storage_Z[storage_index + offset + l] = points[l].z;  // z coordinates of the points we found in STORAGE_Z
          storage_I[storage_index + offset + l] = points[l].w;  // Intensity values of the points we found in STORAGE_I
        }

        // Retrieve Zero-crossings as 3D points
        int offset_storage = old_global_count + lane;
        for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
        {
          if (offset_storage >= output_xyz.size) break;
          float x = storage_X[storage_index + idx];
          float y = storage_Y[storage_index + idx];
          float z = storage_Z[storage_index + idx];
          float i = storage_I[storage_index + idx];
          store_point_intensity (x, y, z, i, output_xyz.data, output_intensity.data, offset_storage);
        }

        // Sanity check to make sure our output_xyz buffer is not full already
        bool full = (old_global_count + total_warp) >= output_xyz.size;

        if (full)
          break;
      }
    } /* for(int z = 0; z < VOLUME_Z - 1; ++z) */

    ///////////////////////////
    // Prepare for future scans: the last block to finish resets the device counters
    if (ftid == 0)
    {
      unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
      unsigned int value = atomicInc (&blocks_done, total_blocks);

      // Last block
      if (value == total_blocks - 1)
      {
        output_xyz_count = min ((int)output_xyz.size, global_count);
        blocks_done = 0;
        global_count = 0;
      }
    }
  } /* operator() */

  //! Stores one point as a float4 (w = 0) at ptr[offset].
  __device__ __forceinline__ void
  store_point_type (float x, float y, float z, float4* ptr, int offset) const
  {
    *(ptr + offset) = make_float4 (x, y, z, 0);
  }

  //! Stores XYZ and intensity values in 2 separate device arrays.
  //! ptr_xyz: pointer to the BEGINNING of the XYZ device array
  //! ptr_intensity: pointer to the BEGINNING of the intensity device array
  //! offset: offset to apply to both XYZ and intensity
  __device__ __forceinline__ void
  store_point_intensity (float x, float y, float z, float i, float4* ptr_xyz, float* ptr_intensity, int offset) const
  {
    *(ptr_xyz + offset) = make_float4 (x, y, z, 0);
    *(ptr_intensity + offset) = i;
  }

  //! Stores one point as a float3 at ptr[offset].
  __device__ __forceinline__ void
  store_point_type (float x, float y, float z, float3* ptr, int offset) const
  {
    *(ptr + offset) = make_float3 (x, y, z);
  }
};
//! Kernel wrapper: runs the full-volume extraction operator of FullScan6.
__global__ void
extractKernel (const FullScan6 fs)
{
  fs ();
}
//! Kernel wrapper: runs the slice-extraction operator of FullScan6 over the given bounds.
__global__ void
extractSliceKernel (const FullScan6 fs, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
  fs (buffer, minBounds, maxBounds);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//! Extracts the TSDF zero-level isosurface of the whole volume as a metric point cloud.
//! Returns the number of points written to output_xyz.
size_t
extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output_xyz)
{
  // configure the full-volume scan functor
  FullScan6 scanner;
  scanner.volume = volume;
  scanner.cell_size.x = volume_size.x / VOLUME_X;
  scanner.cell_size.y = volume_size.y / VOLUME_Y;
  scanner.cell_size.z = volume_size.z / VOLUME_Z;
  scanner.output_xyz = output_xyz;

  // one thread per (x, y) column of the volume
  dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
  dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));

  hipLaunchKernelGGL(( extractKernel), dim3(grid), dim3(block), 0, 0, scanner);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall ( hipDeviceSynchronize () );

  // the kernel accumulates the number of extracted points on the device
  int extracted = 0;
  cudaSafeCall ( hipMemcpyFromSymbol (&extracted, output_xyz_count, sizeof (extracted)) );
  return ((size_t)extracted);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace
{
  //! Computes the [min, max] voxel-index range (relative to the rolling-buffer origin and
  //! wrapped into [0, voxels_size)) of the slice shifted out along one axis.
  /** \param origin current origin of the rolling buffer on this axis
   *  \param voxels_size number of voxels of the volume on this axis
   *  \param shift requested shift on this axis (may be negative)
   *  \param[out] min_bound lower bound of the slice (inclusive)
   *  \param[out] max_bound upper bound of the slice (inclusive)
   */
  void
  computeShiftedSliceBounds (int origin, int voxels_size, int shift, int& min_bound, int& max_bound)
  {
    const int new_origin = origin + shift;

    if (new_origin >= 0)
    {
      min_bound = origin;
      max_bound = new_origin;
    }
    else
    {
      // shifting towards negative indices wraps around the rolling buffer
      min_bound = new_origin + voxels_size - 1;
      max_bound = origin + voxels_size - 1;
    }

    if (min_bound > max_bound)
      std::swap (min_bound, max_bound);

    // express the bounds relative to the buffer origin and wrap negatives back into range
    min_bound -= origin;
    max_bound -= origin;
    if (min_bound < 0)
    {
      min_bound += voxels_size;
      max_bound += voxels_size;
    }
  }
} // anonymous namespace

//! Extracts the slice shifted out of the rolling TSDF buffer as a cloud of grid
//! indices plus TSDF ("intensity") values.
/** \param volume packed TSDF volume
 *  \param volume_size metric size of the volume
 *  \param buffer rolling-buffer structure (origin and extents)
 *  \param shiftX, shiftY, shiftZ requested shift in voxels along each axis
 *  \param output_xyz destination for the (x, y, z) grid indices
 *  \param output_intensities destination for the TSDF values
 *  \return number of points actually extracted
 */
size_t
extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                     const int shiftX, const int shiftY, const int shiftZ,
                     PtrSz<PointType> output_xyz, PtrSz<float> output_intensities)
{
  FullScan6 fs;
  fs.volume = volume;
  fs.cell_size.x = volume_size.x / buffer->voxels_size.x;
  fs.cell_size.y = volume_size.y / buffer->voxels_size.y;
  fs.cell_size.z = volume_size.z / buffer->voxels_size.z;
  fs.output_xyz = output_xyz;
  fs.output_intensity = output_intensities;

  dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
  dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));

  // Compute slice bounds — the same logic applies independently to each axis
  int3 minBounds, maxBounds;
  computeShiftedSliceBounds (buffer->origin_GRID.x, buffer->voxels_size.x, shiftX, minBounds.x, maxBounds.x);
  computeShiftedSliceBounds (buffer->origin_GRID.y, buffer->voxels_size.y, shiftY, minBounds.y, maxBounds.y);
  computeShiftedSliceBounds (buffer->origin_GRID.z, buffer->voxels_size.z, shiftZ, minBounds.z, maxBounds.z);

  // Extraction call
  hipLaunchKernelGGL(( extractSliceKernel), dim3(grid), dim3(block), 0, 0, fs, *buffer, minBounds, maxBounds);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall ( hipDeviceSynchronize () );

  // number of points written is accumulated on the device by the kernel
  int size;
  cudaSafeCall ( hipMemcpyFromSymbol (&size, output_xyz_count, sizeof(size)) );
  return (size_t)size;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
//! Computes a surface normal for every extracted point by central differences of the
//! trilinearly-interpolated TSDF field around the point.
template<typename NormalType>
struct ExtractNormals
{
  float3 cell_size;            //!< metric size of a single voxel
  PtrStep<short2> volume;      //!< packed TSDF volume: (tsdf, weight) per voxel
  PtrSz<PointType> points;     //!< input points (in meters) to compute normals for
  mutable NormalType* output;  //!< output array, one normal per input point

  //! Reads the raw TSDF value at voxel (x, y, z).
  __device__ __forceinline__ float
  readTsdf (int x, int y, int z) const
  {
    return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
  }

  //! Loads input point idx as a float3.
  __device__ __forceinline__ float3
  fetchPoint (int idx) const
  {
    PointType p = points.data[idx];
    return make_float3 (p.x, p.y, p.z);
  }

  //! Writes a normal to output[idx] (only the x, y, z components are set).
  __device__ __forceinline__ void
  storeNormal (int idx, float3 normal) const
  {
    NormalType n;
    n.x = normal.x; n.y = normal.y; n.z = normal.z;
    output[idx] = n;
  }

  //! Converts a metric point to the index of the voxel containing it.
  __device__ __forceinline__ int3
  getVoxel (const float3& point) const
  {
    int vx = __float2int_rd (point.x / cell_size.x);        // round to negative infinity
    int vy = __float2int_rd (point.y / cell_size.y);
    int vz = __float2int_rd (point.z / cell_size.z);

    return make_int3 (vx, vy, vz);
  }

  //! One thread per point: estimate the normal as the normalized TSDF gradient.
  //! Points too close to the volume border keep a NaN normal.
  __device__ __forceinline__ void
  operator () () const
  {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;

    if (idx >= points.size)
      return;

    const float qnan = std::numeric_limits<float>::quiet_NaN ();
    float3 n = make_float3 (qnan, qnan, qnan);

    float3 point = fetchPoint (idx);
    int3 g = getVoxel (point);

    // a 2-voxel margin keeps every interpolation stencil inside the volume
    if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
    {
      float3 t;

      // central difference along x
      t = point;
      t.x += cell_size.x;
      float Fx1 = interpolateTrilineary (t);

      t = point;
      t.x -= cell_size.x;
      float Fx2 = interpolateTrilineary (t);

      n.x = (Fx1 - Fx2);

      // central difference along y
      t = point;
      t.y += cell_size.y;
      float Fy1 = interpolateTrilineary (t);

      t = point;
      t.y -= cell_size.y;
      float Fy2 = interpolateTrilineary (t);

      n.y = (Fy1 - Fy2);

      // central difference along z
      t = point;
      t.z += cell_size.z;
      float Fz1 = interpolateTrilineary (t);

      t = point;
      t.z -= cell_size.z;
      float Fz2 = interpolateTrilineary (t);

      n.z = (Fz1 - Fz2);

      n = normalized (n);
    }

    storeNormal (idx, n);
  }

  //! Trilinear interpolation of the TSDF at an arbitrary metric point.
  __device__ __forceinline__ float
  interpolateTrilineary (const float3& point) const
  {
    int3 g = getVoxel (point);

    /*
    //OLD CODE
    float vx = (g.x + 0.5f) * cell_size.x;
    float vy = (g.y + 0.5f) * cell_size.y;
    float vz = (g.z + 0.5f) * cell_size.z;

    if (point.x < vx) g.x--;
    if (point.y < vy) g.y--;
    if (point.z < vz) g.z--;

    //float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
    //float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
    //float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;

    float a = point.x/ cell_size.x - (g.x + 0.5f);
    float b = point.y/ cell_size.y - (g.y + 0.5f);
    float c = point.z/ cell_size.z - (g.z + 0.5f);
    */

    //NEW CODE: fractional offsets from the voxel-center lattice; shift the base voxel
    //down when the point lies below its voxel center
    float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; };
    float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; };
    float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; };

    // blend the 8 surrounding voxel-center samples
    float res = (1 - a) * (
                (1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) +
                            readTsdf (g.x + 0, g.y + 0, g.z + 1) * c )
                    + b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) +
                            readTsdf (g.x + 0, g.y + 1, g.z + 1) * c )
                ) + a * (
                (1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) +
                            readTsdf (g.x + 1, g.y + 0, g.z + 1) * c )
                    + b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) +
                            readTsdf (g.x + 1, g.y + 1, g.z + 1) * c )
                );
    return res;
  }
};
//! Kernel wrapper: one thread per input point, delegates to ExtractNormals::operator().
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
  en ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//! Computes per-point surface normals from the TSDF gradient on the GPU.
/** \param volume packed TSDF volume
 *  \param volume_size metric size of the volume
 *  \param points input points (device memory)
 *  \param output destination device array, one NormalType per input point
 */
template<typename NormalType> void
extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
                const PtrSz<PointType>& points, NormalType* output)
{
  // an empty cloud would produce divUp(0, 256) == 0 blocks, an invalid launch config
  if (points.size == 0)
    return;

  ExtractNormals<NormalType> en;
  en.volume = volume;
  en.cell_size.x = volume_size.x / VOLUME_X;
  en.cell_size.y = volume_size.y / VOLUME_Y;
  en.cell_size.z = volume_size.z / VOLUME_Z;
  en.points = points;
  en.output = output;

  dim3 block (256);
  dim3 grid (divUp (points.size, block.x));

  hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
template void extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
}
}
}
| 03d09cd678dec03736d66135d97d0bc8ead4798d.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
#include <iostream>
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
// Launch configuration and staging constants for the extraction kernels below.
enum
{
  CTA_SIZE_X = 32,                     // block width: exactly one warp
  CTA_SIZE_Y = 6,                      // block height
  CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,  // threads per block
  MAX_LOCAL_POINTS = 3                 // max points one thread can emit per voxel (one per axis)
};

// Device-global counters shared by all blocks of a single extraction launch.
__device__ int global_count = 0;          // running total of points reserved in the output buffer
__device__ int output_xyz_count = 0;      // final point count, copied back to the host after the launch
__device__ unsigned int blocks_done = 0;  // finished-block counter; the last block resets the globals

// Per-block staging buffers: each warp compacts its points here before the global write.
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_I[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
mutable PtrSz<PointType> output_xyz;
mutable PtrSz<float> output_intensity;
//! Reads the TSDF value and weight at voxel (x, y, z), translating the address
//! through the cyclic (rolling) tsdf_buffer.
__device__ __forceinline__ float
fetch (pcl::gpu::kinfuLS::tsdf_buffer buffer, int x, int y, int z, int& weight) const
{
  float tsdf;
  const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
  short2* pos = const_cast<short2*> (tmp_pos);
  shift_tsdf_pointer (&pos, buffer);  // wrap the pointer into the rolling buffer
  unpack_tsdf (*pos, tsdf, weight);
  return tsdf;
}
//! Reads the TSDF value and weight at voxel (x, y, z) with plain (non-rolling) addressing.
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
  float tsdf;
  unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
  return tsdf;
}
//! Full-volume extraction: finds zero crossings along +x, +y and +z of every voxel
//! and writes interpolated surface points (in meters) to output_xyz.
__device__ __forceinline__ void
operator () () const
{
  int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
  int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;

#if CUDART_VERSION >= 9000
  if (__all_sync (__activemask (), x >= VOLUME_X)
      || __all_sync (__activemask (), y >= VOLUME_Y))
    return;
#else
  if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
    return;
#endif

  float3 V;
  V.x = (x + 0.5f) * cell_size.x;   // voxel center in meters
  V.y = (y + 0.5f) * cell_size.y;

  int ftid = Block::flattenedThreadId ();

  for (int z = 0; z < VOLUME_Z - 1; ++z)
  {
    // up to MAX_LOCAL_POINTS crossings can be produced per voxel (one per axis)
    float3 points[MAX_LOCAL_POINTS];
    int local_count = 0;

    if (x < VOLUME_X && y < VOLUME_Y)
    {
      int W;
      float F = fetch (x, y, z, W);

      // only consider observed voxels that are not free space
      if (W != 0 && F != 1.f)
      {
        V.z = (z + 0.5f) * cell_size.z;

        // process dx
        if (x + 1 < VOLUME_X)
        {
          int Wn;
          float Fn = fetch (x + 1, y, z, Wn);

          if (Wn != 0 && Fn != 1.f)
            if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
            {
              // linearly interpolate the zero crossing between the two voxel centers
              float3 p;
              p.y = V.y;
              p.z = V.z;

              float Vnx = V.x + cell_size.x;

              float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
              p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;

              points[local_count++] = p;
            }
        } /* if (x + 1 < VOLUME_X) */

        // process dy
        if (y + 1 < VOLUME_Y)
        {
          int Wn;
          float Fn = fetch (x, y + 1, z, Wn);

          if (Wn != 0 && Fn != 1.f)
            if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) )
            {
              float3 p;
              p.x = V.x;
              p.z = V.z;

              float Vny = V.y + cell_size.y;

              float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
              p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;

              points[local_count++] = p;
            }
        } /* if (y + 1 < VOLUME_Y) */

        // process dz
        // if (z + 1 < VOLUME_Z) // guaranteed by loop
        {
          int Wn;
          float Fn = fetch (x, y, z + 1, Wn);

          if (Wn != 0 && Fn != 1.f)
            if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
            {
              float3 p;
              p.x = V.x;
              p.y = V.y;

              float Vnz = V.z + cell_size.z;

              float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
              p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;

              points[local_count++] = p;
            }
        }/* if (z + 1 < VOLUME_Z) */
      }/* if (W != 0 && F != 1.f) */
    }/* if (x < VOLUME_X && y < VOLUME_Y) */

    // note: we filled the points array at the current iteration;
    // count how many points the whole warp produced
#if CUDART_VERSION >= 9000
    int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
                   + __popc (__ballot_sync (__activemask (), local_count > 1))
                   + __popc (__ballot_sync (__activemask (), local_count > 2));
#else
    int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif

    if (total_warp > 0)
    {
      int lane = Warp::laneId ();
      int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;

      // reuse the shared staging area as an int scratch buffer for the warp scan
      volatile int* cta_buffer = (int*)(storage_X + storage_index);

      cta_buffer[lane] = local_count;
      // exclusive prefix sum: how many points lower lanes produced
      int offset = scan_warp<exclusive>(cta_buffer, lane);

      if (lane == 0)
      {
        // one global reservation per warp
        int old_global_count = atomicAdd (&global_count, total_warp);
        cta_buffer[0] = old_global_count;
      }
      int old_global_count = cta_buffer[0];

      // compact this thread's points into the warp's shared staging area
      for (int l = 0; l < local_count; ++l)
      {
        storage_X[storage_index + offset + l] = points[l].x;
        storage_Y[storage_index + offset + l] = points[l].y;
        storage_Z[storage_index + offset + l] = points[l].z;
      }

      // cooperatively copy the staged points out to global memory
      int offset_storage = old_global_count + lane;
      for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
      {
        if (offset_storage >= output_xyz.size) break;
        float x = storage_X[storage_index + idx];
        float y = storage_Y[storage_index + idx];
        float z = storage_Z[storage_index + idx];
        store_point_type (x, y, z, output_xyz.data, offset_storage);
      }

      // stop scanning this column once the output buffer is exhausted
      bool full = (old_global_count + total_warp) >= output_xyz.size;

      if (full)
        break;
    }

  }/* for(int z = 0; z < VOLUME_Z - 1; ++z) */

  ///////////////////////////
  // Prepare for future scans: the last block to finish resets the device counters
  if (ftid == 0)
  {
    unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
    unsigned int value = atomicInc (&blocks_done, total_blocks);

    // Last block
    if (value == total_blocks - 1)
    {
      output_xyz_count = min ((int)output_xyz.size, global_count);
      blocks_done = 0;
      global_count = 0;
    }
  }
} /* operator() */
// OPERATOR USED BY EXTRACT_SLICE_AS_CLOUD.
// This operator extracts the cloud as TSDF values and X,Y,Z indices.
// The previous operator generates a regular point cloud in meters.
// This one generates a TSDF Point Cloud in grid indices.
__device__ __forceinline__ void
operator () (pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) const
{
  int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
  int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;

  int ftid = Block::flattenedThreadId ();

  int minimum_Z = 0;
  int maximum_Z = VOLUME_Z - 1;

  for (int z = minimum_Z; z < maximum_Z; ++z)
  {
    // The black zone is the name given to the subvolume within the TSDF Volume grid that is shifted out.
    // In other words, the set of points in the TSDF grid that we want to extract in order to add it to the world model being built in CPU.
    // NOTE(review): the per-axis ranges are combined with ||, so a voxel qualifies when ANY
    // coordinate lies inside its axis range — confirm this matches the intended slice shape.
    bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ;
    float4 points[MAX_LOCAL_POINTS];
    int local_count = 0;

    if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone)
    {
      int W;
      float F = fetch (buffer, x, y, z, W);

      // keep voxels carrying a valid, informative TSDF sample
      if (W != 0.0f && F != 1.f && F < 0.98 && F != 0.0f && F > -1.0f)
      {
        // store grid indices plus the TSDF value in w
        float4 p;
        p.x = x;
        p.y = y;
        p.z = z;
        p.w = F;
        points[local_count++] = p;
      }
    }/* if (x < VOLUME_X && y < VOLUME_Y) */

    // local_count counts the number of zero crossing for the current thread. Now we need to merge this knowledge with the other threads
    // note: we filled the points array at the current iteration
#if CUDART_VERSION >= 9000
    int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
                   + __popc (__ballot_sync (__activemask (), local_count > 1))
                   + __popc (__ballot_sync (__activemask (), local_count > 2));
#else
    int total_warp = __popc (__ballot (local_count > 0))
                   + __popc (__ballot (local_count > 1))
                   + __popc (__ballot (local_count > 2));
#endif

    if (total_warp > 0)  ///more than 0 zero-crossings
    {
      int lane = Warp::laneId ();  ///index of thread within warp [0-31]
      int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;

      // Pointer to the beginning of the current warp buffer
      volatile int* cta_buffer = (int*)(storage_X + storage_index);

      // Compute offset of current warp
      // Call in place scanning (see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html)
      cta_buffer[lane] = local_count;
      int offset = scan_warp<exclusive>(cta_buffer, lane);  //How many crossings did we have before index "lane" ?

      // We want to do only 1 operation per warp (not thread) -> because it is faster
      if (lane == 0)
      {
        int old_global_count = atomicAdd (&global_count, total_warp);  ///We use atomicAdd, so that threads do not collide
        cta_buffer[0] = old_global_count;
      }
      int old_global_count = cta_buffer[0];

      // Perform compaction (dump all current crossings)
      for (int l = 0; l < local_count; ++l)
      {
        storage_X[storage_index + offset + l] = points[l].x;  // x coordinates of the points we found in STORAGE_X
        storage_Y[storage_index + offset + l] = points[l].y;  // y coordinates of the points we found in STORAGE_Y
        storage_Z[storage_index + offset + l] = points[l].z;  // z coordinates of the points we found in STORAGE_Z
        storage_I[storage_index + offset + l] = points[l].w;  // Intensity values of the points we found in STORAGE_I
      }

      // Retrieve Zero-crossings as 3D points
      int offset_storage = old_global_count + lane;
      for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE)
      {
        if (offset_storage >= output_xyz.size) break;
        float x = storage_X[storage_index + idx];
        float y = storage_Y[storage_index + idx];
        float z = storage_Z[storage_index + idx];
        float i = storage_I[storage_index + idx];
        store_point_intensity (x, y, z, i, output_xyz.data, output_intensity.data, offset_storage);
      }

      // Sanity check to make sure our output_xyz buffer is not full already
      bool full = (old_global_count + total_warp) >= output_xyz.size;

      if (full)
        break;
    }
  } /* for(int z = 0; z < VOLUME_Z - 1; ++z) */

  ///////////////////////////
  // Prepare for future scans: the last block to finish resets the device counters
  if (ftid == 0)
  {
    unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
    unsigned int value = atomicInc (&blocks_done, total_blocks);

    // Last block
    if (value == total_blocks - 1)
    {
      output_xyz_count = min ((int)output_xyz.size, global_count);
      blocks_done = 0;
      global_count = 0;
    }
  }
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr, int offset) const
{
  // Write one extracted point at slot `offset`; the w component is unused and zeroed.
  ptr[offset] = make_float4 (x, y, z, 0.f);
}
// Inline helper that stores XYZ and intensity values in 2 separate device arrays.
// ptr_xyz: pointer to the BEGINNING of the XYZ device array
// ptr_intensity: pointer to the BEGINNING of the intensity device array
// offset: element offset applied to both arrays (kept in lock-step so element i
//         of each array describes the same point)
__device__ __forceinline__ void
store_point_intensity (float x, float y, float z, float i, float4* ptr_xyz, float* ptr_intensity, int offset) const
{
// w component of the float4 is unused and set to zero.
*(ptr_xyz + offset) = make_float4 (x, y, z, 0);
*(ptr_intensity + offset) = i;
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr, int offset) const
{
  // Overload for plain XYZ output buffers (no padding component).
  ptr[offset] = make_float3 (x, y, z);
}
};
// Thin kernel wrapper: runs the FullScan6 full-volume extraction functor on the GPU.
__global__ void
extractKernel (const FullScan6 fs)
{
fs ();
}
// Thin kernel wrapper: extracts only the voxels inside [minBounds, maxBounds]
// (bounds are in TSDF grid coordinates relative to the rolling-buffer origin).
__global__ void
extractSliceKernel (const FullScan6 fs, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
fs (buffer, minBounds, maxBounds);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Scans the whole TSDF volume and extracts surface (zero-crossing) points into
// output_xyz. Returns the number of points written; the kernel clamps this to
// the capacity of output_xyz via the device symbol output_xyz_count.
size_t
extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output_xyz)
{
FullScan6 fs;
fs.volume = volume;
// Metric size of one voxel along each axis.
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output_xyz = output_xyz;
// One thread per (x, y) column; the functor walks the z axis internally.
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
extractKernel<<<grid, block>>>(fs);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize () );
int size;
// The kernel publishes the final point count in the device symbol output_xyz_count.
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_xyz_count, sizeof (size)) );
// cudaSafeCall ( cudaMemcpyFromSymbol (&size, "output_xyz_count", sizeof (size)) );
return ((size_t)size);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Extracts the slice of the rolling TSDF buffer that is about to be shifted out
// (by shiftX/shiftY/shiftZ voxels) as a point cloud with intensities.
// Bounds are first computed in absolute grid coordinates, then rebased to the
// buffer origin and wrapped, since the buffer is addressed circularly.
// Returns the number of points written (clamped to output_xyz capacity).
size_t
extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
const int shiftX, const int shiftY, const int shiftZ,
PtrSz<PointType> output_xyz, PtrSz<float> output_intensities)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / buffer->voxels_size.x;
fs.cell_size.y = volume_size.y / buffer->voxels_size.y;
fs.cell_size.z = volume_size.z / buffer->voxels_size.z;
fs.output_xyz = output_xyz;
fs.output_intensity = output_intensities;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//Compute slice bounds
// New origin after the shift, in absolute grid coordinates.
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int newZ = buffer->origin_GRID.z + shiftZ;
int3 minBounds, maxBounds;
//X
if (newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
minBounds.x = newX + buffer->voxels_size.x - 1;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x - 1;
}
if (minBounds.x > maxBounds.x)
std::swap (minBounds.x, maxBounds.x);
//Y
if (newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y - 1;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y - 1;
}
if(minBounds.y > maxBounds.y)
std::swap (minBounds.y, maxBounds.y);
//Z
if (newZ >= 0)
{
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = newZ;
}
else
{
minBounds.z = newZ + buffer->voxels_size.z - 1;
maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z - 1;
}
if (minBounds.z > maxBounds.z)
std::swap(minBounds.z, maxBounds.z);
// Rebase the bounds so they are relative to the buffer origin (kernel space).
minBounds.x -= buffer->origin_GRID.x;
maxBounds.x -= buffer->origin_GRID.x;
minBounds.y -= buffer->origin_GRID.y;
maxBounds.y -= buffer->origin_GRID.y;
minBounds.z -= buffer->origin_GRID.z;
maxBounds.z -= buffer->origin_GRID.z;
// Wrap negative bounds back into the circular buffer.
if (minBounds.x < 0) // We are shifting Left
{
minBounds.x += buffer->voxels_size.x;
maxBounds.x += (buffer->voxels_size.x);
}
if (minBounds.y < 0) // We are shifting up
{
minBounds.y += buffer->voxels_size.y;
maxBounds.y += (buffer->voxels_size.y);
}
if (minBounds.z < 0) // We are shifting back
{
minBounds.z += buffer->voxels_size.z;
maxBounds.z += buffer->voxels_size.z;
}
// Extraction call
extractSliceKernel<<<grid, block>>>(fs, *buffer, minBounds, maxBounds);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize () );
int size;
// Point count published by the kernel in the device symbol output_xyz_count.
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_xyz_count, sizeof(size)) );
return (size_t)size;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Computes a surface normal for each extracted point by central differences of
// the trilinearly-interpolated TSDF field around the point. Points whose voxel
// lies too close to the volume border get a quiet-NaN normal.
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;            // metric size of one voxel per axis
PtrStep<short2> volume;      // packed TSDF volume
PtrSz<PointType> points;     // input points (one normal computed per point)
mutable NormalType* output;  // output normals, same indexing as `points`
// Reads and unpacks the TSDF value at voxel (x, y, z).
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
// Loads input point `idx` as a float3 (drops any extra components).
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
// Writes the normal for point `idx`, converting to the output type.
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
// Maps a metric point to its containing voxel index.
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
// One thread per point: gradient of the TSDF via central differences,
// normalized; NaN if the 2-voxel interpolation margin is not available.
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = std::numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
// Trilinear interpolation of the TSDF at a metric point. Fractional offsets
// a/b/c are computed from voxel centers; negative offsets step back one voxel.
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
/*
//OLD CODE
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
if (point.x < vx) g.x--;
if (point.y < vy) g.y--;
if (point.z < vz) g.z--;
//float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
//float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
//float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float a = point.x/ cell_size.x - (g.x + 0.5f);
float b = point.y/ cell_size.y - (g.y + 0.5f);
float c = point.z/ cell_size.z - (g.z + 0.5f);
*/
//NEW CODE
float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; };
float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; };
float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; };
float res = (1 - a) * (
(1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * c )
+ b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * c )
) + a * (
(1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * c )
+ b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * c )
);
return res;
}
};
// Thin kernel wrapper: runs the ExtractNormals functor (one thread per point).
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host entry point: computes one normal per input point from the TSDF volume.
// `output` must have room for points.size elements of NormalType.
template<typename NormalType> void
extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
// Metric voxel size per axis.
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
// One thread per point.
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
extractNormalsKernel<<<grid, block>>>(en);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template void extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
}
}
}
|
ec0cda19f0cc4d79407100c0de66361de4278893.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<device_launch_parameters.h>
#include<hip/device_functions.h>
#include "utils.h"
// Block-wise tree reduction (max when maxbool, else min) over dip, writing one
// partial result per block into dop[blockIdx.x].
// Launch with blockDim.x floats of dynamic shared memory; the halving loop
// assumes blockDim.x is a power of two (odd sizes would drop elements).
// NOTE(review): there is no bounds guard — every thread reads dip[AID], so the
// input must hold at least gridDim.x*blockDim.x elements; trailing threads of a
// partially-filled last block otherwise read out of bounds. Confirm callers.
__global__ void kernel_red_op(float * dop, const float * const dip, bool maxbool)
{
extern __shared__ float dats[];
int AID = threadIdx.x + blockDim.x * blockIdx.x;
int thread_id = threadIdx.x;
// Stage one element per thread into shared memory.
dats[thread_id] = dip[AID];
__syncthreads();
// Pairwise halving: active threads fold the upper half onto the lower half.
for (unsigned int vars = blockDim.x / 2; vars > 0; vars >>= 1)
{
if (thread_id < vars)
{
if (maxbool)
dats[thread_id] = max(dats[thread_id], dats[thread_id + vars]);
else
dats[thread_id] = min(dats[thread_id], dats[thread_id + vars]);
}
__syncthreads();
}
// Thread 0 publishes the block's partial result.
if (thread_id == 0)
{
dop[blockIdx.x] = dats[0];
}
}
// Histogram kernel: maps each value to a bin in [0, noofbins) by its normalized
// position within [minLL, minLL + LLR] and atomically increments that bin.
// NOTE(review): no bounds guard on AID — assumes the launch covers exactly the
// input length; excess threads would read past the input. Confirm callers.
__global__ void kern_hist(unsigned int * dop, const float * const dip,
const size_t noofbins, float LLR, float minLL)
{
int AID = threadIdx.x + blockDim.x * blockIdx.x;
int b = (dip[AID] - minLL) / LLR * noofbins;
// The maximum value lands exactly on noofbins; clamp it into the last bin.
if (b == noofbins) b--;
atomicAdd(&dop[b], 1);
}
// NOTE(review): despite its name this is a byte-for-byte duplicate of kern_hist
// (it builds a histogram, not a scan) and is never launched in this file —
// looks like a leftover stub; confirm before removing.
__global__ void scan_kernel(unsigned int * dop, const float * const dip,
const size_t noofbins, float LLR, float minLL)
{
int AID = threadIdx.x + blockDim.x * blockIdx.x;
int b = (dip[AID] - minLL) / LLR * noofbins;
if (b == noofbins) b--;
atomicAdd(&dop[b], 1);
}
// In-place exclusive prefix sum (Blelloch up-sweep/down-sweep) over dip.
// Launch with a single block of noofbins threads; the tree arithmetic assumes
// noofbins is a power of two.
__global__ void cdfk(unsigned int * dip, const size_t noofbins)
{
int AID = threadIdx.x;
// Up-sweep: build partial sums in place.
for (int d = 1; d < noofbins; d *= 2) {
if ((AID + 1) % (d * 2) == 0) {
dip[AID] += dip[AID - d];
}
__syncthreads();
}
// Clear the root, then down-sweep to distribute the prefix sums.
if (AID == noofbins - 1) dip[AID] = 0;
for (int d = noofbins / 2; d >= 1; d /= 2) {
if ((AID + 1) % (d * 2) == 0) {
unsigned int tmp = dip[AID - d];
dip[AID - d] = dip[AID];
dip[AID] += tmp;
}
__syncthreads();
}
}
// Exclusive prefix sum (CDF) of the histogram, Hillis-Steele style with a
// double-buffered shared array. Launch with one block of noofbins threads and
// 2 * noofbins * sizeof(int) bytes of dynamic shared memory.
// Fix: the original body referenced an undeclared identifier `n` (it would not
// compile); `n` is now defined from the noofbins parameter.
__global__ void cdfk_2(unsigned int * dip, const size_t noofbins)
{
int idx = threadIdx.x;
const int n = (int)noofbins; // was an undeclared `n` in the original
extern __shared__ int temp[];
int outp = 0, inp = 1;
// Shift right by one element so the scan is exclusive (temp[0] = 0).
temp[idx] = (idx > 0) ? dip[idx - 1] : 0;
__syncthreads();
for (int offs = 1; offs < n; offs *= 2) {
// Ping-pong: write into the `outp` half, read from the `inp` half.
outp = 1 - outp;
inp = 1 - outp;
if (idx >= offs) {
temp[outp*n+idx] = temp[inp*n+idx - offs] + temp[inp*n+idx];
} else {
temp[outp*n+idx] = temp[inp*n+idx];
}
__syncthreads();
}
dip[idx] = temp[outp*n+idx];
}
// Computes min/max of d_LL via a two-level reduction, then builds a histogram
// of the normalized values into d_cdf and converts it to an exclusive CDF.
// Outputs: minLL/max_LL (by reference) and d_cdf (device, noofbins entries).
// NOTE(review): the second-level reduction launches <<<1, bl_grp>>>, which
// assumes bl_grp is a power of two and <= the max block size; and the
// reduction/histogram kernels have no bounds guard, so nc*nr should be a
// multiple of varm (1024) — confirm against callers.
void myfuncs(const float* const d_LL,
unsigned int* const d_cdf,
float &minLL,
float &max_LL,
const size_t nr,
const size_t nc,
const size_t noofbins)
{
const int varm = 1 << 10;
int bl_grp = ceil((float)nc * nr / varm);
float *middle_i;
checkCudaErrors(hipMalloc(&middle_i, sizeof(float)* bl_grp));
float *min_d, *max_d;
checkCudaErrors(hipMalloc((void **)&min_d, sizeof(float)));
checkCudaErrors(hipMalloc((void **)&max_d, sizeof(float)));
// Pass 1: per-block partials into middle_i; pass 2: single-block final result.
kernel_red_op << <bl_grp, varm, varm * sizeof(float) >> >(middle_i, d_LL, true);
kernel_red_op << <1, bl_grp, bl_grp * sizeof(float) >> >(max_d, middle_i, true);
kernel_red_op << <bl_grp, varm, varm * sizeof(float) >> >(middle_i, d_LL, false);
kernel_red_op << <1, bl_grp, bl_grp * sizeof(float) >> >(min_d, middle_i, false);
checkCudaErrors(hipMemcpy(&minLL, min_d, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_LL, max_d, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(middle_i));
checkCudaErrors(hipFree(min_d));
checkCudaErrors(hipFree(max_d));
float LLR = max_LL - minLL;
printf("max_LL: %f minLL: %f LLR: %f\n", max_LL, minLL, LLR);
checkCudaErrors(hipMemset(d_cdf, 0, sizeof(unsigned int)* noofbins));
kern_hist << <bl_grp, varm >> >(d_cdf, d_LL, noofbins, LLR, minLL);
// In-place histogram -> exclusive CDF (double-buffered scan).
cdfk_2 << <1, noofbins, sizeof(unsigned int) * noofbins * 2 >> >(d_cdf, noofbins);
} | ec0cda19f0cc4d79407100c0de66361de4278893.cu | #include<device_launch_parameters.h>
#include<device_functions.h>
#include "utils.h"
// Block-wise tree reduction (max when maxbool, else min); one partial result
// per block is written to dop[blockIdx.x]. Requires blockDim.x floats of
// dynamic shared memory; assumes blockDim.x is a power of two.
// NOTE(review): no bounds guard — input must span gridDim.x*blockDim.x floats.
__global__ void kernel_red_op(float * dop, const float * const dip, bool maxbool)
{
extern __shared__ float dats[];
int AID = threadIdx.x + blockDim.x * blockIdx.x;
int thread_id = threadIdx.x;
// Stage one element per thread into shared memory.
dats[thread_id] = dip[AID];
__syncthreads();
// Pairwise halving reduction in shared memory.
for (unsigned int vars = blockDim.x / 2; vars > 0; vars >>= 1)
{
if (thread_id < vars)
{
if (maxbool)
dats[thread_id] = max(dats[thread_id], dats[thread_id + vars]);
else
dats[thread_id] = min(dats[thread_id], dats[thread_id + vars]);
}
__syncthreads();
}
// Thread 0 publishes the block's result.
if (thread_id == 0)
{
dop[blockIdx.x] = dats[0];
}
}
// Histogram kernel: bins each value by its normalized position in
// [minLL, minLL + LLR]; the maximum value is clamped into the last bin.
// NOTE(review): no bounds guard on AID — launch must cover exactly the input.
__global__ void kern_hist(unsigned int * dop, const float * const dip,
const size_t noofbins, float LLR, float minLL)
{
int AID = threadIdx.x + blockDim.x * blockIdx.x;
int b = (dip[AID] - minLL) / LLR * noofbins;
if (b == noofbins) b--;
atomicAdd(&dop[b], 1);
}
// NOTE(review): duplicate of kern_hist above (builds a histogram, not a scan)
// and never launched in this file — likely a leftover stub; confirm before use.
__global__ void scan_kernel(unsigned int * dop, const float * const dip,
const size_t noofbins, float LLR, float minLL)
{
int AID = threadIdx.x + blockDim.x * blockIdx.x;
int b = (dip[AID] - minLL) / LLR * noofbins;
if (b == noofbins) b--;
atomicAdd(&dop[b], 1);
}
// In-place exclusive prefix sum (Blelloch up-sweep/down-sweep) over dip.
// Single block of noofbins threads; assumes noofbins is a power of two.
__global__ void cdfk(unsigned int * dip, const size_t noofbins)
{
int AID = threadIdx.x;
// Up-sweep: build partial sums in place.
for (int d = 1; d < noofbins; d *= 2) {
if ((AID + 1) % (d * 2) == 0) {
dip[AID] += dip[AID - d];
}
__syncthreads();
}
// Clear the root, then down-sweep to distribute prefix sums.
if (AID == noofbins - 1) dip[AID] = 0;
for (int d = noofbins / 2; d >= 1; d /= 2) {
if ((AID + 1) % (d * 2) == 0) {
unsigned int tmp = dip[AID - d];
dip[AID - d] = dip[AID];
dip[AID] += tmp;
}
__syncthreads();
}
}
// Exclusive prefix sum (CDF) of the histogram, Hillis-Steele style with a
// double-buffered shared array. Launch with one block of noofbins threads and
// 2 * noofbins * sizeof(int) bytes of dynamic shared memory.
// Fix: the original body referenced an undeclared identifier `n` (it would not
// compile); `n` is now defined from the noofbins parameter.
__global__ void cdfk_2(unsigned int * dip, const size_t noofbins)
{
int idx = threadIdx.x;
const int n = (int)noofbins; // was an undeclared `n` in the original
extern __shared__ int temp[];
int outp = 0, inp = 1;
// Shift right by one element so the scan is exclusive (temp[0] = 0).
temp[idx] = (idx > 0) ? dip[idx - 1] : 0;
__syncthreads();
for (int offs = 1; offs < n; offs *= 2) {
// Ping-pong: write into the `outp` half, read from the `inp` half.
outp = 1 - outp;
inp = 1 - outp;
if (idx >= offs) {
temp[outp*n+idx] = temp[inp*n+idx - offs] + temp[inp*n+idx];
} else {
temp[outp*n+idx] = temp[inp*n+idx];
}
__syncthreads();
}
dip[idx] = temp[outp*n+idx];
}
// Computes min/max of d_LL via a two-level reduction, then histograms the
// normalized values into d_cdf and converts the histogram to an exclusive CDF.
// NOTE(review): the second-level reduction launches <<<1, bl_grp>>>, assuming
// bl_grp is a power of two and within the max block size; the reduction and
// histogram kernels have no bounds guard, so nc*nr should be a multiple of
// varm (1024) — confirm against callers.
void myfuncs(const float* const d_LL,
unsigned int* const d_cdf,
float &minLL,
float &max_LL,
const size_t nr,
const size_t nc,
const size_t noofbins)
{
const int varm = 1 << 10;
int bl_grp = ceil((float)nc * nr / varm);
float *middle_i;
checkCudaErrors(cudaMalloc(&middle_i, sizeof(float)* bl_grp));
float *min_d, *max_d;
checkCudaErrors(cudaMalloc((void **)&min_d, sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&max_d, sizeof(float)));
// Pass 1: per-block partials into middle_i; pass 2: single-block final value.
kernel_red_op << <bl_grp, varm, varm * sizeof(float) >> >(middle_i, d_LL, true);
kernel_red_op << <1, bl_grp, bl_grp * sizeof(float) >> >(max_d, middle_i, true);
kernel_red_op << <bl_grp, varm, varm * sizeof(float) >> >(middle_i, d_LL, false);
kernel_red_op << <1, bl_grp, bl_grp * sizeof(float) >> >(min_d, middle_i, false);
checkCudaErrors(cudaMemcpy(&minLL, min_d, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&max_LL, max_d, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(middle_i));
checkCudaErrors(cudaFree(min_d));
checkCudaErrors(cudaFree(max_d));
float LLR = max_LL - minLL;
printf("max_LL: %f minLL: %f LLR: %f\n", max_LL, minLL, LLR);
checkCudaErrors(cudaMemset(d_cdf, 0, sizeof(unsigned int)* noofbins));
kern_hist << <bl_grp, varm >> >(d_cdf, d_LL, noofbins, LLR, minLL);
// In-place histogram -> exclusive CDF (double-buffered scan).
cdfk_2 << <1, noofbins, sizeof(unsigned int) * noofbins * 2 >> >(d_cdf, noofbins);
} |
1f1a1af27625048ced6cbcd02929bbf0fdb060ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define max(x,y) (x>y?x:y)
#define min(x,y) (x>y?y:x)
#define THREAD_NUM 256
int BLOCK_NUM=0;
// Fills the n-by-m row-major matrix `a` with pseudo-random values in [0, 1].
// Uses the C library rand(); seed with srand() beforehand for reproducibility.
void matgen(double* a, int n, int m)
{
for (int i = 0; i < n; i++)
{
for (int j = 0; j < m; j++)
{
// Coarse part in [0, 1] plus a tiny refinement term (< 1/RAND_MAX).
a[i * m + j] = (double)rand() / RAND_MAX +
(double)rand() / ((long)RAND_MAX * RAND_MAX);
}
}
}
// One thread per output element of the n x k product: thread `idx` computes
// c[row][column] as the dot product of row `row` of a (n x m) and column
// `column` of b (m x k). Threads past n*k fall through the guard and exit.
__global__ static void MatMultKernel(const double* a, const double* b, double* c, int n, int m, int k)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int idx = bid * THREAD_NUM + tid;
const int row = idx / k;
const int column = idx % k;
if (row < n && column < k)
{
double t = 0;
for (int i = 0; i < m; i++)
{
t += a[row * m + i] * b[i * k + column];
}
c[idx] = t;
}
}
// Multiplies the n x m host matrix `a` by the m x k host matrix `b` on the GPU,
// writing the n x k result into host buffer `c`. Device buffers are allocated
// and freed internally.
// NOTE(review): no error checking on any HIP call or the kernel launch —
// allocation/launch failures would silently yield garbage in `c`.
void MatMultWithCuda(const double *a, const double *b, double *c, int n, int m, int k){
double *cuda_a, *cuda_b, *cuda_c;
hipMalloc((void**)&cuda_a, sizeof(double)* n * m);
hipMalloc((void**)&cuda_b, sizeof(double)* m * k);
hipMalloc((void**)&cuda_c, sizeof(double)* n * k);
hipMemcpy(cuda_a, a, sizeof(double)* n * m, hipMemcpyHostToDevice);
hipMemcpy(cuda_b, b, sizeof(double)* m * k, hipMemcpyHostToDevice);
// Enough blocks to cover all n*k outputs:
// min(n,k) * ceil(max(n,k)/THREAD_NUM) * THREAD_NUM >= n*k.
BLOCK_NUM = min(n,k) * (max(n,k) + THREAD_NUM - 1) / THREAD_NUM;
hipLaunchKernelGGL(( MatMultKernel), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0 , 0, cuda_a , cuda_b , cuda_c , n, m, k);
// The blocking device-to-host copy also synchronizes with the kernel.
hipMemcpy(c, cuda_c, sizeof(double)* n * k, hipMemcpyDeviceToHost);
hipFree(cuda_a);
hipFree(cuda_b);
hipFree(cuda_c);
}
// Reads n, m, k from stdin, generates random n x m and m x k matrices,
// multiplies them on the GPU, and prints the n x k result.
// NOTE(review): srand(time(NULL)) is called twice (the second call is
// redundant) and a/b/c are never freed — harmless for a one-shot program.
int main()
{
srand(time(NULL));
double *a, *b, *c;
int n, m, k;
scanf("%d%d%d",&n,&m,&k);
a = (double*)malloc(sizeof(double)* n * m);
b = (double*)malloc(sizeof(double)* m * k);
c = (double*)malloc(sizeof(double)* n * k);
srand(time(NULL));
matgen(a, n, m);
matgen(b, m, k);
MatMultWithCuda(a, b, c, n, m, k);
for(int i=0;i<n;i++){
for(int j=0;j<k;j++){
printf("%lf\t",c[i*k+j]);
}
printf("\n");
}
return 0;
} | 1f1a1af27625048ced6cbcd02929bbf0fdb060ee.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#define max(x,y) (x>y?x:y)
#define min(x,y) (x>y?y:x)
#define THREAD_NUM 256
int BLOCK_NUM=0;
// Fill the n-by-m row-major matrix `a` with pseudo-random values in [0, 1].
// Uses the C library rand(); seed with srand() beforehand for reproducibility.
void matgen(double* a, int n, int m)
{
    const double denom2 = (double)((long)RAND_MAX * RAND_MAX);
    for (int row = 0; row < n; ++row) {
        double* const out = a + row * m;
        for (int col = 0; col < m; ++col) {
            // Two draws per entry, in the same order as before:
            // a coarse value in [0, 1] plus a sub-1/RAND_MAX refinement.
            const double coarse = (double)rand() / RAND_MAX;
            const double fine = (double)rand() / denom2;
            out[col] = coarse + fine;
        }
    }
}
// One thread per output element: c[row][col] = dot(row of a, column of b),
// where a is n x m, b is m x k and c is n x k (all row-major).
__global__ static void MatMultKernel(const double* a, const double* b, double* c, int n, int m, int k)
{
    const int flat = blockIdx.x * THREAD_NUM + threadIdx.x;
    const int row = flat / k;
    const int col = flat - row * k;   // flat % k, for non-negative flat
    if (row >= n || col >= k)
        return;                        // surplus threads past n*k outputs
    double acc = 0;
    for (int p = 0; p < m; ++p)
        acc += a[row * m + p] * b[p * k + col];
    c[flat] = acc;
}
// Multiplies the n x m host matrix `a` by the m x k host matrix `b` on the GPU,
// writing the n x k result into host buffer `c`. Device buffers are allocated
// and freed internally.
// Improvement: the kernel launch is now checked with cudaGetLastError so a bad
// launch configuration is reported instead of silently returning garbage.
void MatMultWithCuda(const double *a, const double *b, double *c, int n, int m, int k){
double *cuda_a, *cuda_b, *cuda_c;
cudaMalloc((void**)&cuda_a, sizeof(double)* n * m);
cudaMalloc((void**)&cuda_b, sizeof(double)* m * k);
cudaMalloc((void**)&cuda_c, sizeof(double)* n * k);
cudaMemcpy(cuda_a, a, sizeof(double)* n * m, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_b, b, sizeof(double)* m * k, cudaMemcpyHostToDevice);
// Enough blocks to cover all n*k outputs:
// min(n,k) * ceil(max(n,k)/THREAD_NUM) * THREAD_NUM >= n*k.
BLOCK_NUM = min(n,k) * (max(n,k) + THREAD_NUM - 1) / THREAD_NUM;
MatMultKernel<<< BLOCK_NUM, THREAD_NUM, 0 >>>(cuda_a , cuda_b , cuda_c , n, m, k);
// Surface launch-configuration errors instead of silently copying garbage.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
fprintf(stderr, "MatMultWithCuda: kernel launch failed: %s\n", cudaGetErrorString(err));
// The blocking device-to-host copy also synchronizes with the kernel.
cudaMemcpy(c, cuda_c, sizeof(double)* n * k, cudaMemcpyDeviceToHost);
cudaFree(cuda_a);
cudaFree(cuda_b);
cudaFree(cuda_c);
}
// Reads n, m, k from stdin, generates random n x m and m x k matrices,
// multiplies them on the GPU, and prints the n x k result.
// NOTE(review): srand(time(NULL)) is called twice (the second call is
// redundant) and a/b/c are never freed — harmless for a one-shot program.
int main()
{
srand(time(NULL));
double *a, *b, *c;
int n, m, k;
scanf("%d%d%d",&n,&m,&k);
a = (double*)malloc(sizeof(double)* n * m);
b = (double*)malloc(sizeof(double)* m * k);
c = (double*)malloc(sizeof(double)* n * k);
srand(time(NULL));
matgen(a, n, m);
matgen(b, m, k);
MatMultWithCuda(a, b, c, n, m, k);
for(int i=0;i<n;i++){
for(int j=0;j<k;j++){
printf("%lf\t",c[i*k+j]);
}
printf("\n");
}
return 0;
} |
81bbb0f054d174a8c412d1f3b433528a29e6cfca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=64 --blockDim=256
template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n);
template __global__ void reduce1<int>(int *g_idata, int *g_odata, unsigned int n);
#include "common.h"
// Reduction kernel variant 1 (interleaved addressing with strided indexing):
// sums g_idata into one partial result per block, written to
// g_odata[blockIdx.x]. Needs blockDim.x elements of dynamic shared memory
// (via SharedMemory<T> from common.h).
template <class T>
__global__ void
reduce1(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
// Out-of-range threads contribute the additive identity (0).
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// Strided index keeps active threads contiguous (less warp divergence),
// at the cost of shared-memory bank conflicts.
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
| 81bbb0f054d174a8c412d1f3b433528a29e6cfca.cu | //pass
//--gridDim=64 --blockDim=256
template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n);
template __global__ void reduce1<int>(int *g_idata, int *g_odata, unsigned int n);
#include "common.h"
// Reduction kernel variant 1 (interleaved addressing with strided indexing):
// one partial sum per block into g_odata[blockIdx.x]; requires blockDim.x
// elements of dynamic shared memory via SharedMemory<T> (common.h).
template <class T>
__global__ void
reduce1(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
// Out-of-range threads contribute the additive identity (0).
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// Strided index avoids divergent warps but causes bank conflicts.
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
|
73b35103aa4599ee8d2e64a82bcc4a27467054b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test_spalias.cuh"
#include "../spalias.cuh"
#include "../error.cuh"
#include "assert.h"
using gplda::FileLine;
using gplda::f32;
using gplda::i32;
using gplda::u32;
using gplda::u64;
namespace gplda_test {
// GPU unit test for gplda::build_alias: builds an alias table from a 10-entry
// distribution dominated by entry 0, then checks that every slot's residual
// probability is <= 1/table_size (0.02 here, the uniform share after alias
// construction) and that every slot's alias points at the dominant entry 0.
// The `>> GPLDA_CHECK` idiom (error.cuh) aborts on any HIP API error.
void test_build_alias() {
u32 table_size = 10;
// Device arrays of pointers (one table), plus the per-table data buffers.
f32** prob;
u32** alias;
hipMalloc(&prob, sizeof(f32*)) >> GPLDA_CHECK;
hipMalloc(&alias, sizeof(u32*)) >> GPLDA_CHECK;
f32** prob_host[1];
u32** alias_host[1];
f32 prob_host_values[10] = {0.9,0.02,0.01,0.01,0.01, 0.01,0.01,0.01,0.01,0.01};
u32 alias_host_values[10];
hipMalloc(&prob_host[0], table_size * sizeof(f32)) >> GPLDA_CHECK;
hipMalloc(&alias_host[0], table_size * sizeof(u32)) >> GPLDA_CHECK;
// Publish the device data-buffer addresses through the pointer arrays.
hipMemcpy(prob, prob_host, sizeof(f32*), hipMemcpyHostToDevice) >> GPLDA_CHECK;
hipMemcpy(alias, alias_host, sizeof(u32*), hipMemcpyHostToDevice) >> GPLDA_CHECK;
hipMemcpy(prob_host[0], prob_host_values, table_size * sizeof(f32), hipMemcpyHostToDevice) >> GPLDA_CHECK;
hipLaunchKernelGGL(( gplda::build_alias), dim3(1),dim3(64), 0, 0, prob, alias, 10);
hipDeviceSynchronize() >> GPLDA_CHECK;
hipMemcpy(prob_host_values, prob_host[0], table_size * sizeof(f32), hipMemcpyDeviceToHost) >> GPLDA_CHECK;
hipMemcpy(alias_host_values, alias_host[0], table_size * sizeof(u32), hipMemcpyDeviceToHost) >> GPLDA_CHECK;
for(i32 i = 0; i < table_size; ++i) {
assert(prob_host_values[i] <= 0.02f);
assert(alias_host_values[i] == 0);
}
hipFree(prob_host[0]) >> GPLDA_CHECK;
hipFree(alias_host[0]) >> GPLDA_CHECK;
hipFree(prob) >> GPLDA_CHECK;
hipFree(alias) >> GPLDA_CHECK;
}
}
| 73b35103aa4599ee8d2e64a82bcc4a27467054b1.cu | #include "test_spalias.cuh"
#include "../spalias.cuh"
#include "../error.cuh"
#include "assert.h"
using gplda::FileLine;
using gplda::f32;
using gplda::i32;
using gplda::u32;
using gplda::u64;
namespace gplda_test {
// GPU unit test for gplda::build_alias: builds an alias table from a 10-entry
// distribution dominated by entry 0, then checks that every slot's residual
// probability is <= 1/table_size (0.02) and every alias points at entry 0.
// The `>> GPLDA_CHECK` idiom (error.cuh) aborts on any CUDA API error.
void test_build_alias() {
u32 table_size = 10;
// Device arrays of pointers (one table), plus the per-table data buffers.
f32** prob;
u32** alias;
cudaMalloc(&prob, sizeof(f32*)) >> GPLDA_CHECK;
cudaMalloc(&alias, sizeof(u32*)) >> GPLDA_CHECK;
f32** prob_host[1];
u32** alias_host[1];
f32 prob_host_values[10] = {0.9,0.02,0.01,0.01,0.01, 0.01,0.01,0.01,0.01,0.01};
u32 alias_host_values[10];
cudaMalloc(&prob_host[0], table_size * sizeof(f32)) >> GPLDA_CHECK;
cudaMalloc(&alias_host[0], table_size * sizeof(u32)) >> GPLDA_CHECK;
// Publish the device data-buffer addresses through the pointer arrays.
cudaMemcpy(prob, prob_host, sizeof(f32*), cudaMemcpyHostToDevice) >> GPLDA_CHECK;
cudaMemcpy(alias, alias_host, sizeof(u32*), cudaMemcpyHostToDevice) >> GPLDA_CHECK;
cudaMemcpy(prob_host[0], prob_host_values, table_size * sizeof(f32), cudaMemcpyHostToDevice) >> GPLDA_CHECK;
gplda::build_alias<<<1,64>>>(prob, alias, 10);
cudaDeviceSynchronize() >> GPLDA_CHECK;
cudaMemcpy(prob_host_values, prob_host[0], table_size * sizeof(f32), cudaMemcpyDeviceToHost) >> GPLDA_CHECK;
cudaMemcpy(alias_host_values, alias_host[0], table_size * sizeof(u32), cudaMemcpyDeviceToHost) >> GPLDA_CHECK;
for(i32 i = 0; i < table_size; ++i) {
assert(prob_host_values[i] <= 0.02f);
assert(alias_host_values[i] == 0);
}
cudaFree(prob_host[0]) >> GPLDA_CHECK;
cudaFree(alias_host[0]) >> GPLDA_CHECK;
cudaFree(prob) >> GPLDA_CHECK;
cudaFree(alias) >> GPLDA_CHECK;
}
}
|
f54699fd9e6b0db327390b01861e2e5270edb666.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_norm2_kernel;
int xdim0_tea_leaf_norm2_kernel_h = -1;
int ydim0_tea_leaf_norm2_kernel_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_norm2_kernel*(y))
// User kernel: accumulate the squared value of the current grid point into *norm.
__device__
void tea_leaf_norm2_kernel_gpu(const double *x, double * norm) {
  const double v = x[OPS_ACC0(0,0)];
  *norm += v * v;
}
#undef OPS_ACC0
// Wrapper kernel: each thread handles one (x, y) point inside the iteration
// range, accumulating its squared value locally; partials are combined into
// one slot per thread block in arg1 via ops_reduction_cuda.
__global__ void ops_tea_leaf_norm2_kernel(
const double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1 ){
double arg1_l[1];
for (int d=0; d<1; d++) arg1_l[d] = ZERO_double;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Advance arg0 to this thread's grid point (row pitch is the dat's x-dim).
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_norm2_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_norm2_kernel_gpu(arg0, arg1_l);
}
// Out-of-range threads still participate, contributing the 0 identity.
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg1[d+(blockIdx.x + blockIdx.y*gridDim.x)*1],arg1_l[d]);
}
// Host stub (auto-generated by the OPS translator): sets up and launches the
// tea_leaf_norm2 reduction loop (kernel id 39). It computes the local
// iteration range (sub-block aware under MPI), refreshes the device constant
// holding the dat's x-dimension, stages per-block reduction buffers, performs
// halo/H-D exchanges, launches the wrapper kernel, and combines the per-block
// partials into the host-side reduction handle.
void ops_par_loop_tea_leaf_norm2_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,2,range,39)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39,"tea_leaf_norm2_kernel");
OPS_kernels[39].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
// Clip the requested global range to this rank's sub-block.
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
// Refresh the device constant only when the dat's x-dimension changed.
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_tea_leaf_norm2_kernel_h) {
hipMemcpyToSymbol( xdim0_tea_leaf_norm2_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_norm2_kernel_h = xdim0;
}
#ifdef OPS_MPI
double *arg1h = (double *)(((ops_reduction)args[1].data)->data + ((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1);
int maxblocks = nblocks;
// Stage one zero-initialized double per block for the reduction partials.
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OPS_reduct_h + reduct_bytes;
arg1.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg1.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
// Byte offset of the first iterated element inside the dat.
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
}
// Dynamic shared memory: one reduction slot per thread.
int nshared = 0;
int nthread = OPS_block_size_x*OPS_block_size_y;
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared*nthread,reduct_size*nthread);
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_tea_leaf_norm2_kernel), dim3(grid), dim3(tblock), nshared , 0, (double *)p_a[0], (double *)arg1.data_d,x_size, y_size);
// Fold the per-block partials into the host-side reduction value.
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg1h[d] = arg1h[d] + ((double *)arg1.data)[d+b*1];
}
}
arg1.data = (char *)arg1h;
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[39].time += t1-t2;
}
ops_set_dirtybit_device(args, 2);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
| f54699fd9e6b0db327390b01861e2e5270edb666.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_norm2_kernel;
int xdim0_tea_leaf_norm2_kernel_h = -1;
int ydim0_tea_leaf_norm2_kernel_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_norm2_kernel*(y))
// User kernel: accumulates the squared value of the current grid point
// (accessed through the OPS_ACC0 indexing macro) into *norm.
__device__
void tea_leaf_norm2_kernel_gpu(const double *x, double * norm) {
*norm = *norm + x[OPS_ACC0(0,0)]*x[OPS_ACC0(0,0)];
}
#undef OPS_ACC0
__global__ void ops_tea_leaf_norm2_kernel(
const double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1 ){
double arg1_l[1];
for (int d=0; d<1; d++) arg1_l[d] = ZERO_double;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_norm2_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_norm2_kernel_gpu(arg0, arg1_l);
}
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg1[d+(blockIdx.x + blockIdx.y*gridDim.x)*1],arg1_l[d]);
}
// host stub function
void ops_par_loop_tea_leaf_norm2_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,2,range,39)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39,"tea_leaf_norm2_kernel");
OPS_kernels[39].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_tea_leaf_norm2_kernel_h) {
cudaMemcpyToSymbol( xdim0_tea_leaf_norm2_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_norm2_kernel_h = xdim0;
}
#ifdef OPS_MPI
double *arg1h = (double *)(((ops_reduction)args[1].data)->data + ((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1);
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OPS_reduct_h + reduct_bytes;
arg1.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg1.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
}
int nshared = 0;
int nthread = OPS_block_size_x*OPS_block_size_y;
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared*nthread,reduct_size*nthread);
//call kernel wrapper function, passing in pointers to data
ops_tea_leaf_norm2_kernel<<<grid, tblock, nshared >>> ( (double *)p_a[0], (double *)arg1.data_d,x_size, y_size);
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg1h[d] = arg1h[d] + ((double *)arg1.data)[d+b*1];
}
}
arg1.data = (char *)arg1h;
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[39].time += t1-t2;
}
ops_set_dirtybit_device(args, 2);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
|
4fa824e16cc25faf114fe8766e80915fce52f1e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "params.hpp"
#include <optix.h>
extern "C" static __constant__ Params params;
extern "C" __global__ void __raygen__frontProj() {
const uint3 launch_index = optixGetLaunchIndex();
const float3 &delta = params.delta;
const float3 &min_point = params.min_point;
const float3 &far_away_point = params.far_away_point;
float3 ray_origin =
make_float3(min_point.x + delta.x * launch_index.x,
min_point.y + delta.y * launch_index.y, far_away_point.z);
float3 ray_direction = make_float3(0.0, 0.0, 1.0);
float tmin = 0.0f;
float tmax = delta.z + 100.0;
float ray_time = 0.0f;
OptixVisibilityMask visibilityMask = 255;
unsigned int rayFlags = OPTIX_RAY_FLAG_DISABLE_ANYHIT;
unsigned int SBToffset = 0;
unsigned int SBTstride = 0;
unsigned int missSBTIndex = 0;
unsigned int payload = 0;
optixTrace(params.handle, ray_origin, ray_direction, tmin, tmax, ray_time,
visibilityMask, rayFlags, SBToffset, SBTstride, missSBTIndex,
payload);
unsigned int idx = launch_index.x + launch_index.y * params.width;
params.output[idx] = __uint_as_float(payload);
}
// extern "C" __global__ void __miss__frontProj() {}
// extern "C" __global__ void __anyhit__frontProj() {}
extern "C" __global__ void __closesthit__frontProj() {
float curr_tmax = optixGetRayTmax();
optixSetPayload_0(__float_as_uint(curr_tmax));
}
| 4fa824e16cc25faf114fe8766e80915fce52f1e5.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "params.hpp"
#include <optix.h>
extern "C" static __constant__ Params params;
extern "C" __global__ void __raygen__frontProj() {
const uint3 launch_index = optixGetLaunchIndex();
const float3 &delta = params.delta;
const float3 &min_point = params.min_point;
const float3 &far_away_point = params.far_away_point;
float3 ray_origin =
make_float3(min_point.x + delta.x * launch_index.x,
min_point.y + delta.y * launch_index.y, far_away_point.z);
float3 ray_direction = make_float3(0.0, 0.0, 1.0);
float tmin = 0.0f;
float tmax = delta.z + 100.0;
float ray_time = 0.0f;
OptixVisibilityMask visibilityMask = 255;
unsigned int rayFlags = OPTIX_RAY_FLAG_DISABLE_ANYHIT;
unsigned int SBToffset = 0;
unsigned int SBTstride = 0;
unsigned int missSBTIndex = 0;
unsigned int payload = 0;
optixTrace(params.handle, ray_origin, ray_direction, tmin, tmax, ray_time,
visibilityMask, rayFlags, SBToffset, SBTstride, missSBTIndex,
payload);
unsigned int idx = launch_index.x + launch_index.y * params.width;
params.output[idx] = __uint_as_float(payload);
}
// extern "C" __global__ void __miss__frontProj() {}
// extern "C" __global__ void __anyhit__frontProj() {}
extern "C" __global__ void __closesthit__frontProj() {
float curr_tmax = optixGetRayTmax();
optixSetPayload_0(__float_as_uint(curr_tmax));
}
|
fa861c5b11bfd2640166e3682d610411fe4d6227.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define E 2.71828182845904523536
__global__ void euler_gpu(float * array, float y0, float dt, int n){
// Initial condition
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < n) {
float initial_value = y0;
for(size_t i = 1; i < tId; i++){
initial_value = initial_value + dt*powf(E, -dt*i);
};
array[tId] = initial_value;
};
};
int main(int argc, char const *argv[]){
hipEvent_t start, stop;
float dts[6] = {0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001};
float y0 = -1.0;
int block_size = 256;
for(size_t i = 0; i < 6; i++){
int n = (int)(10/dts[i]);
float elapsed=0;
double error = 0;
float * resultados = (float *) malloc(n * sizeof(float));
float * d_r;
int grid_size = (int) ceil((float)n / block_size);
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc(&d_r, n * sizeof(float));
hipMemcpy(d_r, resultados, n * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( euler_gpu), dim3(grid_size), dim3(block_size), 0, 0, d_r, y0, dts[i], n);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(resultados, d_r, n * sizeof(float), hipMemcpyDeviceToHost);
for(int g = 0; g < n; g++){
float real = -powf(E, -dts[i]*g);
error = error + powf((resultados[g]-real),2);
}
printf("Executed with %f dt\n", dts[i]);
printf("The elapsed time in gpu was %.2f ms \n", elapsed);
printf("Mean squared error: %.16f \n", error/n);
free(resultados);
hipFree(d_r);
}
return 0;
};
| fa861c5b11bfd2640166e3682d610411fe4d6227.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define E 2.71828182845904523536
__global__ void euler_gpu(float * array, float y0, float dt, int n){
// Initial condition
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < n) {
float initial_value = y0;
for(size_t i = 1; i < tId; i++){
initial_value = initial_value + dt*powf(E, -dt*i);
};
array[tId] = initial_value;
};
};
int main(int argc, char const *argv[]){
cudaEvent_t start, stop;
float dts[6] = {0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001};
float y0 = -1.0;
int block_size = 256;
for(size_t i = 0; i < 6; i++){
int n = (int)(10/dts[i]);
float elapsed=0;
double error = 0;
float * resultados = (float *) malloc(n * sizeof(float));
float * d_r;
int grid_size = (int) ceil((float)n / block_size);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc(&d_r, n * sizeof(float));
cudaMemcpy(d_r, resultados, n * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
euler_gpu<<<grid_size, block_size>>>(d_r, y0, dts[i], n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(resultados, d_r, n * sizeof(float), cudaMemcpyDeviceToHost);
for(int g = 0; g < n; g++){
float real = -powf(E, -dts[i]*g);
error = error + powf((resultados[g]-real),2);
}
printf("Executed with %f dt\n", dts[i]);
printf("The elapsed time in gpu was %.2f ms \n", elapsed);
printf("Mean squared error: %.16f \n", error/n);
free(resultados);
cudaFree(d_r);
}
return 0;
};
|
2c1ad6fd8dd7324be74d58609c33700c343cbede.hip | // !!! This is a file automatically generated by hipify!!!
#include "TW_paDIC_cuFFTCC2D.h"
#include <iostream>
#include <stdexcept>
#include "TW_MemManager.h"
#include "TW_utils.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
namespace TW{
namespace paDIC{
// !-----------------------------CUDA Kernel Functions -----------------------------------
/// \brief Compute complex numbers' multiplication and scale for a whole set of subsets used
/// in the parallel FFTCC algoirthm to ultimately acclerate the computation.
///
/// \param *w_a array which hold the FFT-transformed values for all the subsets in refImg
/// \param *w_b array which hold the FFT-transformed values for all the subsets in tarImg
/// \param m_iFFTSubH subset height of the FFT-CC subset
/// \param m_iFFTSubW subset width of the FFT-CC subset
/// \param m_dModf normalization parameter of refImg
/// \param m_dModg normalization parameter of tarImg
/// \param *w_c the result array
__global__ void complexMulandScale_kernel(// Inputs
const cudafftComplex *w_a, const cudafftComplex *w_b,
int_t m_iFFTSubH, int_t m_iFFTSubW,
real_t *m_dModf, real_t *m_dModg,
// Output
cudafftComplex*w_c)
{
auto tid = threadIdx.x;
auto bid = blockIdx.x;
auto dim = blockDim.x;
auto size = m_iFFTSubW * (m_iFFTSubH / 2 + 1);
const cudafftComplex * a = w_a + bid*size;
const cudafftComplex * b = w_b + bid*size;
cudafftComplex * c = w_c + bid*size;
for (auto i = tid; i < size; i += dim)
{
c[i] = ComplexScale(ComplexMul(a[i], b[i]),
1.0 / (sqrt(m_dModf[bid] * m_dModg[bid]) * m_iFFTSubW * m_iFFTSubH));
}
}
/// \brief Using block-level parallel reduction algorithm to compute the maximum ZNCC values for
/// each subset in parallel. Each thread block is responsible for the calculation of one subset.
///
///
/// \param *w_Subset
__global__ void findMax(real_t*w_SubsetC,
int_t m_iFFTSubH, int_t m_iFFTSubW,
int_t m_iSubsetX, int_t m_iSubsetY,
//return val
real_t *m_fU, real_t *m_fV,
real_t *m_dZNCC)
{
auto tid = threadIdx.x;
auto dim = blockDim.x;
auto bid = blockIdx.x;
__shared__ real_t sdata[BLOCK_SIZE_64];
__shared__ int_t sind[BLOCK_SIZE_64];
auto size = m_iFFTSubW * m_iFFTSubH;
real_t *m_SubsetC = w_SubsetC + bid*(m_iFFTSubW * m_iFFTSubH);
real_t data = m_SubsetC[tid];
auto ind = tid;
for (auto id = tid + dim; id<size; id += dim)
{
if (data<m_SubsetC[id])
{
data = m_SubsetC[id];
ind = id;
}
}
reduceToMaxBlock<BLOCK_SIZE_64, real_t>(sdata, sind, data, ind, tid);
ind = sind[0];
int_t peakx = ind%m_iFFTSubW;
int_t peaky = ind / m_iFFTSubW;
if (peakx>m_iSubsetX)
peakx -= m_iFFTSubW;
if (peaky>m_iSubsetY)
peaky -= m_iFFTSubH;
if (tid == 0)
{
m_fU[bid] = real_t(peakx);
m_fV[bid] = real_t(peaky);
m_dZNCC[bid] = sdata[0];
//m_dZNCC[bid] = data;
}
}
__global__ void cufft_prepare_kernel(// Inputs
int_t *m_dPXY,
uchar1 *m_dR, uchar1 *m_dT,
int_t m_iFFTSubH, int_t m_iFFTSubW,
int_t m_iSubsetX, int_t m_iSubsetY,
int_t m_iHeight, int_t m_iWidth,
// Outputs
real_t *w_Subset1, real_t * w_Subset2,
real_t *m_dMod1, real_t *m_dMod2)
{
__shared__ real_t sm[BLOCK_SIZE_64];
auto bid = blockIdx.x;
auto dim = blockDim.x;
auto tid = threadIdx.x;
real_t d_tempt;
real_t d_sumR, d_sumT;
real_t d_aveR, d_aveT;
d_sumR = 0;
d_sumT = 0;
real_t *m_Subset1 = w_Subset1 + bid*(m_iFFTSubW * m_iFFTSubH);
real_t *m_Subset2 = w_Subset2 + bid*(m_iFFTSubW * m_iFFTSubH);
auto size = m_iFFTSubH*m_iFFTSubW;
for (auto id = tid; id<size; id += dim)
{
int_t l = id / m_iFFTSubW;
int_t m = id % m_iFFTSubW;
d_tempt = (real_t)m_dR[(int_t(m_dPXY[bid * 2] - m_iSubsetY + l))*m_iWidth + int_t(m_dPXY[bid * 2 + 1] - m_iSubsetX + m)].x;
m_Subset1[id] = d_tempt;
d_sumR += d_tempt / size;
d_tempt = (real_t)m_dT[(int_t(m_dPXY[bid * 2] - m_iSubsetY + l))*m_iWidth + int_t(m_dPXY[bid * 2 + 1] - m_iSubsetX + m)].x;
m_Subset2[id] = d_tempt;
d_sumT += d_tempt / size;
}
/*d_aveR = blockReduceSum<BLOCK_SIZE_256, float>(d_sumR);
d_aveT = blockReduceSum<BLOCK_SIZE_256, float>(d_sumT);*/
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumR, tid);
d_aveR = sm[0];
__syncthreads();
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumT, tid);
d_aveT = sm[0];
__syncthreads();
d_sumR = 0;
d_sumT = 0;
for (auto id = tid; id<size; id += dim)
{
d_tempt = m_Subset1[id] - d_aveR;
m_Subset1[id] = d_tempt;
d_sumR += pow(d_tempt, 2);
d_tempt = m_Subset2[id] - d_aveT;
m_Subset2[id] = d_tempt;
d_sumT += pow(d_tempt, 2);
}
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumR, tid);
if (tid == 0)
d_aveR = sm[0];
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumT, tid);
if (tid == 0)
d_aveT = sm[0];
if (tid == 0)
{
m_dMod1[bid] = d_aveR;
m_dMod2[bid] = d_aveT;
}
}
// ------------------------------CUDA Kernel Functions End-------------------------------!
cuFFTCC2D::cuFFTCC2D(const int_t iROIWidth, const int_t iROIHeight,
const int_t iSubsetX, const int_t iSubsetY,
const int_t iGridSpaceX, const int_t iGridSpaceY,
const int_t iMarginX, const int_t iMarginY)
: Fftcc2D(iROIWidth, iROIHeight,
iSubsetX, iSubsetY,
iGridSpaceX, iGridSpaceY,
iMarginX, iMarginY)
, isLowLevelApiCalled(false)
, isDestroyed(false)
{
if (!recomputeNumPOI())
throw std::logic_error("Number of POIs is below 0!");
}
cuFFTCC2D::cuFFTCC2D(const int_t iImgWidth, const int_t iImgHeight,
const int_t iROIWidth, const int_t iROIHeight,
const int_t iStartX, const int_t iStartY,
const int_t iSubsetX, const int_t iSubsetY,
const int_t iGridSpaceX, const int_t iGridSpaceY,
const int_t iMarginX, const int_t iMarginY)
: Fftcc2D(iImgWidth, iImgHeight,
iStartX, iStartY,
iROIWidth, iROIHeight,
iSubsetX, iSubsetY,
iGridSpaceX, iGridSpaceY,
iMarginX, iMarginY)
, isLowLevelApiCalled(false)
, isDestroyed(false)
{
if (!recomputeNumPOI())
throw std::logic_error("Number of POIs is below 0!");
}
cuFFTCC2D::~cuFFTCC2D()
{
if(!isDestroyed)
{
cudaSafeFree(g_cuHandle.m_d_fRefImg);
cudaSafeFree(g_cuHandle.m_d_fTarImg);
cudaSafeFree(g_cuHandle.m_d_fMod1);
cudaSafeFree(g_cuHandle.m_d_fMod2);
cudaSafeFree(g_cuHandle.m_dev_FreqDom1);
cudaSafeFree(g_cuHandle.m_dev_FreqDom2);
cudaSafeFree(g_cuHandle.m_dev_FreqDomfg);
cudaSafeFree(g_cuHandle.m_d_fSubset1);
cudaSafeFree(g_cuHandle.m_d_fSubset2);
cudaSafeFree(g_cuHandle.m_d_fSubsetC);
cudaSafeFree(g_cuHandle.m_d_fZNCC);
cudaSafeFree(g_cuHandle.m_d_fU);
cudaSafeFree(g_cuHandle.m_d_fV);
hipfftDestroy(g_cuHandle.m_forwardPlanXY);
hipfftDestroy(g_cuHandle.m_reversePlanXY);
}
}
void cuFFTCC2D::InitializeFFTCC(// Output
real_t**& fU,
real_t**& fV,
real_t**& fZNCC,
// Input
const cv::Mat& refImg)
{
//!- Check if the low level api is called or not
if (isLowLevelApiCalled)
{
std::cout << "The low-level GPU APIs are already initialized!\n";
return;
}
//!- Precompute the POI postions, since this is invariant during the entire
// computation.
//!- Determine whether the whole image or the ROI is used
if(!m_isWholeImgUsed)
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
else
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iStartX, m_iStartY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
int_t iPOINum = GetNumPOIs();
//!- Allocate host memory
hcreateptr<real_t>(fU, m_iNumPOIY, m_iNumPOIX);
hcreateptr<real_t>(fV, m_iNumPOIY, m_iNumPOIX);
hcreateptr<real_t>(fZNCC, m_iNumPOIY, m_iNumPOIX);
int_t iROISize = GetROISize();
int_t iFFTSubW = m_iSubsetX * 2, iFFTSubH = m_iSubsetY * 2;
int_t iFFTSize = iFFTSubW * iFFTSubH;
int_t iFFTFreqSize = iFFTSubW * (iFFTSubH / 2 + 1);
//!- Allocate device memory
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fRefImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
checkCudaErrors(hipMemcpy(g_cuHandle.m_d_fRefImg,
(void*)refImg.data,
/*sizeof(uchar)**/refImg.rows*refImg.cols,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fTarImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fU,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fV,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fZNCC,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_dev_FreqDom1,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_dev_FreqDom2,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_dev_FreqDomfg,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
//!- Initialize the CUFFT plan
int dim[2] = { iFFTSubW, iFFTSubH };
int idim[2] = { iFFTSubW, iFFTSubH };
int odim[2] = { iFFTSubW, (iFFTSubH / 2 + 1) };
hipfftPlanMany(&(g_cuHandle.m_forwardPlanXY),
2, dim,
idim, 1, iFFTSize,
odim, 1, iFFTFreqSize,
HIPFFT_R2C, iPOINum);
hipfftPlanMany(&(g_cuHandle.m_reversePlanXY),
2, dim,
odim, 1, iFFTFreqSize,
idim, 1, iFFTSize,
HIPFFT_C2R, iPOINum);
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fSubset1,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fSubset2,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fSubsetC,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fMod1,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fMod2,
sizeof(real_t)*iPOINum));
hipDeviceSynchronize();
}
void cuFFTCC2D::ComputeFFTCC(// Output
real_t**& fU,
real_t**& fV,
real_t**& fZNCC,
// Input
const cv::Mat& tarImg)
{
//if(tarImg.cols != )
checkCudaErrors(hipMemcpy(g_cuHandle.m_d_fTarImg,
(void*)tarImg.data,
/*sizeof(uchar)**/tarImg.cols*tarImg.rows,
hipMemcpyHostToDevice));
auto iFFTSubW = m_iSubsetX * 2;
auto iFFTSubH = m_iSubsetY * 2;
auto iPOINum = GetNumPOIs();
if(!m_isWholeImgUsed)
hipLaunchKernelGGL(( cufft_prepare_kernel) , dim3(iPOINum), dim3(BLOCK_SIZE_64) , 0, 0, g_cuHandle.m_d_iPOIXY,
g_cuHandle.m_d_fRefImg,
g_cuHandle.m_d_fTarImg,
iFFTSubH, iFFTSubW,
m_iSubsetX, m_iSubsetY,
m_iROIHeight, m_iROIWidth,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_d_fMod1,
g_cuHandle.m_d_fMod2);
else
hipLaunchKernelGGL(( cufft_prepare_kernel) , dim3(iPOINum), dim3(BLOCK_SIZE_64) , 0, 0, g_cuHandle.m_d_iPOIXY,
g_cuHandle.m_d_fRefImg,
g_cuHandle.m_d_fTarImg,
iFFTSubH, iFFTSubW,
m_iSubsetX, m_iSubsetY,
m_iImgHeight, m_iImgWidth,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_d_fMod1,
g_cuHandle.m_d_fMod2);
getLastCudaError("Error in calling cufft_prepare_kernel");
#ifdef TW_USE_DOUBLE
hipfftExecD2Z(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_dev_FreqDom1);
hipfftExecD2Z(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_dev_FreqDom2);
#else
hipfftExecR2C(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_dev_FreqDom1);
hipfftExecR2C(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_dev_FreqDom2);
#endif // TW_USE_DOUBLE
hipLaunchKernelGGL(( complexMulandScale_kernel) , dim3(iPOINum), dim3(BLOCK_SIZE_128) , 0, 0, g_cuHandle.m_dev_FreqDom1,
g_cuHandle.m_dev_FreqDom2,
iFFTSubH, iFFTSubW,
g_cuHandle.m_d_fMod1,
g_cuHandle.m_d_fMod2,
g_cuHandle.m_dev_FreqDomfg);
#ifdef TW_USE_DOUBLE
hipfftExecZ2D(g_cuHandle.m_reversePlanXY,
g_cuHandle.m_dev_FreqDomfg,
g_cuHandle.m_d_fSubsetC);
#else
hipfftExecC2R(g_cuHandle.m_reversePlanXY,
g_cuHandle.m_dev_FreqDomfg,
g_cuHandle.m_d_fSubsetC);
#endif // TW_USE_DOUBLE
hipLaunchKernelGGL(( findMax) , dim3(iPOINum), dim3(BLOCK_SIZE_64) , 0, 0, g_cuHandle.m_d_fSubsetC,
iFFTSubH, iFFTSubW,
m_iSubsetX, m_iSubsetY,
g_cuHandle.m_d_fU,
g_cuHandle.m_d_fV,
g_cuHandle.m_d_fZNCC);
checkCudaErrors(hipMemcpy(fU[0],
g_cuHandle.m_d_fU,
sizeof(real_t)*iPOINum,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(fV[0],
g_cuHandle.m_d_fV,
sizeof(real_t)*iPOINum,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(fZNCC[0],
g_cuHandle.m_d_fZNCC,
sizeof(real_t)*iPOINum,
hipMemcpyDeviceToHost));
}
void cuFFTCC2D::DestroyFFTCC(real_t**& fU,
real_t**& fV,
real_t**& fZNCC)
{
cudaSafeFree(g_cuHandle.m_d_fRefImg);
cudaSafeFree(g_cuHandle.m_d_fTarImg);
cudaSafeFree(g_cuHandle.m_d_fMod1);
cudaSafeFree(g_cuHandle.m_d_fMod2);
cudaSafeFree(g_cuHandle.m_d_iPOIXY);
cudaSafeFree(g_cuHandle.m_dev_FreqDom1);
cudaSafeFree(g_cuHandle.m_dev_FreqDom2);
cudaSafeFree(g_cuHandle.m_dev_FreqDomfg);
cudaSafeFree(g_cuHandle.m_d_fSubset1);
cudaSafeFree(g_cuHandle.m_d_fSubset2);
cudaSafeFree(g_cuHandle.m_d_fSubsetC);
cudaSafeFree(g_cuHandle.m_d_fZNCC);
cudaSafeFree(g_cuHandle.m_d_fU);
cudaSafeFree(g_cuHandle.m_d_fV);
hipfftDestroy(g_cuHandle.m_forwardPlanXY);
hipfftDestroy(g_cuHandle.m_reversePlanXY);
hdestroyptr<real_t>(fU);
hdestroyptr<real_t>(fV);
hdestroyptr<real_t>(fZNCC);
isDestroyed = true;
}
void cuFFTCC2D::cuInitializeFFTCC(// Output
real_t*& f_d_U,
real_t*& f_d_V,
real_t*& f_d_ZNCC,
// Input
const cv::Mat& refImg)
{
isLowLevelApiCalled = true;
//!- Precompute the POI postions, since this is invariant during the entire
// computation.
//!- Determine whether the whole image or the ROI is used
if(!m_isWholeImgUsed)
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
else
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iStartX, m_iStartY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
int_t iPOINum = GetNumPOIs();
int_t iROISize = GetROISize();
int_t iFFTSubW = m_iSubsetX * 2, iFFTSubH = m_iSubsetY * 2;
int_t iFFTSize = iFFTSubW * iFFTSubH;
int_t iFFTFreqSize = iFFTSubW * (iFFTSubH / 2 + 1);
//!- Allocate device memory
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fRefImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
checkCudaErrors(hipMemcpy(g_cuHandle.m_d_fRefImg,
(void*)refImg.data,
/*sizeof(uchar)**/refImg.rows*refImg.cols,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fTarImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
//!- Use these three parameters instead of the ones in g_cuHandle
checkCudaErrors(hipMalloc((void**)&f_d_U,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&f_d_V,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&f_d_ZNCC,
sizeof(real_t)*iPOINum));
//!- Initialize the CUFFT plan
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_dev_FreqDom1,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_dev_FreqDom2,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_dev_FreqDomfg,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
int dim[2] = { iFFTSubW, iFFTSubH };
int idim[2] = { iFFTSubW, iFFTSubH };
int odim[2] = { iFFTSubW, (iFFTSubH / 2 + 1) };
hipfftPlanMany(&(g_cuHandle.m_forwardPlanXY),
2, dim,
idim, 1, iFFTSize,
odim, 1, iFFTFreqSize,
HIPFFT_R2C, iPOINum);
hipfftPlanMany(&(g_cuHandle.m_reversePlanXY),
2, dim,
odim, 1, iFFTFreqSize,
idim, 1, iFFTSize,
HIPFFT_C2R, iPOINum);
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fSubset1,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fSubset2,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fSubsetC,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fMod1,
sizeof(real_t)*iPOINum));
checkCudaErrors(hipMalloc((void**)&g_cuHandle.m_d_fMod2,
sizeof(real_t)*iPOINum));
}
void cuFFTCC2D::cuComputeFFTCC(// Output
real_t*& f_d_U,
real_t*& f_d_V,
real_t*& f_d_ZNCC,
// Input
const cv::Mat& tarImg)
{
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
checkCudaErrors(hipMemcpy(g_cuHandle.m_d_fTarImg,
(void*)tarImg.data,
/*sizeof(uchar)**/tarImg.rows*tarImg.cols,
hipMemcpyHostToDevice));
auto iFFTSubW = m_iSubsetX * 2;
auto iFFTSubH = m_iSubsetY * 2;
auto iPOINum = GetNumPOIs();
hipEventRecord(start);
if(!m_isWholeImgUsed)
hipLaunchKernelGGL(( cufft_prepare_kernel) , dim3(iPOINum), dim3(BLOCK_SIZE_64) , 0, 0, g_cuHandle.m_d_iPOIXY,
g_cuHandle.m_d_fRefImg,
g_cuHandle.m_d_fTarImg,
iFFTSubH, iFFTSubW,
m_iSubsetX, m_iSubsetY,
m_iROIHeight, m_iROIWidth,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_d_fMod1,
g_cuHandle.m_d_fMod2);
else
hipLaunchKernelGGL(( cufft_prepare_kernel) , dim3(iPOINum), dim3(BLOCK_SIZE_64), 0, 0, g_cuHandle.m_d_iPOIXY,
g_cuHandle.m_d_fRefImg,
g_cuHandle.m_d_fTarImg,
iFFTSubH, iFFTSubW,
m_iSubsetX, m_iSubsetY,
m_iImgHeight, m_iImgWidth,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_d_fMod1,
g_cuHandle.m_d_fMod2);
getLastCudaError("Error in calling cufft_prepare_kernel");
#ifdef TW_USE_DOUBLE
hipfftExecD2Z(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_dev_FreqDom1);
hipfftExecD2Z(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_dev_FreqDom2);
#else
hipfftExecR2C(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset1,
g_cuHandle.m_dev_FreqDom1);
hipfftExecR2C(g_cuHandle.m_forwardPlanXY,
g_cuHandle.m_d_fSubset2,
g_cuHandle.m_dev_FreqDom2);
#endif // TW_USE_DOUBLE
hipLaunchKernelGGL(( complexMulandScale_kernel) , dim3(iPOINum), dim3(BLOCK_SIZE_128) , 0, 0, g_cuHandle.m_dev_FreqDom1,
g_cuHandle.m_dev_FreqDom2,
iFFTSubH, iFFTSubW,
g_cuHandle.m_d_fMod1,
g_cuHandle.m_d_fMod2,
g_cuHandle.m_dev_FreqDomfg);
getLastCudaError("Error in calling complexMulandScale_kernel");
#ifdef TW_USE_DOUBLE
hipfftExecZ2D(g_cuHandle.m_reversePlanXY,
g_cuHandle.m_dev_FreqDomfg,
g_cuHandle.m_d_fSubsetC);
#else
hipfftExecC2R(g_cuHandle.m_reversePlanXY,
g_cuHandle.m_dev_FreqDomfg,
g_cuHandle.m_d_fSubsetC);
#endif // TW_USE_DOUBLE
//!- Use the three arguments instead of the ones in g_cuHandle member
hipLaunchKernelGGL(( findMax) , dim3(iPOINum), dim3(BLOCK_SIZE_64) , 0, 0, g_cuHandle.m_d_fSubsetC,
iFFTSubH, iFFTSubW,
m_iSubsetX, m_iSubsetY,
f_d_U,
f_d_V,
f_d_ZNCC);
getLastCudaError("Error in calling findMax_Kernel");
hipEventRecord(end);
hipDeviceSynchronize();
float t_time;
hipEventElapsedTime(&t_time, start, end);
std::cout << "GPU FFT-CC Time is: " << t_time << std::endl;
}
//!- Low-level counterpart of DestroyFFTCC: releases every device buffer held in
// g_cuHandle, destroys both FFT plans, and frees the caller-owned device result
// buffers (f_d_U/f_d_V/f_d_ZNCC) that cuInitializeFFTCC allocated. The object
// must be re-initialized before any further Compute call.
void cuFFTCC2D::cuDestroyFFTCC(real_t*& f_d_U,
real_t*& f_d_V,
real_t*& f_d_ZNCC)
{
//!- Device images and normalization moduli
cudaSafeFree(g_cuHandle.m_d_fRefImg);
cudaSafeFree(g_cuHandle.m_d_fTarImg);
cudaSafeFree(g_cuHandle.m_d_fMod1);
cudaSafeFree(g_cuHandle.m_d_fMod2);
//!- POI coordinate table (x,y interleaved)
cudaSafeFree(g_cuHandle.m_d_iPOIXY);
//!- Frequency-domain work buffers for the batched FFTs
cudaSafeFree(g_cuHandle.m_dev_FreqDom1);
cudaSafeFree(g_cuHandle.m_dev_FreqDom2);
cudaSafeFree(g_cuHandle.m_dev_FreqDomfg);
//!- Spatial-domain subset buffers and correlation surface
cudaSafeFree(g_cuHandle.m_d_fSubset1);
cudaSafeFree(g_cuHandle.m_d_fSubset2);
cudaSafeFree(g_cuHandle.m_d_fSubsetC);
//!- Handle-owned result buffers (unused by the low-level path, freed anyway)
cudaSafeFree(g_cuHandle.m_d_fZNCC);
cudaSafeFree(g_cuHandle.m_d_fU);
cudaSafeFree(g_cuHandle.m_d_fV);
hipfftDestroy(g_cuHandle.m_forwardPlanXY);
hipfftDestroy(g_cuHandle.m_reversePlanXY);
//!- Caller-supplied device result buffers are owned by this class too
cudaSafeFree(f_d_U);
cudaSafeFree(f_d_V);
cudaSafeFree(f_d_ZNCC);
// Prevents the destructor from double-freeing the same buffers.
isDestroyed = true;
}
//!- Uploads a new reference image into the already-allocated device buffer.
// One byte per pixel (the buffer was allocated as rows*cols bytes — see the
// /*sizeof(uchar)*/ convention used at allocation time).
void cuFFTCC2D::ResetRefImg(const cv::Mat& refImg)
{
const size_t cbImage = static_cast<size_t>(refImg.rows) * static_cast<size_t>(refImg.cols);
checkCudaErrors(hipMemcpy(g_cuHandle.m_d_fRefImg,
(void*)refImg.data,
cbImage,
hipMemcpyHostToDevice));
}
//!- Uploads a new target (deformed) image into the existing device buffer.
// Byte count matches the allocation convention: one byte per pixel.
void cuFFTCC2D::SetTarImg(const cv::Mat& tarImg)
{
const size_t cbImage = static_cast<size_t>(tarImg.rows) * static_cast<size_t>(tarImg.cols);
checkCudaErrors(hipMemcpy(g_cuHandle.m_d_fTarImg,
(void*)tarImg.data,
cbImage,
hipMemcpyHostToDevice));
}
} //!- namespace paDIC
} //!- namespace TW | 2c1ad6fd8dd7324be74d58609c33700c343cbede.cu | #include "TW_paDIC_cuFFTCC2D.h"
#include <iostream>
#include <stdexcept>
#include "TW_MemManager.h"
#include "TW_utils.h"
#include <cuda.h>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
namespace TW{
namespace paDIC{
// !-----------------------------CUDA Kernel Functions -----------------------------------
/// \brief Compute complex numbers' multiplication and scale for a whole set of subsets used
/// in the parallel FFTCC algoirthm to ultimately acclerate the computation.
///
/// \param *w_a array which hold the FFT-transformed values for all the subsets in refImg
/// \param *w_b array which hold the FFT-transformed values for all the subsets in tarImg
/// \param m_iFFTSubH subset height of the FFT-CC subset
/// \param m_iFFTSubW subset width of the FFT-CC subset
/// \param m_dModf normalization parameter of refImg
/// \param m_dModg normalization parameter of tarImg
/// \param *w_c the result array
//!- One block per subset; each block walks its slice of the batched R2C
// spectra with a block-stride loop. NOTE(review): host code in this file
// calls d_ComplexMultiplyByConjVector instead of launching this kernel —
// confirm whether this kernel is still used anywhere.
__global__ void complexMulandScale_kernel(// Inputs
const cudafftComplex *w_a, const cudafftComplex *w_b,
int_t m_iFFTSubH, int_t m_iFFTSubW,
real_t *m_dModf, real_t *m_dModg,
// Output
cudafftComplex*w_c)
{
auto tid = threadIdx.x;
auto bid = blockIdx.x;
auto dim = blockDim.x;
// R2C output stores only m_iFFTSubH/2 + 1 rows of the full spectrum.
auto size = m_iFFTSubW * (m_iFFTSubH / 2 + 1);
// Offset into this block's (= this subset's) slice of the batched arrays.
const cudafftComplex * a = w_a + bid*size;
const cudafftComplex * b = w_b + bid*size;
cudafftComplex * c = w_c + bid*size;
for (auto i = tid; i < size; i += dim)
{
// Product scaled by 1/(sqrt(modF*modG) * N) — ZNCC normalization plus the
// 1/N factor the unnormalized inverse FFT would otherwise leave behind.
// NOTE(review): the 1.0 literal promotes the scale computation to double
// even when real_t is float — confirm that is intended.
c[i] = ComplexScale(ComplexMul(a[i], b[i]),
1.0 / (sqrt(m_dModf[bid] * m_dModg[bid]) * m_iFFTSubW * m_iFFTSubH));
}
}
/// \brief Using block-level parallel reduction algorithm to compute the maximum ZNCC values for
/// each subset in parallel. Each thread block is responsible for the calculation of one subset.
///
///
/// \param *w_Subset
//!- Per-subset peak search on the correlation surface. One block handles one
// subset: each thread scans a strided slice for its local maximum, then a
// block-level max-reduction picks the global winner. The flat peak index is
// unwrapped into a signed (u,v) displacement (FFT correlation wraps negative
// shifts to the far end of the array).
__global__ void findMax(real_t*w_SubsetC,
int_t m_iFFTSubH, int_t m_iFFTSubW,
int_t m_iSubsetX, int_t m_iSubsetY,
//return val
real_t *m_fU, real_t *m_fV,
real_t *m_dZNCC)
{
auto tid = threadIdx.x;
auto dim = blockDim.x;
auto bid = blockIdx.x;
// Shared scratch for the value/index max-reduction (one slot per thread).
__shared__ real_t sdata[BLOCK_SIZE_64];
__shared__ int_t sind[BLOCK_SIZE_64];
auto size = m_iFFTSubW * m_iFFTSubH;
// This block's slice of the batched correlation surfaces.
real_t *m_SubsetC = w_SubsetC + bid*(m_iFFTSubW * m_iFFTSubH);
// Per-thread max over a block-stride slice. Assumes size >= blockDim.x,
// otherwise the initial m_SubsetC[tid] read is out of bounds — TODO confirm.
real_t data = m_SubsetC[tid];
auto ind = tid;
for (auto id = tid + dim; id<size; id += dim)
{
if (data<m_SubsetC[id])
{
data = m_SubsetC[id];
ind = id;
}
}
// NOTE(review): all threads read sind[0] right after the reduction; this
// assumes reduceToMaxBlock ends with a barrier and leaves the winning
// value/index in sdata[0]/sind[0] — confirm against its definition.
reduceToMaxBlock<BLOCK_SIZE_64, real_t>(sdata, sind, data, ind, tid);
ind = sind[0];
// Unwrap the flat index into signed pixel shifts.
int_t peakx = ind%m_iFFTSubW;
int_t peaky = ind / m_iFFTSubW;
if (peakx>m_iSubsetX)
peakx -= m_iFFTSubW;
if (peaky>m_iSubsetY)
peaky -= m_iFFTSubH;
if (tid == 0)
{
m_fU[bid] = real_t(peakx);
m_fV[bid] = real_t(peaky);
m_dZNCC[bid] = sdata[0];
//m_dZNCC[bid] = data;
}
}
//!- Subset extraction + normalization, one block per POI. Pass 1 copies the
// reference/target subsets centered on the POI into the batched work buffers
// and accumulates their means; pass 2 subtracts the means in place and
// accumulates the sums of squares (the ZNCC normalization moduli).
__global__ void cufft_prepare_kernel(// Inputs
int_t *m_dPXY,
uchar1 *m_dR, uchar1 *m_dT,
int_t m_iFFTSubH, int_t m_iFFTSubW,
int_t m_iSubsetX, int_t m_iSubsetY,
int_t m_iHeight, int_t m_iWidth,
// Outputs
real_t *w_Subset1, real_t * w_Subset2,
real_t *m_dMod1, real_t *m_dMod2)
{
__shared__ real_t sm[BLOCK_SIZE_64];
auto bid = blockIdx.x;
auto dim = blockDim.x;
auto tid = threadIdx.x;
real_t d_tempt;
real_t d_sumR, d_sumT;
real_t d_aveR, d_aveT;
d_sumR = 0;
d_sumT = 0;
// This block's slice of the batched subset buffers.
real_t *m_Subset1 = w_Subset1 + bid*(m_iFFTSubW * m_iFFTSubH);
real_t *m_Subset2 = w_Subset2 + bid*(m_iFFTSubW * m_iFFTSubH);
auto size = m_iFFTSubH*m_iFFTSubW;
// Pass 1: gather pixels and accumulate the (pre-divided) mean contribution.
// m_dPXY holds interleaved (y, x) POI centers.
for (auto id = tid; id<size; id += dim)
{
int_t l = id / m_iFFTSubW;
int_t m = id % m_iFFTSubW;
d_tempt = (real_t)m_dR[(int_t(m_dPXY[bid * 2] - m_iSubsetY + l))*m_iWidth + int_t(m_dPXY[bid * 2 + 1] - m_iSubsetX + m)].x;
m_Subset1[id] = d_tempt;
d_sumR += d_tempt / size;
d_tempt = (real_t)m_dT[(int_t(m_dPXY[bid * 2] - m_iSubsetY + l))*m_iWidth + int_t(m_dPXY[bid * 2 + 1] - m_iSubsetX + m)].x;
m_Subset2[id] = d_tempt;
d_sumT += d_tempt / size;
}
/*d_aveR = blockReduceSum<BLOCK_SIZE_256, float>(d_sumR);
d_aveT = blockReduceSum<BLOCK_SIZE_256, float>(d_sumT);*/
// Block reductions: result is read back from sm[0] by every thread.
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumR, tid);
d_aveR = sm[0];
__syncthreads();
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumT, tid);
d_aveT = sm[0];
__syncthreads();
d_sumR = 0;
d_sumT = 0;
// Pass 2: zero-mean the subsets in place and accumulate sums of squares.
for (auto id = tid; id<size; id += dim)
{
d_tempt = m_Subset1[id] - d_aveR;
m_Subset1[id] = d_tempt;
d_sumR += pow(d_tempt, 2);
d_tempt = m_Subset2[id] - d_aveT;
m_Subset2[id] = d_tempt;
d_sumT += pow(d_tempt, 2);
}
// NOTE(review): no __syncthreads() between thread 0's read of sm[0] and the
// next reduceBlock reusing sm — safe only if reduceBlock begins with a
// barrier; confirm against its definition.
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumR, tid);
if (tid == 0)
d_aveR = sm[0];
reduceBlock<BLOCK_SIZE_64, real_t>(sm, d_sumT, tid);
if (tid == 0)
d_aveT = sm[0];
if (tid == 0)
{
// Publish the normalization moduli (sum of squared deviations per subset).
m_dMod1[bid] = d_aveR;
m_dMod2[bid] = d_aveT;
}
}
// ------------------------------CUDA Kernel Functions End-------------------------------!
//!- ROI-only constructor: the correlation grid is laid out over an ROI whose
// size equals the image (no separate start offset). Throws if the grid
// parameters yield a non-positive POI count.
cuFFTCC2D::cuFFTCC2D(const int_t iROIWidth, const int_t iROIHeight,
const int_t iSubsetX, const int_t iSubsetY,
const int_t iGridSpaceX, const int_t iGridSpaceY,
const int_t iMarginX, const int_t iMarginY)
: Fftcc2D(iROIWidth, iROIHeight,
iSubsetX, iSubsetY,
iGridSpaceX, iGridSpaceY,
iMarginX, iMarginY)
, isLowLevelApiCalled(false)
, isDestroyed(false)
{
if (!recomputeNumPOI())
throw std::logic_error("Number of POIs is below 0!");
}
//!- Whole-image constructor: an ROI of iROIWidth x iROIHeight starting at
// (iStartX, iStartY) inside an iImgWidth x iImgHeight image. Throws if the
// grid parameters yield a non-positive POI count.
cuFFTCC2D::cuFFTCC2D(const int_t iImgWidth, const int_t iImgHeight,
const int_t iROIWidth, const int_t iROIHeight,
const int_t iStartX, const int_t iStartY,
const int_t iSubsetX, const int_t iSubsetY,
const int_t iGridSpaceX, const int_t iGridSpaceY,
const int_t iMarginX, const int_t iMarginY)
: Fftcc2D(iImgWidth, iImgHeight,
iStartX, iStartY,
iROIWidth, iROIHeight,
iSubsetX, iSubsetY,
iGridSpaceX, iGridSpaceY,
iMarginX, iMarginY)
, isLowLevelApiCalled(false)
, isDestroyed(false)
{
if (!recomputeNumPOI())
throw std::logic_error("Number of POIs is below 0!");
}
//!- Safety-net teardown: frees all handle-owned device resources unless one of
// the Destroy* methods already ran (isDestroyed guards double-free).
// NOTE(review): unlike DestroyFFTCC/cuDestroyFFTCC, this path does not free
// g_cuHandle.m_d_iPOIXY — confirm whether that is a deliberate omission.
cuFFTCC2D::~cuFFTCC2D()
{
if(!isDestroyed)
{
cudaSafeFree(g_cuHandle.m_d_fRefImg);
cudaSafeFree(g_cuHandle.m_d_fTarImg);
cudaSafeFree(g_cuHandle.m_d_fMod1);
cudaSafeFree(g_cuHandle.m_d_fMod2);
cudaSafeFree(g_cuHandle.m_dev_FreqDom1);
cudaSafeFree(g_cuHandle.m_dev_FreqDom2);
cudaSafeFree(g_cuHandle.m_dev_FreqDomfg);
cudaSafeFree(g_cuHandle.m_d_fSubset1);
cudaSafeFree(g_cuHandle.m_d_fSubset2);
cudaSafeFree(g_cuHandle.m_d_fSubsetC);
cudaSafeFree(g_cuHandle.m_d_fZNCC);
cudaSafeFree(g_cuHandle.m_d_fU);
cudaSafeFree(g_cuHandle.m_d_fV);
cufftDestroy(g_cuHandle.m_forwardPlanXY);
cufftDestroy(g_cuHandle.m_reversePlanXY);
}
}
//!- High-level initialization: computes the POI grid once, allocates host
// result arrays (fU/fV/fZNCC, one entry per POI), allocates all device
// buffers, uploads the reference image, and creates the batched forward and
// inverse FFT plans (one 2D transform per POI).
void cuFFTCC2D::InitializeFFTCC(// Output
real_t**& fU,
real_t**& fV,
real_t**& fZNCC,
// Input
const cv::Mat& refImg)
{
//!- Check if the low level api is called or not
if (isLowLevelApiCalled)
{
std::cout << "The low-level GPU APIs are already initialized!\n";
return;
}
//!- Precompute the POI postions, since this is invariant during the entire
// computation.
//!- Determine whether the whole image or the ROI is used
if(!m_isWholeImgUsed)
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
else
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iStartX, m_iStartY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
int_t iPOINum = GetNumPOIs();
//!- Allocate host memory
hcreateptr<real_t>(fU, m_iNumPOIY, m_iNumPOIX);
hcreateptr<real_t>(fV, m_iNumPOIY, m_iNumPOIX);
hcreateptr<real_t>(fZNCC, m_iNumPOIY, m_iNumPOIX);
int_t iROISize = GetROISize();
// FFT window is twice the half-subset size in each dimension.
int_t iFFTSubW = m_iSubsetX * 2, iFFTSubH = m_iSubsetY * 2;
int_t iFFTSize = iFFTSubW * iFFTSubH;
// R2C spectrum stores only iFFTSubH/2 + 1 rows.
int_t iFFTFreqSize = iFFTSubW * (iFFTSubH / 2 + 1);
//!- Allocate device memory
// Image buffers: one byte per pixel (8-bit images).
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fRefImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
checkCudaErrors(cudaMemcpy(g_cuHandle.m_d_fRefImg,
(void*)refImg.data,
/*sizeof(uchar)**/refImg.rows*refImg.cols,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fTarImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
// Per-POI result buffers.
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fU,
sizeof(real_t)*iPOINum));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fV,
sizeof(real_t)*iPOINum));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fZNCC,
sizeof(real_t)*iPOINum));
// Batched frequency-domain work buffers (one spectrum per POI).
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_dev_FreqDom1,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_dev_FreqDom2,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_dev_FreqDomfg,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
//!- Initialize the CUFFT plan
// Batched 2D R2C/C2R plans: one transform per POI, contiguous layout.
int dim[2] = { iFFTSubW, iFFTSubH };
int idim[2] = { iFFTSubW, iFFTSubH };
int odim[2] = { iFFTSubW, (iFFTSubH / 2 + 1) };
cufftPlanMany(&(g_cuHandle.m_forwardPlanXY),
2, dim,
idim, 1, iFFTSize,
odim, 1, iFFTFreqSize,
CUFFT_R2C, iPOINum);
cufftPlanMany(&(g_cuHandle.m_reversePlanXY),
2, dim,
odim, 1, iFFTFreqSize,
idim, 1, iFFTSize,
CUFFT_C2R, iPOINum);
// Batched spatial-domain buffers: ref subsets, tar subsets, correlation.
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fSubset1,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fSubset2,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fSubsetC,
sizeof(real_t)*iPOINum*iFFTSize));
// Per-POI ZNCC normalization moduli.
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fMod1,
sizeof(real_t)*iPOINum));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fMod2,
sizeof(real_t)*iPOINum));
cudaDeviceSynchronize();
}
//!- High-level FFT-CC computation: uploads the target image, extracts and
// normalizes all subsets, cross-correlates every POI pair via batched
// forward FFT -> conjugate multiply -> inverse FFT, locates each correlation
// peak, and copies the per-POI (u, v, ZNCC) results back to the host arrays
// allocated by InitializeFFTCC.
// Fix vs. original: launch-error checks after complexMulandScale/findMax were
// missing here but present in the sibling cuComputeFFTCC — added for
// consistency so kernel-configuration failures are not silently dropped.
void cuFFTCC2D::ComputeFFTCC(// Output
	real_t**& fU,
	real_t**& fV,
	real_t**& fZNCC,
	// Input
	const cv::Mat& tarImg)
{
	//if(tarImg.cols != )
	// Upload the target image (one byte per pixel).
	checkCudaErrors(cudaMemcpy(g_cuHandle.m_d_fTarImg,
		(void*)tarImg.data,
		/*sizeof(uchar)**/tarImg.cols*tarImg.rows,
		cudaMemcpyHostToDevice));
	auto iFFTSubW = m_iSubsetX * 2;
	auto iFFTSubH = m_iSubsetY * 2;
	auto iPOINum = GetNumPOIs();
	// Extract + zero-mean all subsets; row pitch differs between the ROI-only
	// and whole-image configurations.
	if(!m_isWholeImgUsed)
		cufft_prepare_kernel <<<iPOINum, BLOCK_SIZE_64 >>> (g_cuHandle.m_d_iPOIXY,
			g_cuHandle.m_d_fRefImg,
			g_cuHandle.m_d_fTarImg,
			iFFTSubH, iFFTSubW,
			m_iSubsetX, m_iSubsetY,
			m_iROIHeight, m_iROIWidth,
			g_cuHandle.m_d_fSubset1,
			g_cuHandle.m_d_fSubset2,
			g_cuHandle.m_d_fMod1,
			g_cuHandle.m_d_fMod2);
	else
		cufft_prepare_kernel <<<iPOINum, BLOCK_SIZE_64 >>> (g_cuHandle.m_d_iPOIXY,
			g_cuHandle.m_d_fRefImg,
			g_cuHandle.m_d_fTarImg,
			iFFTSubH, iFFTSubW,
			m_iSubsetX, m_iSubsetY,
			m_iImgHeight, m_iImgWidth,
			g_cuHandle.m_d_fSubset1,
			g_cuHandle.m_d_fSubset2,
			g_cuHandle.m_d_fMod1,
			g_cuHandle.m_d_fMod2);
	getLastCudaError("Error in calling cufft_prepare_kernel");
	// Batched forward transforms of both subset stacks.
#ifdef TW_USE_DOUBLE
	cufftExecD2Z(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset1,
		g_cuHandle.m_dev_FreqDom1);
	cufftExecD2Z(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset2,
		g_cuHandle.m_dev_FreqDom2);
#else
	cufftExecR2C(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset1,
		g_cuHandle.m_dev_FreqDom1);
	cufftExecR2C(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset2,
		g_cuHandle.m_dev_FreqDom2);
#endif // TW_USE_DOUBLE
	// Pointwise conjugate multiply with ZNCC scaling, one block per POI.
	complexMulandScale_kernel <<<iPOINum, BLOCK_SIZE_128 >>> (g_cuHandle.m_dev_FreqDom1,
		g_cuHandle.m_dev_FreqDom2,
		iFFTSubH, iFFTSubW,
		g_cuHandle.m_d_fMod1,
		g_cuHandle.m_d_fMod2,
		g_cuHandle.m_dev_FreqDomfg);
	getLastCudaError("Error in calling complexMulandScale_kernel");
	// Batched inverse transform yields the correlation surfaces.
#ifdef TW_USE_DOUBLE
	cufftExecZ2D(g_cuHandle.m_reversePlanXY,
		g_cuHandle.m_dev_FreqDomfg,
		g_cuHandle.m_d_fSubsetC);
#else
	cufftExecC2R(g_cuHandle.m_reversePlanXY,
		g_cuHandle.m_dev_FreqDomfg,
		g_cuHandle.m_d_fSubsetC);
#endif // TW_USE_DOUBLE
	// One block per POI: locate the correlation peak -> (u, v, ZNCC).
	findMax <<<iPOINum, BLOCK_SIZE_64 >>> (g_cuHandle.m_d_fSubsetC,
		iFFTSubH, iFFTSubW,
		m_iSubsetX, m_iSubsetY,
		g_cuHandle.m_d_fU,
		g_cuHandle.m_d_fV,
		g_cuHandle.m_d_fZNCC);
	getLastCudaError("Error in calling findMax_Kernel");
	// Copy results out through the first row pointer — assumes hcreateptr
	// allocated the 2D host arrays as one contiguous slab (TODO confirm).
	checkCudaErrors(cudaMemcpy(fU[0],
		g_cuHandle.m_d_fU,
		sizeof(real_t)*iPOINum,
		cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaMemcpy(fV[0],
		g_cuHandle.m_d_fV,
		sizeof(real_t)*iPOINum,
		cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaMemcpy(fZNCC[0],
		g_cuHandle.m_d_fZNCC,
		sizeof(real_t)*iPOINum,
		cudaMemcpyDeviceToHost));
}
//!- High-level teardown: frees every device buffer in g_cuHandle (including
// the POI table), destroys both FFT plans, and releases the host result
// arrays allocated by InitializeFFTCC. Marks the object destroyed so the
// destructor does not double-free.
void cuFFTCC2D::DestroyFFTCC(real_t**& fU,
real_t**& fV,
real_t**& fZNCC)
{
cudaSafeFree(g_cuHandle.m_d_fRefImg);
cudaSafeFree(g_cuHandle.m_d_fTarImg);
cudaSafeFree(g_cuHandle.m_d_fMod1);
cudaSafeFree(g_cuHandle.m_d_fMod2);
cudaSafeFree(g_cuHandle.m_d_iPOIXY);
cudaSafeFree(g_cuHandle.m_dev_FreqDom1);
cudaSafeFree(g_cuHandle.m_dev_FreqDom2);
cudaSafeFree(g_cuHandle.m_dev_FreqDomfg);
cudaSafeFree(g_cuHandle.m_d_fSubset1);
cudaSafeFree(g_cuHandle.m_d_fSubset2);
cudaSafeFree(g_cuHandle.m_d_fSubsetC);
cudaSafeFree(g_cuHandle.m_d_fZNCC);
cudaSafeFree(g_cuHandle.m_d_fU);
cudaSafeFree(g_cuHandle.m_d_fV);
cufftDestroy(g_cuHandle.m_forwardPlanXY);
cufftDestroy(g_cuHandle.m_reversePlanXY);
//!- Host-side 2D result arrays
hdestroyptr<real_t>(fU);
hdestroyptr<real_t>(fV);
hdestroyptr<real_t>(fZNCC);
isDestroyed = true;
}
//!- Low-level initialization: same device-side setup as InitializeFFTCC, but
// the per-POI results stay on the device in the caller-supplied buffers
// f_d_U/f_d_V/f_d_ZNCC (allocated here) instead of host 2D arrays.
void cuFFTCC2D::cuInitializeFFTCC(// Output
real_t*& f_d_U,
real_t*& f_d_V,
real_t*& f_d_ZNCC,
// Input
const cv::Mat& refImg)
{
isLowLevelApiCalled = true;
//!- Precompute the POI postions, since this is invariant during the entire
// computation.
//!- Determine whether the whole image or the ROI is used
if(!m_isWholeImgUsed)
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
else
cuComputePOIPositions(g_cuHandle.m_d_iPOIXY,
m_iStartX, m_iStartY,
m_iNumPOIX, m_iNumPOIY,
m_iMarginX, m_iMarginY,
m_iSubsetX, m_iSubsetY,
m_iGridSpaceX, m_iGridSpaceY);
int_t iPOINum = GetNumPOIs();
int_t iROISize = GetROISize();
// FFT window is twice the half-subset size; R2C spectra store H/2+1 rows.
int_t iFFTSubW = m_iSubsetX * 2, iFFTSubH = m_iSubsetY * 2;
int_t iFFTSize = iFFTSubW * iFFTSubH;
int_t iFFTFreqSize = iFFTSubW * (iFFTSubH / 2 + 1);
//!- Allocate device memory
// Image buffers: one byte per pixel (8-bit images).
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fRefImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
checkCudaErrors(cudaMemcpy(g_cuHandle.m_d_fRefImg,
(void*)refImg.data,
/*sizeof(uchar)**/refImg.rows*refImg.cols,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fTarImg,
/*sizeof(uchar)**/refImg.rows*refImg.cols));
//!- Use these three parameters instead of the ones in g_cuHandle
checkCudaErrors(cudaMalloc((void**)&f_d_U,
sizeof(real_t)*iPOINum));
checkCudaErrors(cudaMalloc((void**)&f_d_V,
sizeof(real_t)*iPOINum));
checkCudaErrors(cudaMalloc((void**)&f_d_ZNCC,
sizeof(real_t)*iPOINum));
//!- Initialize the CUFFT plan
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_dev_FreqDom1,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_dev_FreqDom2,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_dev_FreqDomfg,
sizeof(cudafftComplex)*iPOINum*iFFTFreqSize));
// Batched 2D R2C/C2R plans: one transform per POI, contiguous layout.
int dim[2] = { iFFTSubW, iFFTSubH };
int idim[2] = { iFFTSubW, iFFTSubH };
int odim[2] = { iFFTSubW, (iFFTSubH / 2 + 1) };
cufftPlanMany(&(g_cuHandle.m_forwardPlanXY),
2, dim,
idim, 1, iFFTSize,
odim, 1, iFFTFreqSize,
CUFFT_R2C, iPOINum);
cufftPlanMany(&(g_cuHandle.m_reversePlanXY),
2, dim,
odim, 1, iFFTFreqSize,
idim, 1, iFFTSize,
CUFFT_C2R, iPOINum);
// Batched spatial-domain buffers and per-POI normalization moduli.
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fSubset1,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fSubset2,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fSubsetC,
sizeof(real_t)*iPOINum*iFFTSize));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fMod1,
sizeof(real_t)*iPOINum));
checkCudaErrors(cudaMalloc((void**)&g_cuHandle.m_d_fMod2,
sizeof(real_t)*iPOINum));
}
//!- Low-level FFT-CC computation: identical pipeline to ComputeFFTCC but the
// per-POI results are written into the caller's device buffers
// f_d_U/f_d_V/f_d_ZNCC and left on the device. Also times the GPU pipeline
// with CUDA events and prints the elapsed milliseconds.
// Fix vs. original: the two cudaEvent_t objects were created on every call
// and never destroyed — a per-call resource leak. They are now destroyed
// after the elapsed time is read.
void cuFFTCC2D::cuComputeFFTCC(// Output
	real_t*& f_d_U,
	real_t*& f_d_V,
	real_t*& f_d_ZNCC,
	// Input
	const cv::Mat& tarImg)
{
	cudaEvent_t start, end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	// Upload the target image (one byte per pixel).
	checkCudaErrors(cudaMemcpy(g_cuHandle.m_d_fTarImg,
		(void*)tarImg.data,
		/*sizeof(uchar)**/tarImg.rows*tarImg.cols,
		cudaMemcpyHostToDevice));
	auto iFFTSubW = m_iSubsetX * 2;
	auto iFFTSubH = m_iSubsetY * 2;
	auto iPOINum = GetNumPOIs();
	cudaEventRecord(start);
	// Extract + zero-mean all subsets; row pitch differs between the ROI-only
	// and whole-image configurations.
	if(!m_isWholeImgUsed)
		cufft_prepare_kernel <<<iPOINum, BLOCK_SIZE_64 >>> (g_cuHandle.m_d_iPOIXY,
			g_cuHandle.m_d_fRefImg,
			g_cuHandle.m_d_fTarImg,
			iFFTSubH, iFFTSubW,
			m_iSubsetX, m_iSubsetY,
			m_iROIHeight, m_iROIWidth,
			g_cuHandle.m_d_fSubset1,
			g_cuHandle.m_d_fSubset2,
			g_cuHandle.m_d_fMod1,
			g_cuHandle.m_d_fMod2);
	else
		cufft_prepare_kernel <<<iPOINum, BLOCK_SIZE_64>>> (g_cuHandle.m_d_iPOIXY,
			g_cuHandle.m_d_fRefImg,
			g_cuHandle.m_d_fTarImg,
			iFFTSubH, iFFTSubW,
			m_iSubsetX, m_iSubsetY,
			m_iImgHeight, m_iImgWidth,
			g_cuHandle.m_d_fSubset1,
			g_cuHandle.m_d_fSubset2,
			g_cuHandle.m_d_fMod1,
			g_cuHandle.m_d_fMod2);
	getLastCudaError("Error in calling cufft_prepare_kernel");
	// Batched forward transforms of both subset stacks.
#ifdef TW_USE_DOUBLE
	cufftExecD2Z(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset1,
		g_cuHandle.m_dev_FreqDom1);
	cufftExecD2Z(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset2,
		g_cuHandle.m_dev_FreqDom2);
#else
	cufftExecR2C(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset1,
		g_cuHandle.m_dev_FreqDom1);
	cufftExecR2C(g_cuHandle.m_forwardPlanXY,
		g_cuHandle.m_d_fSubset2,
		g_cuHandle.m_dev_FreqDom2);
#endif // TW_USE_DOUBLE
	// Pointwise conjugate multiply with ZNCC scaling, one block per POI.
	complexMulandScale_kernel <<<iPOINum, BLOCK_SIZE_128 >>> (g_cuHandle.m_dev_FreqDom1,
		g_cuHandle.m_dev_FreqDom2,
		iFFTSubH, iFFTSubW,
		g_cuHandle.m_d_fMod1,
		g_cuHandle.m_d_fMod2,
		g_cuHandle.m_dev_FreqDomfg);
	getLastCudaError("Error in calling complexMulandScale_kernel");
	// Batched inverse transform yields the correlation surfaces.
#ifdef TW_USE_DOUBLE
	cufftExecZ2D(g_cuHandle.m_reversePlanXY,
		g_cuHandle.m_dev_FreqDomfg,
		g_cuHandle.m_d_fSubsetC);
#else
	cufftExecC2R(g_cuHandle.m_reversePlanXY,
		g_cuHandle.m_dev_FreqDomfg,
		g_cuHandle.m_d_fSubsetC);
#endif // TW_USE_DOUBLE
	//!- Use the three arguments instead of the ones in g_cuHandle member
	findMax <<<iPOINum, BLOCK_SIZE_64 >>> (g_cuHandle.m_d_fSubsetC,
		iFFTSubH, iFFTSubW,
		m_iSubsetX, m_iSubsetY,
		f_d_U,
		f_d_V,
		f_d_ZNCC);
	getLastCudaError("Error in calling findMax_Kernel");
	cudaEventRecord(end);
	cudaDeviceSynchronize();
	float t_time;
	cudaEventElapsedTime(&t_time, start, end);
	std::cout << "GPU FFT-CC Time is: " << t_time << std::endl;
	// Release the timing events (the original leaked them on every call).
	cudaEventDestroy(start);
	cudaEventDestroy(end);
}
//!- Low-level teardown: frees every device buffer in g_cuHandle (including
// the POI table), destroys both FFT plans, and frees the caller's device
// result buffers allocated by cuInitializeFFTCC. Marks the object destroyed
// so the destructor does not double-free.
void cuFFTCC2D::cuDestroyFFTCC(real_t*& f_d_U,
real_t*& f_d_V,
real_t*& f_d_ZNCC)
{
cudaSafeFree(g_cuHandle.m_d_fRefImg);
cudaSafeFree(g_cuHandle.m_d_fTarImg);
cudaSafeFree(g_cuHandle.m_d_fMod1);
cudaSafeFree(g_cuHandle.m_d_fMod2);
cudaSafeFree(g_cuHandle.m_d_iPOIXY);
cudaSafeFree(g_cuHandle.m_dev_FreqDom1);
cudaSafeFree(g_cuHandle.m_dev_FreqDom2);
cudaSafeFree(g_cuHandle.m_dev_FreqDomfg);
cudaSafeFree(g_cuHandle.m_d_fSubset1);
cudaSafeFree(g_cuHandle.m_d_fSubset2);
cudaSafeFree(g_cuHandle.m_d_fSubsetC);
cudaSafeFree(g_cuHandle.m_d_fZNCC);
cudaSafeFree(g_cuHandle.m_d_fU);
cudaSafeFree(g_cuHandle.m_d_fV);
cufftDestroy(g_cuHandle.m_forwardPlanXY);
cufftDestroy(g_cuHandle.m_reversePlanXY);
//!- Caller-supplied device result buffers
cudaSafeFree(f_d_U);
cudaSafeFree(f_d_V);
cudaSafeFree(f_d_ZNCC);
isDestroyed = true;
}
//!- Uploads a new reference image into the already-allocated device buffer.
// One byte per pixel, matching the rows*cols allocation convention.
void cuFFTCC2D::ResetRefImg(const cv::Mat& refImg)
{
const size_t cbImage = static_cast<size_t>(refImg.rows) * static_cast<size_t>(refImg.cols);
checkCudaErrors(cudaMemcpy(g_cuHandle.m_d_fRefImg,
(void*)refImg.data,
cbImage,
cudaMemcpyHostToDevice));
}
//!- Uploads a new target (deformed) image into the existing device buffer.
// One byte per pixel, matching the rows*cols allocation convention.
void cuFFTCC2D::SetTarImg(const cv::Mat& tarImg)
{
const size_t cbImage = static_cast<size_t>(tarImg.rows) * static_cast<size_t>(tarImg.cols);
checkCudaErrors(cudaMemcpy(g_cuHandle.m_d_fTarImg,
(void*)tarImg.data,
cbImage,
cudaMemcpyHostToDevice));
}
} //!- namespace paDIC
} //!- namespace TW |
e6f485efcb72b580c9b0fb2438bc73a36128264c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/sum_pooling_layer.hpp"
namespace caffe {
// Sum-pools each channel over every inner (spatial) position:
//   top(i, c) = sum_j bottom(i, c, j),  index == i * channels + c.
// One thread per (sample, channel) pair; nthreads == outer_num * channels.
// Fix vs. original: accumulate in a register instead of performing
// inner_num_ read-modify-write cycles on global memory (the original zeroed
// top_data[index] and then added into it once per element).
template <typename Dtype>
__global__ void SumPoolingForwardGPU(const int nthreads,
    const Dtype* bottom_data, Dtype* top_data, const int channels,
    const int inner_num_) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / channels;
    const int c = index % channels;
    // Contiguous run of inner_num_ values for this (sample, channel).
    const Dtype* src = bottom_data + (i * channels + c) * inner_num_;
    Dtype sum = 0;
    for (int j = 0; j < inner_num_; ++j) {
      sum += src[j];
    }
    top_data[index] = sum;
  }
}
// Forward pass: launches one thread per (sample, channel) pair to sum the
// blob's inner (spatial) dimension. inner_num_ is derived from the blob
// shape so the layer works for any spatial size.
template <typename Dtype>
void SumPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int channels = bottom[0]->shape(1);
// Elements per (sample, channel): total count / (N * C).
const int inner_num_ = bottom[0]->count() / (channels * outer_num_);
const int nthreads = outer_num_ * channels;
hipLaunchKernelGGL(( SumPoolingForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, top_data,
channels, inner_num_);
}
// Backward of sum pooling: the gradient of output (i, c) is broadcast
// unchanged to every inner position of input (i, c).
// One thread per bottom element; nthreads == outer_num * channels * inner_num_.
template <typename Dtype>
__global__ void SumPoolingBackwardGPU(const int nthreads,
const Dtype* top_diff, Dtype* bottom_diff,
const int channels, const int inner_num_) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index == (i * channels + c) * inner_num_ + j; drop j, then split (i, c).
const int pair = index / inner_num_;
const int i = pair / channels;
const int c = pair % channels;
bottom_diff[index] = top_diff[i * channels + c];
}
}
// Backward pass: broadcasts each top gradient to every spatial position of
// the corresponding bottom element. No-op when the bottom needs no gradient.
template <typename Dtype>
void SumPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int channels = bottom[0]->shape(1);
// Elements per (sample, channel): total count / (N * C).
const int inner_num_ = bottom[0]->count() / (channels * outer_num_);
// Gradient with respect to bottom data
const int nthreads = outer_num_ * channels * inner_num_;
hipLaunchKernelGGL(( SumPoolingBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_diff, bottom_diff, channels, inner_num_);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SumPoolingLayer);
} // namespace caffe
| e6f485efcb72b580c9b0fb2438bc73a36128264c.cu | #include "caffe/layers/sum_pooling_layer.hpp"
namespace caffe {
// Sum-pools each channel over every inner (spatial) position:
//   top(i, c) = sum_j bottom(i, c, j),  index == i * channels + c.
// One thread per (sample, channel) pair; nthreads == outer_num * channels.
// Fix vs. original: accumulate in a register instead of performing
// inner_num_ read-modify-write cycles on global memory (the original zeroed
// top_data[index] and then added into it once per element).
template <typename Dtype>
__global__ void SumPoolingForwardGPU(const int nthreads,
    const Dtype* bottom_data, Dtype* top_data, const int channels,
    const int inner_num_) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / channels;
    const int c = index % channels;
    // Contiguous run of inner_num_ values for this (sample, channel).
    const Dtype* src = bottom_data + (i * channels + c) * inner_num_;
    Dtype sum = 0;
    for (int j = 0; j < inner_num_; ++j) {
      sum += src[j];
    }
    top_data[index] = sum;
  }
}
// Forward pass: one thread per (sample, channel) pair sums the blob's inner
// (spatial) dimension. inner_num_ is derived from the blob shape so the
// layer works for any spatial size.
// Fix vs. original: added CUDA_POST_KERNEL_CHECK (standard Caffe idiom) so
// kernel launch/configuration errors are surfaced instead of being dropped.
template <typename Dtype>
void SumPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int channels = bottom[0]->shape(1);
  // Elements per (sample, channel): total count / (N * C).
  const int inner_num_ = bottom[0]->count() / (channels * outer_num_);
  const int nthreads = outer_num_ * channels;
  SumPoolingForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, top_data,
      channels, inner_num_);
  CUDA_POST_KERNEL_CHECK;
}
// Backward of sum pooling: the gradient of output (i, c) is broadcast
// unchanged to every inner position of input (i, c).
// One thread per bottom element; nthreads == outer_num * channels * inner_num_.
template <typename Dtype>
__global__ void SumPoolingBackwardGPU(const int nthreads,
    const Dtype* top_diff, Dtype* bottom_diff,
    const int channels, const int inner_num_) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // index == (i * channels + c) * inner_num_ + j; drop j, then split (i, c).
    const int pair = index / inner_num_;
    const int i = pair / channels;
    const int c = pair % channels;
    bottom_diff[index] = top_diff[i * channels + c];
  }
}
// Backward pass: broadcasts each top gradient to every spatial position of
// the corresponding bottom element. No-op when the bottom needs no gradient.
// Fix vs. original: added CUDA_POST_KERNEL_CHECK (standard Caffe idiom) so
// kernel launch/configuration errors are surfaced instead of being dropped.
template <typename Dtype>
void SumPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int channels = bottom[0]->shape(1);
    // Elements per (sample, channel): total count / (N * C).
    const int inner_num_ = bottom[0]->count() / (channels * outer_num_);
    // Gradient with respect to bottom data
    const int nthreads = outer_num_ * channels * inner_num_;
    SumPoolingBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_diff, bottom_diff, channels, inner_num_);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SumPoolingLayer);
} // namespace caffe
|
61496c4d799f6f98196d5e44e691a53a5154f1d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "Prerequisites.cuh"
#include "Alignment.cuh"
#include "Correlation.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
#include "Transformation.cuh"
namespace gtom
{
////////////////////////////////////
//Equivalent of tom_os3_alignStack//
////////////////////////////////////
void d_Align2D(tfloat* d_input, tfloat* d_targets, int3 dims, int numtargets, tfloat3* h_params, int* h_membership, tfloat* h_scores, int maxtranslation, tfloat maxrotation, int iterations, T_ALIGN_MODE mode, int batch)
{
int polarboost = 100; //Sub-pixel precision for polar correlation peak
int padding = max(dims.x / 2 - (int)((tfloat)1 / (sin(min(ToRad(90), ToRad(45) + maxrotation)) / sin(ToRad(45))) * (tfloat)(dims.x / 2)), maxtranslation);
int3 effdims = toInt3(dims.x - padding * 2, dims.y - padding * 2, 1);
int3 polardims = toInt3(GetCart2PolarSize(toInt2(effdims.x, effdims.y)));
#pragma region Targets
tcomplex* d_targetscartFFT;
hipMalloc((void**)&d_targetscartFFT, ElementsFFT(effdims) * numtargets * sizeof(tcomplex));
tcomplex* d_targetspolarFFT;
hipMalloc((void**)&d_targetspolarFFT, ElementsFFT(polardims) * numtargets * sizeof(tcomplex));
{
tfloat* d_targetscart;
hipMalloc((void**)&d_targetscart, Elements(effdims) * numtargets * sizeof(tfloat));
tfloat* d_targetspolar;
hipMalloc((void**)&d_targetspolar, Elements(polardims) * numtargets * sizeof(tfloat));
d_Extract(d_targets, d_targetscart, dims, effdims, toInt3(dims.x / 2, dims.y / 2, 0), numtargets);
d_Cart2Polar(d_targetscart, d_targetspolar, toInt2(effdims.x, effdims.y), T_INTERP_CUBIC, numtargets);
d_NormMonolithic(d_targetscart, d_targetscart, Elements(effdims), NULL, T_NORM_MEAN01STD, numtargets);
d_NormMonolithic(d_targetspolar, d_targetspolar, Elements(polardims), NULL, T_NORM_MEAN01STD, numtargets);
d_FFTR2C(d_targetscart, d_targetscartFFT, 2, effdims, numtargets);
d_FFTR2C(d_targetspolar, d_targetspolarFFT, 2, polardims, numtargets);
hipFree(d_targetspolar);
hipFree(d_targetscart);
}
#pragma endregion
#pragma region Atlas
int3 atlasdims = toInt3(1, 1, 1);
int2 atlasprimitives = toInt2(1, 1);
int2* h_atlascoords = (int2*)malloc(batch * sizeof(int2));
tfloat* d_atlas = d_MakeAtlas(d_input, toInt3(dims.x, dims.y, batch), atlasdims, atlasprimitives, h_atlascoords);
int atlasrow = atlasprimitives.x;
#pragma endregion
#pragma region Masks
tfloat* d_maskcart = CudaMallocValueFilled(Elements(effdims), (tfloat)1 / (tfloat)Elements(effdims));
tfloat* d_maskpolar;
hipMalloc((void**)&d_maskpolar, polardims.y * polarboost * sizeof(tfloat));
{
tfloat fmaxtranslation = (tfloat)(maxtranslation + 1);
d_SphereMask(d_maskcart, d_maskcart, effdims, &fmaxtranslation, (tfloat)1, (tfloat3*)NULL);
tfloat* h_maskpolar = MallocValueFilled(polardims.y * polarboost, (tfloat)0);
h_maskpolar[0] = (tfloat)1;
for (int a = 1; a < (int)ceil(maxrotation / PI2 * (tfloat)(polardims.y * polarboost)); a++)
{
h_maskpolar[a] = (tfloat)1;
h_maskpolar[polardims.y * polarboost - a] = (tfloat)1;
}
hipMemcpy(d_maskpolar, h_maskpolar, polardims.y * polarboost * sizeof(tfloat), hipMemcpyHostToDevice);
free(h_maskpolar);
}
#pragma endregion
tfloat* d_datacart;
hipMalloc((void**)&d_datacart, Elements(effdims) * batch * sizeof(tfloat));
tfloat* d_datapolar;
hipMalloc((void**)&d_datapolar, Elements(polardims) * batch * sizeof(tfloat));
tcomplex* d_datacartFFT;
hipMalloc((void**)&d_datacartFFT, ElementsFFT(effdims) * batch * sizeof(tcomplex));
tcomplex* d_datapolarFFT;
hipMalloc((void**)&d_datapolarFFT, ElementsFFT(polardims) * batch * sizeof(tcomplex));
tfloat* d_polarextract;
hipMalloc((void**)&d_polarextract, polardims.y * batch * sizeof(tfloat));
tfloat* d_polarextractboost;
hipMalloc((void**)&d_polarextractboost, polardims.y * polarboost * batch * sizeof(tfloat));
tfloat3* d_peakpos;
hipMalloc((void**)&d_peakpos, batch * sizeof(tfloat3));
tfloat* d_peakvalues;
hipMalloc((void**)&d_peakvalues, batch * sizeof(tfloat));
tfloat* h_scoresrot = MallocValueFilled(batch * numtargets, (tfloat)0);
tfloat* h_scorestrans = MallocValueFilled(batch * numtargets, (tfloat)0);
tfloat3* h_intermedparams = (tfloat3*)malloc(batch * numtargets * sizeof(tfloat3));
for (int t = 0; t < numtargets; t++)
memcpy(h_intermedparams + t * batch, h_params, batch * sizeof(tfloat3));
tfloat3* h_peakpos = (tfloat3*)malloc(batch * sizeof(tfloat3));
tfloat* h_peakvalues = (tfloat*)malloc(batch * sizeof(tfloat));
tfloat2* h_scale = (tfloat2*)MallocValueFilled(batch * 2, (tfloat)1);
tfloat* h_rotation = (tfloat*)malloc(batch * sizeof(tfloat));
tfloat2* h_translation = (tfloat2*)malloc(batch * sizeof(tfloat2));
tfloat2* d_translation = (tfloat2*)CudaMallocValueFilled(batch * 2, (tfloat)0);
hipfftHandle planforwTrans, planbackTrans;
hipfftHandle planforwRot, planbackRot;
if (mode & T_ALIGN_ROT)
{
planforwRot = d_FFTR2CGetPlan(2, polardims, batch);
planbackRot = d_IFFTC2RGetPlan(2, polardims, batch);
}
if (mode & T_ALIGN_ROT)
{
planforwTrans = d_FFTR2CGetPlan(2, effdims, batch);
planbackTrans = d_IFFTC2RGetPlan(2, effdims, batch);
}
if (iterations == 0 && mode == T_ALIGN_BOTH)
{
for (double a = (double)-maxrotation; a < (double)maxrotation; a += (double)ToRad(0.5))
{
for (int t = 0; t < numtargets; t++)
{
memcpy(h_params, h_intermedparams + batch * t, batch * sizeof(tfloat3));
for (int b = 0; b < batch; b++)
{
h_rotation[b] = (tfloat)a;
h_translation[b] = tfloat2((tfloat)h_atlascoords[b].x + (tfloat)(dims.x / 2), (tfloat)h_atlascoords[b].y + (tfloat)(dims.y / 2));
}
d_Extract2DTransformed(d_atlas, d_datacart, toInt2(atlasdims), toInt2(effdims), h_scale, h_rotation, h_translation, T_INTERP_LINEAR, batch);
d_NormMonolithic(d_datacart, d_datacart, Elements(effdims), NULL, T_NORM_MEAN01STD, batch);
d_FFTR2C(d_datacart, d_datacartFFT, &planforwTrans);
d_ComplexMultiplyByConjVector(d_datacartFFT, d_targetscartFFT + ElementsFFT(effdims) * t, d_datacartFFT, ElementsFFT(effdims), batch);
d_IFFTC2R(d_datacartFFT, d_datacart, &planbackTrans, effdims);
d_RemapFullFFT2Full(d_datacart, d_datacart, effdims, batch);
d_MultiplyByVector(d_datacart, d_maskcart, d_datacart, Elements(effdims), batch);
d_Peak(d_datacart, d_peakpos, d_peakvalues, effdims, T_PEAK_INTEGER, NULL, NULL, batch);
d_SubtractScalar((tfloat*)d_peakpos, (tfloat*)d_peakpos, batch * 3, (tfloat)(effdims.x / 2));
d_MultiplyByScalar(d_peakvalues, d_peakvalues, batch, (tfloat)1 / (tfloat)Elements(effdims));
hipMemcpy(h_peakpos, d_peakpos, batch * sizeof(tfloat3), hipMemcpyDeviceToHost);
hipMemcpy(h_peakvalues, d_peakvalues, batch * sizeof(tfloat), hipMemcpyDeviceToHost);
for (int b = 0; b < batch; b++)
{
if (h_peakvalues[b] > h_scorestrans[batch * t + b])
{
h_intermedparams[batch * t + b].x = h_peakpos[b].x;
h_intermedparams[batch * t + b].y = h_peakpos[b].y;
h_intermedparams[batch * t + b].z = (tfloat)a;
h_scorestrans[batch * t + b] = h_peakvalues[b];
}
}
}
}
}
//else
if (iterations == 0)
iterations = 1;
{
for (int iteration = 0; iteration < iterations; iteration++)
{
if (mode & T_ALIGN_ROT)
{
for (int t = 0; t < numtargets; t++)
{
memcpy(h_params, h_intermedparams + batch * t, batch * sizeof(tfloat3));
for (int b = 0; b < batch; b++)
h_translation[b] = tfloat2((tfloat)(h_atlascoords[b].x + padding) + h_params[b].x, (tfloat)(h_atlascoords[b].y + padding) + h_params[b].y);
hipMemcpy(d_translation, h_translation, batch * sizeof(tfloat2), hipMemcpyHostToDevice);
//d_CartAtlas2Polar(d_atlas, d_datapolar, d_translation, toInt2(atlasdims.x, atlasdims.y), toInt2(effdims.x, effdims.y), T_INTERP_LINEAR, batch);
d_NormMonolithic(d_datapolar, d_datapolar, Elements(polardims), NULL, T_NORM_MEAN01STD, batch);
d_FFTR2C(d_datapolar, d_datapolarFFT, &planforwRot);
d_ComplexMultiplyByConjVector(d_datapolarFFT, d_targetspolarFFT + ElementsFFT(polardims) * t, d_datapolarFFT, ElementsFFT(polardims), batch);
d_IFFTC2R(d_datapolarFFT, d_datapolar, &planbackRot, polardims);
d_Extract(d_datapolar, d_polarextract, polardims, toInt3(1, polardims.y, 1), toInt3(0, polardims.y / 2, 0), batch);
d_Scale(d_polarextract, d_polarextractboost, toInt3(polardims.y, 1, 1), toInt3(polardims.y * polarboost, 1, 1), T_INTERP_FOURIER, NULL, NULL, batch);
d_MultiplyByVector(d_polarextractboost, d_maskpolar, d_polarextractboost, polardims.y * polarboost, batch);
d_Peak(d_polarextractboost, d_peakpos, d_peakvalues, toInt3(polardims.y * polarboost, 1, 1), T_PEAK_INTEGER, NULL, NULL, batch);
d_MultiplyByScalar(d_peakvalues, d_peakvalues, batch, (tfloat)1 / (tfloat)Elements(polardims));
hipMemcpy(h_peakpos, d_peakpos, batch * sizeof(tfloat3), hipMemcpyDeviceToHost);
hipMemcpy(h_peakvalues, d_peakvalues, batch * sizeof(tfloat), hipMemcpyDeviceToHost);
for (int b = 0; b < batch; b++)
{
h_peakvalues[b] /= (tfloat)Elements(polardims);
if (h_peakvalues[b] > h_scoresrot[batch * t + b])
{
if (abs(h_peakpos[b].x - (tfloat)(polardims.y * polarboost)) < h_peakpos[b].x)
h_peakpos[b].x = h_peakpos[b].x - (tfloat)(polardims.y * polarboost);
h_intermedparams[batch * t + b].z = h_peakpos[b].x / (tfloat)(polardims.y * polarboost) * PI2;
h_scoresrot[batch * t + b] = h_peakvalues[b];
}
}
}
}
if (mode & T_ALIGN_TRANS)
{
for (int t = 0; t < numtargets; t++)
{
memcpy(h_params, h_intermedparams + batch * t, batch * sizeof(tfloat3));
for (int b = 0; b < batch; b++)
{
h_rotation[b] = h_params[b].z;
h_translation[b] = tfloat2((tfloat)h_atlascoords[b].x + (tfloat)(dims.x / 2), (tfloat)h_atlascoords[b].y + (tfloat)(dims.y / 2));
}
d_Extract2DTransformed(d_atlas, d_datacart, toInt2(atlasdims), toInt2(effdims), h_scale, h_rotation, h_translation, T_INTERP_LINEAR, batch);
d_NormMonolithic(d_datacart, d_datacart, Elements(effdims), NULL, T_NORM_MEAN01STD, batch);
d_FFTR2C(d_datacart, d_datacartFFT, &planforwTrans);
d_ComplexMultiplyByConjVector(d_datacartFFT, d_targetscartFFT + ElementsFFT(effdims) * t, d_datacartFFT, ElementsFFT(effdims), batch);
d_IFFTC2R(d_datacartFFT, d_datacart, &planbackTrans, effdims);
d_RemapFullFFT2Full(d_datacart, d_datacart, effdims, batch);
d_MultiplyByVector(d_datacart, d_maskcart, d_datacart, Elements(effdims), batch);
d_Peak(d_datacart, d_peakpos, d_peakvalues, effdims, T_PEAK_SUBCOARSE, NULL, NULL, batch);
d_SubtractScalar((tfloat*)d_peakpos, (tfloat*)d_peakpos, batch * 3, (tfloat)(effdims.x / 2));
d_MultiplyByScalar(d_peakvalues, d_peakvalues, batch, (tfloat)1 / (tfloat)Elements(effdims));
hipMemcpy(h_peakpos, d_peakpos, batch * sizeof(tfloat3), hipMemcpyDeviceToHost);
hipMemcpy(h_peakvalues, d_peakvalues, batch * sizeof(tfloat), hipMemcpyDeviceToHost);
for (int b = 0; b < batch; b++)
{
if (h_peakvalues[b] > h_scorestrans[batch * t + b])
{
h_intermedparams[batch * t + b].x = h_peakpos[b].x;
h_intermedparams[batch * t + b].y = h_peakpos[b].y;
h_scorestrans[batch * t + b] = h_peakvalues[b];
}
}
}
}
if (mode != T_ALIGN_BOTH)
break;
}
}
#pragma region AssignMembership
for (int b = 0; b < batch; b++)
{
tfloat bestscore = (tfloat)-999;
for (int t = 0; t < numtargets; t++)
{
if (max(h_scoresrot[batch * t + b], h_scorestrans[batch * t + b]) > bestscore)
{
bestscore = max(h_scoresrot[batch * t + b], h_scorestrans[batch * t + b]);
h_params[b] = h_intermedparams[batch * t + b];
h_membership[b] = t;
}
}
h_scores[b] = bestscore;
}
#pragma endregion
#pragma region Cleanup
free(h_translation);
free(h_rotation);
free(h_scale);
free(h_peakvalues);
free(h_peakpos);
free(h_intermedparams);
free(h_scorestrans);
free(h_scoresrot);
if (mode & T_ALIGN_ROT)
{
hipfftDestroy(planforwRot);
hipfftDestroy(planbackRot);
}
if (mode & T_ALIGN_ROT)
{
hipfftDestroy(planforwTrans);
hipfftDestroy(planbackTrans);
}
hipFree(d_peakvalues);
hipFree(d_peakpos);
hipFree(d_polarextractboost);
hipFree(d_polarextract);
hipFree(d_datapolarFFT);
hipFree(d_datacartFFT);
hipFree(d_datapolar);
hipFree(d_datacart);
free(h_atlascoords);
hipFree(d_atlas);
hipFree(d_targetspolarFFT);
hipFree(d_targetscartFFT);
#pragma endregion
}
} | 61496c4d799f6f98196d5e44e691a53a5154f1d6.cu | #include "Prerequisites.cuh"
#include "Alignment.cuh"
#include "Correlation.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
#include "Transformation.cuh"
namespace gtom
{
////////////////////////////////////
//Equivalent of tom_os3_alignStack//
////////////////////////////////////
// Aligns a stack of `batch` 2D images (packed into an atlas built from d_input)
// against `numtargets` reference images, alternating rotational alignment
// (cross-correlation in polar coordinates) and translational alignment
// (cross-correlation in cartesian coordinates).
// On return:
//   h_params[b]     - best (x shift, y shift, rotation angle) per image
//   h_membership[b] - index of the best-matching target per image
//   h_scores[b]     - best correlation score per image
// maxtranslation / maxrotation bound the search window; `mode` selects which
// degrees of freedom are refined; `iterations` is the number of alternation
// rounds (0 triggers the exhaustive-angle branch when mode == T_ALIGN_BOTH).
// FIX vs. previous revision: the translation FFT plans were created and
// destroyed under `mode & T_ALIGN_ROT`, so a TRANS-only call used
// uninitialized cufftHandles; both conditions now test T_ALIGN_TRANS.
// Also frees d_maskcart/d_maskpolar/d_translation, which previously leaked.
void d_Align2D(tfloat* d_input, tfloat* d_targets, int3 dims, int numtargets, tfloat3* h_params, int* h_membership, tfloat* h_scores, int maxtranslation, tfloat maxrotation, int iterations, T_ALIGN_MODE mode, int batch)
{
    int polarboost = 100; //Sub-pixel precision for polar correlation peak
    // Shrink the working window so that a rotated image still fits; at least
    // maxtranslation pixels of padding are kept for the shift search.
    int padding = max(dims.x / 2 - (int)((tfloat)1 / (sin(min(ToRad(90), ToRad(45) + maxrotation)) / sin(ToRad(45))) * (tfloat)(dims.x / 2)), maxtranslation);
    int3 effdims = toInt3(dims.x - padding * 2, dims.y - padding * 2, 1);
    int3 polardims = toInt3(GetCart2PolarSize(toInt2(effdims.x, effdims.y)));

#pragma region Targets
    // Precompute normalized FFTs of the targets, once in cartesian and once in
    // polar form; the per-iteration correlations multiply against these.
    tcomplex* d_targetscartFFT;
    cudaMalloc((void**)&d_targetscartFFT, ElementsFFT(effdims) * numtargets * sizeof(tcomplex));
    tcomplex* d_targetspolarFFT;
    cudaMalloc((void**)&d_targetspolarFFT, ElementsFFT(polardims) * numtargets * sizeof(tcomplex));
    {
        tfloat* d_targetscart;
        cudaMalloc((void**)&d_targetscart, Elements(effdims) * numtargets * sizeof(tfloat));
        tfloat* d_targetspolar;
        cudaMalloc((void**)&d_targetspolar, Elements(polardims) * numtargets * sizeof(tfloat));

        d_Extract(d_targets, d_targetscart, dims, effdims, toInt3(dims.x / 2, dims.y / 2, 0), numtargets);
        d_Cart2Polar(d_targetscart, d_targetspolar, toInt2(effdims.x, effdims.y), T_INTERP_CUBIC, numtargets);
        d_NormMonolithic(d_targetscart, d_targetscart, Elements(effdims), NULL, T_NORM_MEAN01STD, numtargets);
        d_NormMonolithic(d_targetspolar, d_targetspolar, Elements(polardims), NULL, T_NORM_MEAN01STD, numtargets);
        d_FFTR2C(d_targetscart, d_targetscartFFT, 2, effdims, numtargets);
        d_FFTR2C(d_targetspolar, d_targetspolarFFT, 2, polardims, numtargets);

        cudaFree(d_targetspolar);
        cudaFree(d_targetscart);
    }
#pragma endregion

#pragma region Atlas
    // Pack the whole input stack into one texture-friendly atlas; h_atlascoords
    // remembers where each image landed.
    int3 atlasdims = toInt3(1, 1, 1);
    int2 atlasprimitives = toInt2(1, 1);
    int2* h_atlascoords = (int2*)malloc(batch * sizeof(int2));
    tfloat* d_atlas = d_MakeAtlas(d_input, toInt3(dims.x, dims.y, batch), atlasdims, atlasprimitives, h_atlascoords);
#pragma endregion

#pragma region Masks
    // Cartesian mask restricts translation peaks to a disc of radius
    // maxtranslation; polar mask restricts rotation peaks to +-maxrotation.
    tfloat* d_maskcart = CudaMallocValueFilled(Elements(effdims), (tfloat)1 / (tfloat)Elements(effdims));
    tfloat* d_maskpolar;
    cudaMalloc((void**)&d_maskpolar, polardims.y * polarboost * sizeof(tfloat));
    {
        tfloat fmaxtranslation = (tfloat)(maxtranslation + 1);
        d_SphereMask(d_maskcart, d_maskcart, effdims, &fmaxtranslation, (tfloat)1, (tfloat3*)NULL);

        tfloat* h_maskpolar = MallocValueFilled(polardims.y * polarboost, (tfloat)0);
        h_maskpolar[0] = (tfloat)1;
        for (int a = 1; a < (int)ceil(maxrotation / PI2 * (tfloat)(polardims.y * polarboost)); a++)
        {
            h_maskpolar[a] = (tfloat)1;
            h_maskpolar[polardims.y * polarboost - a] = (tfloat)1;
        }
        cudaMemcpy(d_maskpolar, h_maskpolar, polardims.y * polarboost * sizeof(tfloat), cudaMemcpyHostToDevice);
        free(h_maskpolar);
    }
#pragma endregion

    // Scratch buffers reused across all iterations and targets.
    tfloat* d_datacart;
    cudaMalloc((void**)&d_datacart, Elements(effdims) * batch * sizeof(tfloat));
    tfloat* d_datapolar;
    cudaMalloc((void**)&d_datapolar, Elements(polardims) * batch * sizeof(tfloat));
    tcomplex* d_datacartFFT;
    cudaMalloc((void**)&d_datacartFFT, ElementsFFT(effdims) * batch * sizeof(tcomplex));
    tcomplex* d_datapolarFFT;
    cudaMalloc((void**)&d_datapolarFFT, ElementsFFT(polardims) * batch * sizeof(tcomplex));
    tfloat* d_polarextract;
    cudaMalloc((void**)&d_polarextract, polardims.y * batch * sizeof(tfloat));
    tfloat* d_polarextractboost;
    cudaMalloc((void**)&d_polarextractboost, polardims.y * polarboost * batch * sizeof(tfloat));
    tfloat3* d_peakpos;
    cudaMalloc((void**)&d_peakpos, batch * sizeof(tfloat3));
    tfloat* d_peakvalues;
    cudaMalloc((void**)&d_peakvalues, batch * sizeof(tfloat));

    // Best rot/trans score so far per (target, image); parameters tracked per
    // target until membership is decided at the end.
    tfloat* h_scoresrot = MallocValueFilled(batch * numtargets, (tfloat)0);
    tfloat* h_scorestrans = MallocValueFilled(batch * numtargets, (tfloat)0);
    tfloat3* h_intermedparams = (tfloat3*)malloc(batch * numtargets * sizeof(tfloat3));
    for (int t = 0; t < numtargets; t++)
        memcpy(h_intermedparams + t * batch, h_params, batch * sizeof(tfloat3));
    tfloat3* h_peakpos = (tfloat3*)malloc(batch * sizeof(tfloat3));
    tfloat* h_peakvalues = (tfloat*)malloc(batch * sizeof(tfloat));
    tfloat2* h_scale = (tfloat2*)MallocValueFilled(batch * 2, (tfloat)1);
    tfloat* h_rotation = (tfloat*)malloc(batch * sizeof(tfloat));
    tfloat2* h_translation = (tfloat2*)malloc(batch * sizeof(tfloat2));
    tfloat2* d_translation = (tfloat2*)CudaMallocValueFilled(batch * 2, (tfloat)0);

    cufftHandle planforwTrans, planbackTrans;
    cufftHandle planforwRot, planbackRot;
    if (mode & T_ALIGN_ROT)
    {
        planforwRot = d_FFTR2CGetPlan(2, polardims, batch);
        planbackRot = d_IFFTC2RGetPlan(2, polardims, batch);
    }
    if (mode & T_ALIGN_TRANS)   // was T_ALIGN_ROT: TRANS-only mode used uninitialized plans
    {
        planforwTrans = d_FFTR2CGetPlan(2, effdims, batch);
        planbackTrans = d_IFFTC2RGetPlan(2, effdims, batch);
    }

    // Exhaustive branch: sweep the whole angular range in 0.5 deg steps and
    // keep the best translation peak per angle.
    if (iterations == 0 && mode == T_ALIGN_BOTH)
    {
        for (double a = (double)-maxrotation; a < (double)maxrotation; a += (double)ToRad(0.5))
        {
            for (int t = 0; t < numtargets; t++)
            {
                memcpy(h_params, h_intermedparams + batch * t, batch * sizeof(tfloat3));
                for (int b = 0; b < batch; b++)
                {
                    h_rotation[b] = (tfloat)a;
                    h_translation[b] = tfloat2((tfloat)h_atlascoords[b].x + (tfloat)(dims.x / 2), (tfloat)h_atlascoords[b].y + (tfloat)(dims.y / 2));
                }
                d_Extract2DTransformed(d_atlas, d_datacart, toInt2(atlasdims), toInt2(effdims), h_scale, h_rotation, h_translation, T_INTERP_LINEAR, batch);
                d_NormMonolithic(d_datacart, d_datacart, Elements(effdims), NULL, T_NORM_MEAN01STD, batch);
                d_FFTR2C(d_datacart, d_datacartFFT, &planforwTrans);
                d_ComplexMultiplyByConjVector(d_datacartFFT, d_targetscartFFT + ElementsFFT(effdims) * t, d_datacartFFT, ElementsFFT(effdims), batch);
                d_IFFTC2R(d_datacartFFT, d_datacart, &planbackTrans, effdims);
                d_RemapFullFFT2Full(d_datacart, d_datacart, effdims, batch);
                d_MultiplyByVector(d_datacart, d_maskcart, d_datacart, Elements(effdims), batch);
                d_Peak(d_datacart, d_peakpos, d_peakvalues, effdims, T_PEAK_INTEGER, NULL, NULL, batch);
                d_SubtractScalar((tfloat*)d_peakpos, (tfloat*)d_peakpos, batch * 3, (tfloat)(effdims.x / 2));
                d_MultiplyByScalar(d_peakvalues, d_peakvalues, batch, (tfloat)1 / (tfloat)Elements(effdims));
                cudaMemcpy(h_peakpos, d_peakpos, batch * sizeof(tfloat3), cudaMemcpyDeviceToHost);
                cudaMemcpy(h_peakvalues, d_peakvalues, batch * sizeof(tfloat), cudaMemcpyDeviceToHost);
                for (int b = 0; b < batch; b++)
                {
                    if (h_peakvalues[b] > h_scorestrans[batch * t + b])
                    {
                        h_intermedparams[batch * t + b].x = h_peakpos[b].x;
                        h_intermedparams[batch * t + b].y = h_peakpos[b].y;
                        h_intermedparams[batch * t + b].z = (tfloat)a;
                        h_scorestrans[batch * t + b] = h_peakvalues[b];
                    }
                }
            }
        }
    }
    //else
    if (iterations == 0)
        iterations = 1;
    {
        // Alternating refinement: rotation from the polar correlation,
        // translation from the cartesian correlation.
        for (int iteration = 0; iteration < iterations; iteration++)
        {
            if (mode & T_ALIGN_ROT)
            {
                for (int t = 0; t < numtargets; t++)
                {
                    memcpy(h_params, h_intermedparams + batch * t, batch * sizeof(tfloat3));
                    for (int b = 0; b < batch; b++)
                        h_translation[b] = tfloat2((tfloat)(h_atlascoords[b].x + padding) + h_params[b].x, (tfloat)(h_atlascoords[b].y + padding) + h_params[b].y);
                    cudaMemcpy(d_translation, h_translation, batch * sizeof(tfloat2), cudaMemcpyHostToDevice);
                    // NOTE(review): the resampling into d_datapolar is commented
                    // out, so this branch correlates whatever was last left in
                    // d_datapolar - confirm d_CartAtlas2Polar should be restored.
                    //d_CartAtlas2Polar(d_atlas, d_datapolar, d_translation, toInt2(atlasdims.x, atlasdims.y), toInt2(effdims.x, effdims.y), T_INTERP_LINEAR, batch);
                    d_NormMonolithic(d_datapolar, d_datapolar, Elements(polardims), NULL, T_NORM_MEAN01STD, batch);
                    d_FFTR2C(d_datapolar, d_datapolarFFT, &planforwRot);
                    d_ComplexMultiplyByConjVector(d_datapolarFFT, d_targetspolarFFT + ElementsFFT(polardims) * t, d_datapolarFFT, ElementsFFT(polardims), batch);
                    d_IFFTC2R(d_datapolarFFT, d_datapolar, &planbackRot, polardims);
                    // Take the central radius line and upscale it polarboost-fold
                    // for sub-degree peak localization.
                    d_Extract(d_datapolar, d_polarextract, polardims, toInt3(1, polardims.y, 1), toInt3(0, polardims.y / 2, 0), batch);
                    d_Scale(d_polarextract, d_polarextractboost, toInt3(polardims.y, 1, 1), toInt3(polardims.y * polarboost, 1, 1), T_INTERP_FOURIER, NULL, NULL, batch);
                    d_MultiplyByVector(d_polarextractboost, d_maskpolar, d_polarextractboost, polardims.y * polarboost, batch);
                    d_Peak(d_polarextractboost, d_peakpos, d_peakvalues, toInt3(polardims.y * polarboost, 1, 1), T_PEAK_INTEGER, NULL, NULL, batch);
                    d_MultiplyByScalar(d_peakvalues, d_peakvalues, batch, (tfloat)1 / (tfloat)Elements(polardims));
                    cudaMemcpy(h_peakpos, d_peakpos, batch * sizeof(tfloat3), cudaMemcpyDeviceToHost);
                    cudaMemcpy(h_peakvalues, d_peakvalues, batch * sizeof(tfloat), cudaMemcpyDeviceToHost);
                    for (int b = 0; b < batch; b++)
                    {
                        // NOTE(review): values were already scaled by
                        // 1/Elements(polardims) on the device above; this divides
                        // a second time - confirm intended normalization.
                        h_peakvalues[b] /= (tfloat)Elements(polardims);
                        if (h_peakvalues[b] > h_scoresrot[batch * t + b])
                        {
                            // Wrap peaks from the upper half of the axis to
                            // negative angles.
                            if (abs(h_peakpos[b].x - (tfloat)(polardims.y * polarboost)) < h_peakpos[b].x)
                                h_peakpos[b].x = h_peakpos[b].x - (tfloat)(polardims.y * polarboost);
                            h_intermedparams[batch * t + b].z = h_peakpos[b].x / (tfloat)(polardims.y * polarboost) * PI2;
                            h_scoresrot[batch * t + b] = h_peakvalues[b];
                        }
                    }
                }
            }
            if (mode & T_ALIGN_TRANS)
            {
                for (int t = 0; t < numtargets; t++)
                {
                    memcpy(h_params, h_intermedparams + batch * t, batch * sizeof(tfloat3));
                    for (int b = 0; b < batch; b++)
                    {
                        h_rotation[b] = h_params[b].z;
                        h_translation[b] = tfloat2((tfloat)h_atlascoords[b].x + (tfloat)(dims.x / 2), (tfloat)h_atlascoords[b].y + (tfloat)(dims.y / 2));
                    }
                    d_Extract2DTransformed(d_atlas, d_datacart, toInt2(atlasdims), toInt2(effdims), h_scale, h_rotation, h_translation, T_INTERP_LINEAR, batch);
                    d_NormMonolithic(d_datacart, d_datacart, Elements(effdims), NULL, T_NORM_MEAN01STD, batch);
                    d_FFTR2C(d_datacart, d_datacartFFT, &planforwTrans);
                    d_ComplexMultiplyByConjVector(d_datacartFFT, d_targetscartFFT + ElementsFFT(effdims) * t, d_datacartFFT, ElementsFFT(effdims), batch);
                    d_IFFTC2R(d_datacartFFT, d_datacart, &planbackTrans, effdims);
                    d_RemapFullFFT2Full(d_datacart, d_datacart, effdims, batch);
                    d_MultiplyByVector(d_datacart, d_maskcart, d_datacart, Elements(effdims), batch);
                    d_Peak(d_datacart, d_peakpos, d_peakvalues, effdims, T_PEAK_SUBCOARSE, NULL, NULL, batch);
                    d_SubtractScalar((tfloat*)d_peakpos, (tfloat*)d_peakpos, batch * 3, (tfloat)(effdims.x / 2));
                    d_MultiplyByScalar(d_peakvalues, d_peakvalues, batch, (tfloat)1 / (tfloat)Elements(effdims));
                    cudaMemcpy(h_peakpos, d_peakpos, batch * sizeof(tfloat3), cudaMemcpyDeviceToHost);
                    cudaMemcpy(h_peakvalues, d_peakvalues, batch * sizeof(tfloat), cudaMemcpyDeviceToHost);
                    for (int b = 0; b < batch; b++)
                    {
                        if (h_peakvalues[b] > h_scorestrans[batch * t + b])
                        {
                            h_intermedparams[batch * t + b].x = h_peakpos[b].x;
                            h_intermedparams[batch * t + b].y = h_peakpos[b].y;
                            h_scorestrans[batch * t + b] = h_peakvalues[b];
                        }
                    }
                }
            }
            // Only full rot+trans alignment benefits from alternating rounds.
            if (mode != T_ALIGN_BOTH)
                break;
        }
    }

#pragma region AssignMembership
    // Pick, per image, the target with the highest score (rot or trans).
    for (int b = 0; b < batch; b++)
    {
        tfloat bestscore = (tfloat)-999;
        for (int t = 0; t < numtargets; t++)
        {
            if (max(h_scoresrot[batch * t + b], h_scorestrans[batch * t + b]) > bestscore)
            {
                bestscore = max(h_scoresrot[batch * t + b], h_scorestrans[batch * t + b]);
                h_params[b] = h_intermedparams[batch * t + b];
                h_membership[b] = t;
            }
        }
        h_scores[b] = bestscore;
    }
#pragma endregion

#pragma region Cleanup
    free(h_translation);
    free(h_rotation);
    free(h_scale);
    free(h_peakvalues);
    free(h_peakpos);
    free(h_intermedparams);
    free(h_scorestrans);
    free(h_scoresrot);
    if (mode & T_ALIGN_ROT)
    {
        cufftDestroy(planforwRot);
        cufftDestroy(planbackRot);
    }
    if (mode & T_ALIGN_TRANS)   // was T_ALIGN_ROT: must mirror the creation condition
    {
        cufftDestroy(planforwTrans);
        cufftDestroy(planbackTrans);
    }
    cudaFree(d_translation);    // previously leaked
    cudaFree(d_maskpolar);      // previously leaked
    cudaFree(d_maskcart);       // previously leaked
    cudaFree(d_peakvalues);
    cudaFree(d_peakpos);
    cudaFree(d_polarextractboost);
    cudaFree(d_polarextract);
    cudaFree(d_datapolarFFT);
    cudaFree(d_datacartFFT);
    cudaFree(d_datapolar);
    cudaFree(d_datacart);
    free(h_atlascoords);
    cudaFree(d_atlas);
    cudaFree(d_targetspolarFFT);
    cudaFree(d_targetscartFFT);
#pragma endregion
}
} |
e5bfdd22eba30ec1e08e4fbd7b9052dc8dbe319d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Adds the shared vector inFreeArray (length `length`) element-wise to one row
// of a row-major size x length int matrix: thread ix computes
//   output[ix*length + i] = input[ix*length + i] + inFreeArray[i]  for all i.
// One thread per row; launch with at least `size` threads in total.
// ix is widened to long so ix*length cannot overflow 32-bit arithmetic.
__global__ void intArrayAdd(int size, const int *input, int *output, const int *inFreeArray, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// Row base pointers for this thread's input and output rows.
const int *inArrayBody = &input[ix* length];
int *outArrayBody = &output[ix* length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
} | e5bfdd22eba30ec1e08e4fbd7b9052dc8dbe319d.cu | #include "includes.h"
// Adds the shared vector inFreeArray (length `length`) element-wise to each
// row of a row-major size x length int matrix, one thread per row:
//   output[row*length + k] = input[row*length + k] + inFreeArray[k].
// The row index is computed in long so row*length cannot overflow an int.
__global__ void intArrayAdd(int size, const int *input, int *output, const int *inFreeArray, int length) {
  const long row = threadIdx.x + blockIdx.x * (long)blockDim.x;
  if (row >= size)
    return; // surplus threads past the last row do nothing
  const long base = row * (long)length; // first element of this thread's row
  for (long k = 0; k < length; k++)
    output[base + k] = input[base + k] + inFreeArray[k];
}
a6c93cde6f6e1702aa940c7b5c3bf0173ec3ce69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Intended to accumulate the L1 norm (sum of absolute values) of the strided
// elements of dx into `result`.
// NOTE(review): `result` is passed BY VALUE, so each thread adds into its own
// private copy and the sum is discarded when the kernel returns - the caller
// can never observe it. Fixing this needs an interface change (a device
// pointer updated with atomicAdd), so it is only flagged here.
// NOTE(review): the predicate `i % incx == 0` matches positions that are
// multiples of incx, which equals the usual offset/stride semantics only when
// xOffset % incx == 0 - confirm against callers.
__global__ void norm1_strided_double(int n, int xOffset,double *dx,int incx,double result) {
// Grid-stride loop over the first n elements.
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i % incx == 0)
result += abs(dx[i]);
}
} | a6c93cde6f6e1702aa940c7b5c3bf0173ec3ce69.cu | #include "includes.h"
// Intended to accumulate the L1 norm (sum of absolute values) of the strided
// elements of dx into `result`.
// NOTE(review): `result` is passed BY VALUE, so each thread adds into its own
// private copy and the sum is discarded when the kernel returns - the caller
// can never observe it. Fixing this needs an interface change (a device
// pointer updated with atomicAdd), so it is only flagged here.
// NOTE(review): the predicate `i % incx == 0` matches positions that are
// multiples of incx, which equals the usual offset/stride semantics only when
// xOffset % incx == 0 - confirm against callers.
__global__ void norm1_strided_double(int n, int xOffset,double *dx,int incx,double result) {
// Grid-stride loop over the first n elements.
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i % incx == 0)
result += abs(dx[i]);
}
}
a11df1d8d5c2c81df8ee73ba973035e0da85ba5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "reorder_one_bit.cuh"
#include "reorder_one_bit_impl.cuh"
#include <contrib/libs/cub/hipcub/hipcub.hpp>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {

// One pass of an LSD radix sort over a single bit: reorders `keys` (and the
// matching `values`) in place according to the value of bit `bit`. The scatter
// is presumably a stable zeros-first partition - the destination-index logic
// lives in ReorderOneBitImpl (reorder_one_bit_impl.cuh); confirm there.
// All work is enqueued on `stream`; nothing is synchronized here. `context`
// must provide temp buffers of at least `size` elements (see ReorderBitTempSize
// for the scan scratch size).
template <class T>
void ReorderOneBit(
ui32 size,
TReorderOneBitContext<ui32, T> context,
ui32* keys,
T* values,
int bit,
TCudaStream stream) {
if (size) {
// Snapshot keys/values so the scatter below can write back into the originals.
hipMemcpyAsync(context.TempValues.Get(), values, sizeof(T) * size, hipMemcpyDefault, stream);
hipMemcpyAsync(context.TempKeys.Get(), keys, sizeof(ui32) * size, hipMemcpyDefault, stream);
{
// Exclusive prefix sum over the selected bit of each key; Offsets[i] then
// counts how many preceding keys had that bit set, which the scatter kernel
// uses to compute stable destinations.
using TInput = TScanBitIterator<ui32>;
TInput inputIter(context.TempKeys.Get(), bit);
hipcub::DeviceScan::ExclusiveSum < TInput, int*> (context.ScanTempBuffer.Get(),
context.ScanTempBufferSize,
inputIter,
context.Offsets.Get(),
size,
stream);
}
const int blockSize = 512;
const int N = 1; // elements handled per thread
const int numBlocks = (size + (N * blockSize) - 1) / (N * blockSize);
ReorderOneBitImpl<ui32, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > (
context.TempKeys,
context.TempValues,
context.Offsets,
bit,
keys,
values,
size);
}
}

// Returns the scratch-buffer size in bytes that hipcub's exclusive scan needs
// for `size` elements. Passing nullptr as the temp-storage pointer is cub's
// standard size-query idiom: no work is done, only sizeInBytes is written.
ui64 ReorderBitTempSize(ui32 size) {
ui64 sizeInBytes = 0;
using TInput = TScanBitIterator<ui32>;
TInput fakeInput(nullptr, 0);
hipcub::DeviceScan::ExclusiveSum< TInput, int * > (nullptr,
sizeInBytes,
fakeInput,
nullptr,
size);
return sizeInBytes;
}

// Explicit instantiation for ui32 values - the only variant compiled here.
template void ReorderOneBit<ui32>(
ui32 size,
TReorderOneBitContext<ui32, ui32> context,
ui32* keys,
ui32* values,
int bit,
TCudaStream stream);
}
| a11df1d8d5c2c81df8ee73ba973035e0da85ba5b.cu | #include "reorder_one_bit.cuh"
#include "reorder_one_bit_impl.cuh"
#include <contrib/libs/cub/cub/device/device_scan.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {

// One pass of an LSD radix sort over a single bit: reorders `keys` (and the
// matching `values`) in place according to the value of bit `bit`. The scatter
// is presumably a stable zeros-first partition - the destination-index logic
// lives in ReorderOneBitImpl (reorder_one_bit_impl.cuh); confirm there.
// All work is enqueued on `stream`; nothing is synchronized here. `context`
// must provide temp buffers of at least `size` elements (see ReorderBitTempSize
// for the scan scratch size).
template <class T>
void ReorderOneBit(
ui32 size,
TReorderOneBitContext<ui32, T> context,
ui32* keys,
T* values,
int bit,
TCudaStream stream) {
if (size) {
// Snapshot keys/values so the scatter below can write back into the originals.
cudaMemcpyAsync(context.TempValues.Get(), values, sizeof(T) * size, cudaMemcpyDefault, stream);
cudaMemcpyAsync(context.TempKeys.Get(), keys, sizeof(ui32) * size, cudaMemcpyDefault, stream);
{
// Exclusive prefix sum over the selected bit of each key; Offsets[i] then
// counts how many preceding keys had that bit set, which the scatter kernel
// uses to compute stable destinations.
using TInput = TScanBitIterator<ui32>;
TInput inputIter(context.TempKeys.Get(), bit);
cub::DeviceScan::ExclusiveSum < TInput, int*> (context.ScanTempBuffer.Get(),
context.ScanTempBufferSize,
inputIter,
context.Offsets.Get(),
size,
stream);
}
const int blockSize = 512;
const int N = 1; // elements handled per thread
const int numBlocks = (size + (N * blockSize) - 1) / (N * blockSize);
ReorderOneBitImpl<ui32, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > (
context.TempKeys,
context.TempValues,
context.Offsets,
bit,
keys,
values,
size);
}
}

// Returns the scratch-buffer size in bytes that cub's exclusive scan needs
// for `size` elements. Passing nullptr as the temp-storage pointer is cub's
// standard size-query idiom: no work is done, only sizeInBytes is written.
ui64 ReorderBitTempSize(ui32 size) {
ui64 sizeInBytes = 0;
using TInput = TScanBitIterator<ui32>;
TInput fakeInput(nullptr, 0);
cub::DeviceScan::ExclusiveSum< TInput, int * > (nullptr,
sizeInBytes,
fakeInput,
nullptr,
size);
return sizeInBytes;
}

// Explicit instantiation for ui32 values - the only variant compiled here.
template void ReorderOneBit<ui32>(
ui32 size,
TReorderOneBitContext<ui32, ui32> context,
ui32* keys,
ui32* values,
int bit,
TCudaStream stream);
}
|
e2cbb6cd064296526b2ca5dc9a7869aa7fb0ff79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//In theory, GPU accelerated code
#include <iostream>
#include <math.h>
using namespace std;
// Kernel: element-wise vector add, y[i] = x[i] + y[i] for i in [0, n).
// Written as a grid-stride loop: each thread starts at its global index and
// hops by the total number of threads in the grid, so any launch geometry
// (even a single block) still covers all n elements.
__global__ void add(int n, float *x, float *y)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x; // this thread's global index
    const int hop = blockDim.x * gridDim.x;                  // total threads in the grid
    for (int i = first; i < n; i += hop)
    {
        y[i] = x[i] + y[i];
    }
}
// HIP demo driver: computes y = x + y over 1M floats held in unified (managed)
// memory, then verifies on the host that every element equals 3.0f.
// Returns 0 on success, 1 if any HIP API call or the kernel fails.
int main(void)
{
    const int N = 1 << 20; // 1M elements
    float *x = NULL, *y = NULL;

    // Allocate unified memory - accessible from CPU or GPU. Every HIP API
    // call returns a status; check it instead of assuming success.
    hipError_t err = hipMallocManaged(&x, N * sizeof(float));
    if (err != hipSuccess)
    {
        cout << "hipMallocManaged(x) failed: " << hipGetErrorString(err) << endl;
        return 1;
    }
    err = hipMallocManaged(&y, N * sizeof(float));
    if (err != hipSuccess)
    {
        cout << "hipMallocManaged(y) failed: " << hipGetErrorString(err) << endl;
        hipFree(x);
        return 1;
    }

    // Initialize the x and y arrays on the host.
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch configuration: 256 threads per block (a multiple of the
    // warp/wavefront size), enough blocks to cover all N elements.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);

    // A kernel launch is asynchronous and does not report errors itself:
    // pick up launch errors, then wait for completion before reading y.
    err = hipGetLastError();
    if (err == hipSuccess)
        err = hipDeviceSynchronize();
    if (err != hipSuccess)
    {
        cout << "kernel execution failed: " << hipGetErrorString(err) << endl;
        hipFree(x);
        hipFree(y);
        return 1;
    }

    // Every element should be exactly 1.0f + 2.0f = 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    cout << "Max error: " << maxError << endl;

    hipFree(x);
    hipFree(y);
    return 0;
}
| e2cbb6cd064296526b2ca5dc9a7869aa7fb0ff79.cu |
//In theory, GPU accelerated code
#include <iostream>
#include <math.h>
using namespace std;
__global__ //Kernel function to add the elements of two arrays
void add(int n, float *x, float *y)
{
//int index = threadIdx.x; //Contains index of current thread within it's block
//int stride = blockDim.x; //Number of threads in the block
//gridDim.x is number of blocks in the grid
//blockId.x contains the index of the current thread block on the grid
int index = blockIdx.x * blockDim.x + threadIdx.x;
//Each thread get's it's index by computing the offset to the beginning of it's block and adding the threads index within the block.
int stride = blockDim.x * gridDim.x;
//Stride is the total number of threads in the grid.
for(int i= index; i < n; i += stride) y[i] = x[i] + y[i]; //Note: i is now the thread index, and each loop through changes to next thread in the block
}
// CUDA demo driver: computes y = x + y over 1M floats held in unified
// (managed) memory, then verifies on the host that every element equals 3.0f.
// Returns 0 on success, 1 if any CUDA API call or the kernel fails.
int main(void)
{
    const int N = 1 << 20; // 1M elements
    float *x = NULL, *y = NULL;

    // Allocate unified memory - accessible from CPU or GPU. Every CUDA API
    // call returns a status; check it instead of assuming success.
    cudaError_t err = cudaMallocManaged(&x, N * sizeof(float));
    if (err != cudaSuccess)
    {
        cout << "cudaMallocManaged(x) failed: " << cudaGetErrorString(err) << endl;
        return 1;
    }
    err = cudaMallocManaged(&y, N * sizeof(float));
    if (err != cudaSuccess)
    {
        cout << "cudaMallocManaged(y) failed: " << cudaGetErrorString(err) << endl;
        cudaFree(x);
        return 1;
    }

    // Initialize the x and y arrays on the host.
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch configuration: 256 threads per block (a multiple of the warp
    // size), enough blocks to cover all N elements.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);

    // A kernel launch is asynchronous and does not report errors itself:
    // pick up launch errors, then wait for completion before reading y.
    err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        cout << "kernel execution failed: " << cudaGetErrorString(err) << endl;
        cudaFree(x);
        cudaFree(y);
        return 1;
    }

    // Every element should be exactly 1.0f + 2.0f = 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    cout << "Max error: " << maxError << endl;

    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
22776bb5ac892fa41f128f46cf0592ef4ce2a413.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/yoloplugin.h"
#include "../include/utils.h"
#include <assert.h>
using namespace Yolo_plugin;
namespace nvinfer1
{
// Default constructor: populates the detection-head list from the three
// compile-time YoloKernel descriptors (yolo1..yolo3) and the class count.
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// Create the plugin at runtime from a byte stream. The layout must match
// serialize() exactly: mClassCount, mThreadCount, mKernelCount, then the raw
// YoloKernel array.
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
// Consumed bytes must equal the advertised serialized length.
assert(d == a + length);
}
// Writes the plugin state into `buffer`; mirror image of the deserializing
// constructor above. Caller provides getSerializationSize() bytes.
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
// Byte count of the serialize() layout above; must stay in sync with it.
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo_plugin::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
// Output tensor shape: one float for the detection count followed by
// MAX_OUTPUT_BBOX_COUNT Detection records, flattened to floats.
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin.
// NOTE(review): this returns a default-constructed plugin (state rebuilt from
// the static yolo1..3 descriptors), so any state that differs after
// deserialization would not be carried over - confirm that is acceptable.
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Sigmoid, evaluated in device code.
__device__ float Logist(float data){ return 1.0f / (1.0f + expf(-data)); };
// Decodes raw head activations into Detection records. One thread per
// (batch, grid cell); each thread scans the anchors of its cell, applies the
// sigmoid/exp YOLO decoding, and appends passing boxes to the per-batch output
// slab. output[bnIdx*outputElem] is a float box counter bumped with atomicAdd;
// the Detection records follow it.
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
// Split the flat thread index into (batch element, cell within the grid).
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
//for input is [anchor1,anchor2,anchor3],every anchor is [x1,x2,x3...x_total_grid,y1,y2,..y_total_grid,w1,..w_total_grid,h1,h2,..h_total_grid,boxconf_1,boxconf_2,..boxconf_3..boxconf_total_grid,class1_1,class1_2,class1_3,...class1_total_grid,class2_1,class2_2..class2_total_grid,...classn_total_grid]
// NOTE(review): the literal 3 below is presumably CHECK_COUNT - confirm and
// unify if CHECK_COUNT can ever differ from 3.
for (int k = 0; k < 3; ++k) {
// Best class probability for this anchor.
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
// Reserve a slot in the per-batch output: the leading float is the count.
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location: center x/y from sigmoid offsets scaled to network input size,
//width/height from exp-scaled anchors.
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = expf(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = expf(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
// Runs CalDetection once per YOLO head, after zeroing each batch element's
// box counter.
// NOTE(review): devAnchor is hipMalloc'ed/freed on every call and the anchors
// are copied with a synchronous hipMemcpy while the kernels run on `stream` -
// this stalls the pipeline; consider a persistent buffer + async copies.
// NOTE(review): mThreadCount is shrunk permanently when a head is smaller than
// it - the member never grows back; confirm that is intended.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(hipMalloc(&devAnchor,AnchorLen));
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
// Reset the leading count float of every batch element's output slab.
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CUDA_CHECK(hipMemcpy(devAnchor, yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(hipFree(devAnchor));
}
// TensorRT entry point for inference.
// NOTE(review): the `stream` argument is forwarded but forwardGpu launches its
// kernels with stream 0 (see the 0 in hipLaunchKernelGGL above) and uses
// synchronous copies, so the enqueue contract of running on the caller's
// stream is not honored - confirm before using with multiple streams.
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
// Static creator state: this plugin exposes no creation-time fields.
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Builds a fresh plugin; `fc` is ignored since there are no plugin fields.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 22776bb5ac892fa41f128f46cf0592ef4ce2a413.cu | #include "../include/yoloplugin.h"
#include "../include/utils.h"
#include <assert.h>
using namespace Yolo_plugin;
namespace nvinfer1
{
// Default-construct the plugin from compile-time YOLO configuration:
// three detection scales (yolo1..yolo3) and CLASS_NUM classes.
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
// Layout (must mirror serialize()): mClassCount, mThreadCount, mKernelCount,
// then mKernelCount raw YoloKernel structs.
// NOTE(review): the structs are memcpy'd with no endianness or version
// handling — serialized engines are only portable between identical builds.
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
// Guard against size drift between serialize() and this constructor.
assert(d == a + length);
}
// Serialize in the exact order the stream constructor reads.
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
// Byte count must stay in sync with serialize() and the stream constructor.
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo_plugin::YoloKernel) * mYoloKernel.size();
}
// No per-engine resources to set up; everything is allocated per enqueue.
int YoloLayerPlugin::initialize()
{
return 0;
}
// Output shape: one float counter plus MAX_OUTPUT_BBOX_COUNT Detection
// records, flattened into the channel dimension.
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
// No shape/format-dependent state to configure.
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin.
// NOTE(review): the clone is rebuilt from compile-time defaults; any runtime
// change to mThreadCount is not carried over — confirm this is intended.
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Logistic sigmoid: maps a raw activation to (0, 1).
__device__ float Logist(float x) { return 1.0f / (1.0f + expf(-x)); }
// YOLO decode kernel: one thread per (batch item, grid cell). Each thread
// scans the CHECK_COUNT anchors of its cell, picks the best class, applies
// the confidence threshold, and appends surviving boxes to the per-batch
// output region via an atomic counter.
// Output region per batch item: [float count][Detection x MAX_OUTPUT_BBOX_COUNT].
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
// Split the flat thread index into (batch item, cell within the grid).
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
// Per-anchor channel count: x, y, w, h, objectness, then class scores.
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
//for input is [anchor1,anchor2,anchor3],every anchor is [x1,x2,x3...x_total_grid,y1,y2,..y_total_grid,w1,..w_total_grid,h1,h2,..h_total_grid,boxconf_1,boxconf_2,..boxconf_3..boxconf_total_grid,class1_1,class1_2,class1_3,...class1_total_grid,class2_1,class2_2..class2_total_grid,...classn_total_grid]
// NOTE(review): the literal 3 below presumably mirrors CHECK_COUNT — confirm.
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
// Arg-max over class scores (channels 5..info_len_i-1 of anchor k).
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
// The detection count is stored as a float and bumped with float atomicAdd;
// exact for counts well below 2^24.
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// Box center is decoded relative to the cell, then scaled to input pixels;
// width/height come from the exponential of the raw value times the anchor.
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = expf(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = expf(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
// Decode all YOLO scales into the detection output buffer.
// Fixes over the original: work is queued on the caller-provided 'stream'
// (the kernel previously launched on the default stream and the copies were
// synchronous), kernel launch errors are checked, and the anchor scratch
// buffer is freed only after the queued work has completed.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
    void* devAnchor = nullptr;
    size_t AnchorLen = sizeof(float) * CHECK_COUNT * 2;
    CUDA_CHECK(cudaMalloc(&devAnchor, AnchorLen));
    // Each batch item's output region begins with a float-sized counter.
    int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
    for (int idx = 0; idx < batchSize; ++idx) {
        // Zero only the leading counter; detections are appended via atomicAdd.
        CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
    }
    int numElem = 0;
    for (unsigned int i = 0; i < mYoloKernel.size(); ++i) {
        const auto& yolo = mYoloKernel[i];
        numElem = yolo.width * yolo.height * batchSize;
        if (numElem < mThreadCount)
            mThreadCount = numElem;
        // Anchors for this scale; ordered before the kernel on the same stream.
        CUDA_CHECK(cudaMemcpyAsync(devAnchor, yolo.anchors, AnchorLen, cudaMemcpyHostToDevice, stream));
        CalDetection<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>
            (inputs[i], output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount, outputElem);
        CUDA_CHECK(cudaGetLastError()); // catch invalid launch configurations
    }
    // devAnchor is read by the queued kernels; wait before releasing it.
    CUDA_CHECK(cudaStreamSynchronize(stream));
    CUDA_CHECK(cudaFree(devAnchor));
}
// TensorRT inference entry point: hands the network outputs to the GPU
// decode path. Returns 0 on success (TensorRT convention).
// 'workspace' is unused; scratch memory is allocated inside forwardGpu.
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
// Static storage for the plugin field collection advertised to TensorRT.
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
// The creator exposes no configurable attributes, so the collection is empty.
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// Must match YoloLayerPlugin::getPluginType() for deserialization lookup.
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Builds a fresh plugin; 'name' and 'fc' are ignored because the layer is
// fully configured by compile-time constants (CLASS_NUM, yolo kernels).
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
|
51141e2b6b03e508da413974ad62230fec7d4c7f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
// Convert COO row indices (int64, CUDA) of a matrix with 'dim' rows and
// 'nnz' nonzeros into a CSR row-pointer array of length dim+1, as int32
// (the 32-bit index form the cusparse wrappers below require).
// NOTE(review): narrowing int64 -> int32 assumes nnz and dim fit in int32.
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// Core CSR x dense addmm: r_ = beta * t + alpha * (sparse m x k) @ (dense k x n).
// The sparse operand is given in CSR form (crow_indices/col_indices/values,
// int32 indices); requires nnz > 0. The result is staged column-major
// (stride(0) == 1) because that is what the csrmm2 wrapper produces.
void s_addmm_out_csr_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& crow_indices, Tensor& col_indices, Tensor& values, const Tensor& dense) {
TORCH_INTERNAL_ASSERT(nnz > 0);
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
Tensor r__;
// beta == 0: ignore t entirely (also avoids propagating NaNs from t).
if (cast_beta == scalar_t(0)) {
r_.zero_();
} else if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
// Use r_ directly only if it is already column-major; otherwise stage
// into a column-major temporary and copy back at the end.
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// Note: This storage arrangement is preferred due to most of the CUDA kernels handle only contiguous tensors
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
// Pick the dense operand layout: 'n' for column-major, 't' for row-major
// (contiguous fallback), so csrmm2 reads it correctly.
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) == dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
crow_indices.data_ptr<int32_t>(),
col_indices.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
// Copy the staged result back if we could not write into r_ directly.
if (!is_same_tensor(r__, r_)) {
r_.copy_(r__);
}
}
);
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// COO front-end for the CSR worker: converts the 2-row COO index tensor
// (row 0 = row indices, row 1 = column indices) into int32 CSR form and
// delegates. Same contract as the CSR worker (nnz > 0).
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
// Narrow the int64 column indices to the int32 form cusparse expects.
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
// r_ = beta * t + alpha * (sparse_ m x k) @ (dense k x n), no broadcasting.
// Validates devices and shapes, coalesces the sparse operand, and handles
// the empty-sparse case (result is just beta * t) before delegating to the
// COO worker.
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
// Empty sparse matrix: the product term vanishes, so r_ = beta * t.
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
// Broadcasting out-variant of addmm: expands `self` to the (m x n) result
// shape implied by mat1 @ mat2, then delegates to the non-broadcasting path.
Tensor& addmm_out_sparse_dense_cuda(
    const Tensor& self,
    const SparseTensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha,
    Tensor& result
) {
  auto self_expanded = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_out_sparse_dense_cuda(result, *self_expanded, mat1, mat2, beta, alpha);
}
// Out-of-place addmm (no broadcasting): allocate an empty result tensor and
// forward to the out-variant.
Tensor s_addmm_sparse_dense_cuda(
    const Tensor& t,
    const SparseTensor& sparse,
    const Tensor& dense,
    const Scalar& beta,
    const Scalar& alpha
) {
  Tensor result = at::empty({0}, t.options());
  s_addmm_out_sparse_dense_cuda(result, t, sparse, dense, beta, alpha);
  return result;
}
// Broadcasting out-of-place addmm: expand `self` to the result shape, then
// delegate to the non-broadcasting out-of-place variant.
Tensor addmm_sparse_dense_cuda(
    const Tensor& self,
    const SparseTensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha
) {
  auto self_expanded = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_sparse_dense_cuda(*self_expanded, mat1, mat2, beta, alpha);
}
// In-place variant: accumulate into 't' itself by passing it as both the
// output and the addend of the out-variant (no broadcasting).
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
// hspmm: (sparse m x k) @ (dense k x n) -> hybrid sparse result with one
// sparse dim (the rows that actually appear in the input) and one dense dim
// (the n columns). Strategy: renumber the nonzero rows to 0..nnz-1, run a
// dense addmm into an (nnz x n) values tensor, and pair it with the saved
// original row indices.
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
// Result layout: 1 sparse dim + 1 dense dim over logical shape {m, n}.
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
// Shrink the renumbered matrix's row count to nnz to match the new indices.
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
// values = 0 * values + 1 * newSparse @ dense, i.e. a pure matmul.
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
// Out-of-place hspmm: allocate an empty sparse result and fill it in place.
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
  SparseTensor result = at::empty({0}, sparse.options());
  hspmm_out_sparse_cuda(sparse, dense, result);
  return result;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
// Element-wise functor for the sparse-into-dense accumulation kernels:
// scaled add with a fixed scalar multiplier.
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
// out += val * in
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
// out = in1 + val * in2
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
// Scalar multiplier applied to the sparse values.
T val;
};
// r_ = dense + value * sparse (dense output). Shapes must match exactly —
// dense/sparse broadcasting is not supported here. Computation happens in
// the promoted common dtype and is cast back into r_ at the end.
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
// No nonzeros: the result is just a copy of the dense operand.
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
// Work in a commonDtype buffer when r_'s dtype differs; copied back at the end.
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
// To solve the conflict of parallel write, we use a dense buffer when in-place.
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// Coalesced fast path: unique indices allow a race-free element-wise kernel.
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
// Scalar values: one thread per nonzero.
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar), dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
// Hybrid values: a whole block cooperates on each nonzero's dense slice.
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} else {
// Uncoalesced path: flatten sparse dims to a single index and let
// index_add_ handle duplicate indices (atomic accumulation).
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
// NOTE(review): THCudaCheck is the legacy THC error macro — presumably kept
// for compatibility; newer code would use C10_HIP_CHECK here.
THCudaCheck(hipGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
// r_ = t + value * src for two sparse tensors of identical shape.
// Implementation concatenates the index/value tensors instead of merging, so
// the result is generally NOT coalesced (cheap for gradient accumulation);
// a coalesce is forced only when nnz grows past the element count.
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
// Dense + sparse is routed to the dense kernel above.
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
// Degenerate operands: fall back to copy / scalar-multiply.
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
// Pre-scale src's values by 'value' unless it is exactly 1.
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
// Result dtype differs: coalesce in the common dtype first, then cast.
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
// Element-wise functor for the sparse-sparse intersection kernel:
// plain multiplication.
template <typename T>
struct TensorMulOp {
// out *= in
__device__ __forceinline__ void operator()(T* out, T* in) {
*out *= *in;
}
// out = in1 * in2
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 * *in2;
}
};
// r_ = t_ * src_ element-wise for two sparse tensors of identical shape.
// Only positions present in BOTH operands can be nonzero, so both are
// coalesced and intersected; the result is produced already coalesced.
SparseTensor& mul_out_sparse_cuda(const SparseTensor& t_, const SparseTensor& src_, SparseTensor& r_) {
// 0-dim operands degrade to scalar multiplication.
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
// Either operand empty => product is the all-zero sparse tensor.
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
// Output buffers sized for the worst-case intersection (max_nnz entries).
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
// Actual intersection size is produced on device and fetched afterwards.
Tensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
// Pass 1: multiply values at intersecting index positions.
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel), dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
// Pass 2 (single thread): emit intersected indices and count them.
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
// Backward scatter for sparse.sum(): one thread per input nonzero. Each
// thread copies the matching grad slice into the input-gradient values, or
// zero-fills when the input position has no corresponding grad entry.
// input_indices_pos_ti[i] holds a candidate position in grad_indices for
// input index i — presumably produced by a search on the (sorted, coalesced)
// grad indices in the caller; TODO confirm (the caller is outside this chunk).
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
// The candidate position only counts as a match if the flat indices agree.
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
// Copy the full dense slice of the matching grad entry.
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
// No grad entry for this input position: gradient is zero.
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
// Backward of at::_sparse_sum over `dims_to_sum`. Two regimes:
//  * all sparse dims summed -> grad is dense; broadcast/expand it back over
//    every input nonzero and return a sparse tensor with input's indices;
//  * some sparse dims kept  -> grad is sparse; match grad nonzeros to input
//    nonzeros via flattened 1-D indices + thrust::lower_bound, then scatter
//    with _sparse_sum_backward_cuda_kernel.
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
// Partition summed dims into sparse vs dense; dense dims are shifted by +1
// because values has a leading nnz dimension.
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
// grad is dense: re-expand it to input_values' shape and pair it with
// input's indices.
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
// Undo the dense-dim reduction by unsqueezing and expanding grad values.
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
// nnz layout unchanged: the expanded grad values map 1:1 to input nnz
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
// Functional batched sparse-dense matmul. Allocates the result buffer in the
// (b, k, i) layout that bmm_out_sparse_cuda expects (it transposes the last
// two dims on output because cusparse writes column-major) and delegates.
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
  const int64_t num_matrices = self.size(0);
  const int64_t dim_i = self.size(1);
  const int64_t dim_k = mat2.size(2);
  Tensor out = at::empty({num_matrices, dim_k, dim_i}, mat2.options(), at::MemoryFormat::Contiguous);
  return bmm_out_sparse_cuda(self, mat2, out);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
// One thread per matrix: branchless binary search over the sorted 1-D
// first-dim indices for the LAST element belonging to `target_mat_num`.
// Writes that element's position into mat_el_end_indices, or -1 when the
// matrix has no nonzeros.
// The loop intentionally recomputes mid AFTER the final trim, so the value
// left in mid_idx/mid_val when the loop exits is the candidate answer.
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
// trim_right: last occurrence must be to the left of mid
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
// NOTE(review): when the search window collapses (start > end) this still
// recomputes mid and reads it; if target is smaller than every element,
// mid_idx becomes -1 and this reads one element before the array — confirm
// whether indices always start at matrix 0 or guard this read.
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
// found iff mid holds the target and is its last occurrence
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
// Host launcher for search_end_matrix_indices_cuda_kernel.
// mat_el_end_indices must be a device buffer of num_matrices int64s;
// indices_1D is the sorted first-dim index tensor of the coalesced input.
// Synchronizes the device before returning so results are ready for the
// caller's hipMemcpy.
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
// NOTE(review): return codes of hipGetDevice / hipDeviceSynchronize are not
// checked here, unlike other launch sites in this file.
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
// fixed 64-thread blocks; +1 block covers the remainder (and may launch one
// empty block when num_matrices is a multiple of 64 — harmless, the kernel
// bounds-checks)
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
}
// Maps a tensor's scalar type to the matching hipDataType.
// Only float32 and float64 are accepted; any other dtype raises.
hipDataType getTensorCudaDataType(Tensor self) {
  const auto stype = self.scalar_type();
  if (stype == ScalarType::Float) {
    return HIP_R_32F;
  }
  if (stype == ScalarType::Double) {
    return HIP_R_64F;
  }
  TORCH_CHECK(false, "Tensor types must be either float32 or float64");
  return HIP_R_32F; // unreachable; silences missing-return warnings
}
#endif
// Batched sparse (COO) x dense matmul via cusparseSpMM, one 2-D matmul per
// batch element. The result is computed into a (b, k, i) contiguous buffer
// and transposed to (b, i, k) at the end, because cusparse emits
// column-major output. Unsupported on HIP and on Windows with CUDA < 11.
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
// Trivially-zero result: skip the cusparse machinery entirely.
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = self.coalesce();
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
// Per-matrix end positions (inclusive; -1 = empty matrix), computed on
// device and copied back so the host loop can slice the COO arrays.
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
{
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
AT_CUDA_CHECK(hipMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
));
}
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
// Workspace buffer is grown monotonically and reused across iterations.
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
::c10::DataPtr dataPtr;
// See Note [Enabling Deterministic Operations]
bool deterministic = globalContext().deterministicAlgorithms();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
// convert inclusive end position to exclusive
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
// only reallocate when the current buffer is too small
if (required_workspace_buffer_size > workspace_buffer_size) {
workspace_buffer_size = required_workspace_buffer_size;
dataPtr = allocator.allocate(workspace_buffer_size);
workspace_buffer = dataPtr.get();
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
// empty matrix in the batch: zero its output slice
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
#include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
// Compresses COO row indices into a CSR row-pointer array.
// Returns an int32 CUDA tensor of length dim+1 (cusparse requires 32-bit
// indices, hence the downcast copy of rowIndices first).
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
  Tensor row_indices_int = at::empty({rowIndices.size(0)}, CUDA(kInt));
  row_indices_int.copy_(rowIndices);
  Tensor csr = at::empty({dim + 1}, CUDA(kInt));
  sparse::cuda::Xcoo2csr(row_indices_int.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
  return csr;
}
}
// Core of sparse-dense addmm: r_ = beta * t + alpha * (CSR sparse) @ dense,
// executed via cusparse csrmm2. Handles the layout gymnastics cusparse
// requires: output must be column-major, and the dense operand is either
// passed as-is (column-major), flagged transposed (row-major), or made
// contiguous. Requires nnz > 0 (asserted).
void s_addmm_out_csr_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& crow_indices, Tensor& col_indices, Tensor& values, const Tensor& dense) {
TORCH_INTERNAL_ASSERT(nnz > 0);
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
Tensor r__;
// beta == 0 lets us skip copying t into the output
if (cast_beta == scalar_t(0)) {
r_.zero_();
} else if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
// r__ must be column-major for csrmm2; clone through a transpose if not
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// Note: This storage arrangement is preferred due to most of the CUDA kernels handle only contiguous tensors
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
// already column-major
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) == dense.size(1)) {
// row-major: let cusparse treat it as a transposed column-major matrix
transpose_dense = 't';
dense_ = dense;
} else {
// arbitrary strides: materialize a contiguous (row-major) copy
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
crow_indices.data_ptr<int32_t>(),
col_indices.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
// copy back when we had to compute into a column-major clone
if (!is_same_tensor(r__, r_)) {
r_.copy_(r__);
}
}
);
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// COO front-end for the CSR addmm worker: splits the 2xnnz index matrix into
// row/column components, compresses rows to CSR, downcasts columns to int32
// (cusparse requirement), and forwards everything to the CSR worker.
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
  Tensor row_indices = indices.select(0, 0);
  Tensor col_indices_long = indices.select(0, 1);
  Tensor crow_indices = _to_csr_int(row_indices, m, nnz);
  Tensor col_indices = at::empty({col_indices_long.size(0)}, indices.options().dtype(kInt));
  col_indices.copy_(col_indices_long);
  s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
// r_ = beta * t + alpha * (sparse mxk) @ (dense kxn), no broadcasting.
// Validates devices and shapes, coalesces the sparse operand, short-circuits
// the nnz == 0 case (pure beta * t), and otherwise delegates to the worker.
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
// empty sparse operand: result reduces to beta * t
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
// Broadcasting out-variant of addmm: expands `self` to the (m, n) result
// shape, then forwards to the non-broadcasting implementation.
Tensor& addmm_out_sparse_dense_cuda(
    const Tensor& self,
    const SparseTensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha,
    Tensor& result
) {
  auto self_expanded = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_out_sparse_dense_cuda(result, *self_expanded, mat1, mat2, beta, alpha);
}
// Functional (non-broadcasting) addmm: allocates an empty result with t's
// options and fills it through the out-variant.
Tensor s_addmm_sparse_dense_cuda(
    const Tensor& t,
    const SparseTensor& sparse,
    const Tensor& dense,
    const Scalar& beta,
    const Scalar& alpha
) {
  Tensor result = at::empty({0}, t.options());
  s_addmm_out_sparse_dense_cuda(result, t, sparse, dense, beta, alpha);
  return result;
}
// Broadcasting functional addmm: expands `self` to the result shape and
// forwards to the non-broadcasting functional variant.
Tensor addmm_sparse_dense_cuda(
    const Tensor& self,
    const SparseTensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha
) {
  auto self_expanded = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_sparse_dense_cuda(*self_expanded, mat1, mat2, beta, alpha);
}
// In-place addmm: uses `t` as both the additive input and the output of the
// out-variant. Deliberately non-broadcasting (see note below).
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
// hspmm: sparse (m x k) @ dense (k x n) -> hybrid sparse result with 1 sparse
// dim and 1 dense dim (one dense row per sparse nonzero row). Strategy:
// save the row indices, relabel the clone's rows as 0..nnz-1 so addmm packs
// the product rows densely, then pair the saved indices with those rows.
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
// shrink newSparse's first dim to nnz so it is (nnz x k) for the matmul
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
// Functional hspmm: allocates an empty sparse result with the input's
// options and fills it via the out-variant.
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
  SparseTensor result = at::empty({0}, sparse.options());
  hspmm_out_sparse_cuda(sparse, dense, result);
  return result;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
// Element-wise functor for the sparse apply kernels below:
// two-pointer form accumulates `val * in` into `out` in place,
// three-pointer form writes `in1 + val * in2` out-of-place.
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
// scale factor applied to the sparse operand
T val;
};
// r_ = dense + value * sparse (dense output). Promotes both operands to a
// common dtype, then either launches a specialized element-wise kernel when
// the sparse operand is coalesced, or falls back to flatten + index_add_.
// No broadcasting: shapes must match exactly.
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
// empty sparse operand: result is just a copy of dense
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
// compute in commonDtype, converting back into r_ at the end if needed
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
// scalar values: one thread per nonzero
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
// hybrid values: one block's worth of threads per nonzero slice
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} else {
// uncoalesced: flatten sparse dims to 1-D row indices and use index_add_,
// which correctly accumulates duplicate indices
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(cudaGetLastError());
// write the commonDtype result back into the caller's output dtype
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
// r_ = t + value * src for two sparse tensors (falls through to the dense
// overload when t is dense). Instead of merging nonzeros it concatenates
// both operands' indices/values, leaving an uncoalesced result — cheaper
// for gradient accumulation — and only coalesces when nnz outgrows numel.
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
// trivial cases: one operand has no nonzeros
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
// skip the multiply when the scale factor is exactly one
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
// Element-wise functor for sparse multiply apply kernels:
// two-pointer form multiplies into `out` in place,
// three-pointer form writes `in1 * in2` out-of-place.
template <typename T>
struct TensorMulOp {
__device__ __forceinline__ void operator()(T* out, T* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 * *in2;
}
};
// r_ = t_ * src_ element-wise for two sparse CUDA tensors of equal shape.
// Both operands are coalesced first, their index sets are intersected on the
// GPU, and a coalesced result is written into r_.
SparseTensor& mul_out_sparse_cuda(const SparseTensor& t_, const SparseTensor& src_, SparseTensor& r_) {
  // 0-dim operands degenerate to scalar multiplication.
  if (src_.dim() == 0) {
    return mul_out_sparse_zerodim(r_, t_, src_);
  } else if (t_.dim() == 0) {
    return mul_out_sparse_zerodim(r_, src_, t_);
  }
  TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
  TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
  TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
  TORCH_CHECK(cuda::check_device({r_, t_, src_}));
  TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
  // The intersection kernels below assume sorted, duplicate-free indices.
  SparseTensor t = t_.coalesce();
  SparseTensor src = src_.coalesce();
  // Multiplying by an all-zero sparse tensor yields all zeros.
  if (src_._nnz() == 0 || t_._nnz() == 0) {
    r_.resize_as_(src_);
    return r_.zero_();
  }
  // saving those because they can be overwritten when doing in-place operations
  int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
  int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
  int64_t sparse_dim = src.sparse_dim();
  // Do the multiply under the promoted dtype; check it is castable to out.
  auto commonDtype = at::result_type(t, src);
  TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
  Tensor t_indices_ = t._indices().contiguous();
  Tensor t_values_ = t._values().to(commonDtype);
  Tensor s_indices_ = src._indices().contiguous();
  Tensor s_values_ = src._values().to(commonDtype);
  // Result buffers sized for the worst case (full overlap of index sets).
  Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
  r_.resize_as_(src);
  Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
  // Number of scalar elements per nnz entry (dense part of each value).
  int64_t valueSize = t_values_.stride(0);
  const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
  dim3 grid;
  int curDevice = -1;
  cudaGetDevice(&curDevice);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
  TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
  // Actual nnz of the intersection is produced on-device and read back below.
  Tensor resultNnz = at::empty({1}, CUDA(kLong));
  AT_DISPATCH_ALL_TYPES_AND(
    at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
      // Multiply value blocks at positions where both index sets agree.
      apply::valueSparseIntersectionKernel<<<grid, block, 0, stream>>>(
        TensorMulOp<scalar_t>(),
        I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
        V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
        static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
      C10_CUDA_KERNEL_LAUNCH_CHECK();
      // Single-thread kernel: writes the intersected indices and the final
      // nnz count into resultNnz.
      apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
        <<<1, 1, 0, stream>>>(
          I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
          // reinterpret_cast shenanigans, because we don't actually have
          // unsigned tensors...
          static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    });
  // Cast values back to the requested output dtype and install the buffers.
  r_values_ = r_values_.to(r_.scalar_type());
  get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
  // sync! (surely there is a more idiomatic way to do this...)
  // The device-to-host copy below blocks until the kernels above finish.
  Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
  cpu_resultNnz.copy_(resultNnz);
  get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
  // Intersection of two coalesced inputs is itself coalesced.
  return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
// One thread per input nnz: copy the matching grad value row into
// grad_input_values, or zero-fill when the input index has no counterpart
// in grad. input_indices_pos holds, for each input index, its lower_bound
// position within the sorted 1-D grad indices.
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
  int64_t total_threads,                                   // == input nnz
  const TensorInfo<int64_t, int64_t> grad_indices_ti,      // flattened, sorted grad indices
  const TensorInfo<int64_t, int64_t> input_indices_ti,     // flattened input indices
  const TensorInfo<int64_t, int64_t> input_indices_pos_ti, // lower_bound positions
  const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
  TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= total_threads) return;  // guard the grid tail
  const int64_t j = input_indices_pos_ti.data[i];  // candidate slot in grad
  bool has_match = false;
  // NOTE(review): lower_bound can return grad_nnz when the input index is
  // larger than every grad index, which would make this read one past the
  // end of grad_indices -- confirm the caller guarantees j < grad_nnz.
  if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
    has_match = true;
  }
  // Each nnz entry owns stride[0] contiguous scalars (its dense block).
  int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
  int64_t out_start = i * grad_input_values_stride0;
  int64_t out_end = (i + 1) * grad_input_values_stride0;
  int64_t in_start = j * grad_values_expand_ti.strides[0];
  if (has_match) {
    // Copy the whole dense block of the matching grad entry.
    for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
      grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
    }
  }
  else {
    // No gradient flowed to this input position.
    for (int64_t out_i = out_start; out_i < out_end; out_i++) {
      grad_input_values_ti.data[out_i] = scalar_t(0);
    }
  }
}
// Backward of sparse.sum(dims): scatter grad back onto the sparsity pattern
// of input_. See NOTE [ sparse.sum() backward ] for the overall scheme.
// Three regimes: all sparse dims summed (grad is dense, broadcast it),
// only dense dims summed (expand grad values), or some sparse dims summed
// (match flattened indices on the GPU and gather per-nnz).
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
  TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
  TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
  auto input = input_.coalesce();
  const int64_t input_dim = input.dim();
  auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
  auto dims_to_sum_v = dims_to_sum.vec();
  maybe_wrap_dims(dims_to_sum_v, input_dim);
  Tensor input_indices = input._indices();
  Tensor input_values = input._values();
  IntArrayRef input_sizes = input.sizes();
  const int64_t input_sparse_dim = input.sparse_dim();
  const int64_t input_dense_dim = input.dense_dim();
  const int64_t input_nnz = input._nnz();
  // Partition the summed dims into sparse vs dense ones.
  int64_t sparse_dims_to_sum_size = 0;
  auto sparse_dims_to_keep_v = std::vector<int64_t>();
  auto dense_dims_to_sum_v = std::vector<int64_t>();
  for (int64_t d = 0; d < input_dim; d++) {
    if (dims_to_sum_b[d]) {
      if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
      // +1: values carry a leading nnz dimension, shifting dense dims by one.
      else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
    }
    else {
      if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
    }
  }
  const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
  const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
  const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
  if (sum_all_sparse_dim) {
    // Every sparse dim was reduced away, so grad is dense; broadcast it to
    // all nnz entries of input.
    TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
    auto grad_input_values = grad_;
    auto expand_size = input_values.sizes().vec();
    if (sum_dense_dim) {
      // Re-insert the summed dense dims as size-1 axes before expanding.
      auto dense_expand_size = std::vector<int64_t>(expand_size);
      dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
      for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
      grad_input_values = grad_input_values.expand(dense_expand_size);
    }
    grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
    return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
  }
  else {
    TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
    auto grad = grad_.coalesce();
    Tensor grad_indices = grad._indices();
    Tensor grad_values = grad._values();
    const int64_t grad_sparse_dim = grad.sparse_dim();
    const int64_t grad_nnz = grad._nnz();
    // Undo the dense-dim reduction by expanding grad values back to the
    // shape of input values.
    Tensor grad_values_expand = grad_values;
    if (sum_dense_dim) {
      auto expand_size = input_values.sizes().vec();
      if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
      for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
      grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
    }
    Tensor grad_input_values;
    if (!sum_sparse_dim) {
      // Sparsity pattern unchanged; the expanded values are the answer.
      grad_input_values = grad_values_expand;
    }
    else {
      // Sparse dims were summed: for each input nnz, look up whether grad
      // holds an entry at the kept-dims index and copy (or zero) its block.
      int curDevice = -1;
      cudaGetDevice(&curDevice);
      cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
      auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
      auto policy = thrust::cuda::par(allocator).on(stream);
      typedef thrust::device_ptr<int64_t> thrust_ptr;
      grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      AT_ASSERT(grad_input_values.is_cuda());
      // get 1D indices
      auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
      std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
      auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
      auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
      thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
      thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
      // store lower_bound of input indices at grad indices
      Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
      thrust::lower_bound(policy,
                          grad_indices_iter, grad_indices_iter + grad_nnz,
                          input_indices_iter, input_indices_iter + input_nnz,
                          input_indices_pos_iter);
      // config to run cuda kernel: one thread per input nnz
      int64_t total_threads = input_nnz;
      const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
      dim3 grid;
      TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
      auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
      auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
      auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
      AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
        auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
        auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
        _sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
          total_threads,
          grad_indices_ti,
          input_indices_ti,
          input_indices_pos_ti,
          grad_values_expand_ti,
          grad_input_values_ti
        );
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
    }
    // Result reuses input's sparsity pattern with the gathered values.
    return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
  }
}
// Out-of-place batched sparse-dense matmul. Allocates the transposed
// (b x k x i) buffer that the cuSPARSE-backed out-variant expects and
// forwards to it.
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
  const int64_t num_mats = self.size(0);
  const int64_t rows = self.size(1);
  const int64_t cols = mat2.size(2);
  Tensor result = at::empty({num_mats, cols, rows}, mat2.options(), at::MemoryFormat::Contiguous);
  return bmm_out_sparse_cuda(self, mat2, result);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
// One thread per batch matrix: binary-search the sorted 1-D tensor of dim-0
// (matrix) indices for the LAST element equal to target_mat_num, writing its
// position into mat_el_end_indices[target_mat_num], or -1 if that matrix has
// no nonzeros. Assumes indices_1D is sorted ascending (the caller coalesces)
// and num_elements >= 1.
__global__ void search_end_matrix_indices_cuda_kernel(
  int64_t* mat_el_end_indices,
  int64_t num_matrices,
  const TensorInfo<int64_t, int64_t> indices_1D_ti,
  const int64_t num_elements
){
  const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
  if (target_mat_num >= num_matrices) return;  // grid tail guard
  const int64_t* indices_1D = indices_1D_ti.data;
  const int64_t indices_1D_stride = indices_1D_ti.strides[0];
  int64_t start_idx = 0;
  int64_t end_idx = num_elements - 1;
  int64_t mid_idx = (start_idx + end_idx) >> 1;
  int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
  bool found;
  // Right-biased binary search: when mid_val <= target we advance start past
  // mid, so the search converges on the last occurrence of target.
  while (
    start_idx <= end_idx
  ) {
    bool trim_right = mid_val > target_mat_num;
    int64_t mid_idx_minus_1 = mid_idx - 1;
    int64_t mid_idx_plus_1 = mid_idx + 1;
    end_idx = trim_right ? mid_idx_minus_1 : end_idx;
    start_idx = trim_right ? start_idx : mid_idx_plus_1;
    // NOTE(review): when the target is smaller than every element, end_idx
    // can reach -1 making mid_idx -1 and the load below read one element
    // before the buffer -- verify this cannot happen for coalesced inputs.
    mid_idx = (start_idx + end_idx) >> 1;
    mid_val = indices_1D[mid_idx*indices_1D_stride];
  }
  // Found iff we landed on the target and it is the last such element.
  found = (mid_val == target_mat_num)
    && (
      (mid_idx == (num_elements-1))
      || (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
    );
  mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
// Search through a 1D tensor of sorted sparse matrix indices to find the end
// index for each matrix. Launches one thread per matrix and synchronizes so
// the caller may read mat_el_end_indices through a plain device pointer.
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
  if (num_matrices == 0) {
    return;  // nothing to fill; also avoids an invalid zero-sized grid launch
  }
  int curDevice = -1;
  AT_CUDA_CHECK(cudaGetDevice(&curDevice));  // was unchecked
  cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
  auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
  const int64_t block_size = 64;
  // Proper ceil-div: the old "(n / 64) + 1" launched a spurious extra block
  // whenever num_matrices was a multiple of 64.
  const int64_t grid_size = (num_matrices + block_size - 1) / block_size;
  int64_t num_elements = indices_1D.size(0);
  search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
    mat_el_end_indices,
    num_matrices,
    indices_1D_ti,
    num_elements
  );
  C10_CUDA_KERNEL_LAUNCH_CHECK();
  // Block until the results are visible to the host-side cudaMemcpy that
  // follows in the caller. Error status was previously dropped here.
  AT_CUDA_CHECK(cudaDeviceSynchronize());
}
// Translate a tensor's scalar type into the cudaDataType enum expected by
// the cuSPARSE generic API. Only float32 and float64 are supported; any
// other dtype raises.
cudaDataType getTensorCudaDataType(Tensor self) {
  cudaDataType dtype{};
  if (self.scalar_type() == ScalarType::Float) {
    dtype = CUDA_R_32F;
  } else if (self.scalar_type() == ScalarType::Double) {
    dtype = CUDA_R_64F;
  } else {
    TORCH_CHECK(false, "Tensor types must be either float32 or float64");
  }
  return dtype;
}
#endif
// Batched sparse @ dense: self is (b x i x j) sparse COO with no dense dims,
// mat2 is (b x j x k) dense; writes (b x i x k) into result. Implemented as
// one cusparseSpMM per batch member over that matrix's slice of nnz.
// cuSPARSE produces column-major output, so result is filled as a contiguous
// (b x k x i) buffer and lazily transposed at the end.
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
#if defined __HIP_PLATFORM_HCC__
  TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
  TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
  TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
  TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
  TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
  TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
  TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
  TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
  int64_t num_matrices = self.size(0);
  int64_t dim_i = self.size(1);
  int64_t dim_j = self.size(2);
  int64_t dim_k = mat2.size(2);
  // Note the (k, i) layout: transposed back at the end of the function.
  result.resize_({num_matrices, dim_k, dim_i});
  // Empty operand (or empty inner dims) -> all-zero result.
  if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
    result.zero_().transpose_(1, 2);
    return result;
  }
  Tensor tmp_result;
  bool need_copy_result;
  // If the result tensor is contiguous, we can just write results directly to it.
  // Otherwise, we'll need to write results to a temp buffer and then copy.
  if (result.is_contiguous()) {
    tmp_result = result;
    need_copy_result = false;
  } else {
    tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
    need_copy_result = true;
  }
  // Dense matrices have to be contiguous for cusparseSpMM to work
  const Tensor mat2_contig = mat2.contiguous();
  auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
  // First need to coalesce to get all of the first dimension indices
  // in order since we'll be sending each matrix into the MM operation
  SparseTensor self_coalesced = self.coalesce();
  int64_t nnz = self_coalesced._nnz();
  Tensor indices = self_coalesced._indices();
  Tensor values = self_coalesced._values();
  Tensor indices_dim0 = indices[0];
  // Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
  // only supports 32-bit indices
  Tensor indices_dim1 = indices[1].to(ScalarType::Int);
  Tensor indices_dim2 = indices[2].to(ScalarType::Int);
  // Per-matrix end offsets into the sorted nnz array, computed on device
  // and copied back so the host loop can slice each matrix's entries.
  std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
  {
    auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
    auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
    int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
    search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
    AT_CUDA_CHECK(cudaMemcpy(
      mat_el_end_indices_host.get(),
      mat_el_end_indices_device,
      num_matrices*sizeof(int64_t),
      cudaMemcpyDeviceToHost
    ));
  }
  // Need a pointer to an array to access within a lambda
  int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
  Scalar beta = 0;
  Scalar alpha = 1;
  int64_t mat_el_begin_idx = 0;
  // Workspace is grown monotonically and reused across batch members.
  size_t workspace_buffer_size = 0;
  void* workspace_buffer = nullptr;
  auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
  ::c10::DataPtr dataPtr;
  // See Note [Enabling Deterministic Operations]
  bool deterministic = globalContext().deterministicAlgorithms();
  cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
  // Iterate through each set of 2D matrices within the 3D
  // tensor inputs, performing a matrix multiply with each
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
    values.scalar_type(), "bmm_sparse_cuda", [&] {
      scalar_t alpha_val = alpha.to<scalar_t>();
      scalar_t beta_val = beta.to<scalar_t>();
      uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
      uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
      scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
      scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
      scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
      for (
        int64_t cur_mat_num = 0;
        (cur_mat_num < num_matrices);
        cur_mat_num++
      ) {
        int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
        // -1 marks a batch member with no nonzeros (handled in the else).
        if (mat_el_end_idx != -1) {
          mat_el_end_idx++;  // convert inclusive end index to exclusive
          // Create tensors to view just the current set of matrices
          int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
          cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
          uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
          uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
          scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
          // COO descriptor over this matrix's slice of the nnz arrays.
          cusparseSpMatDescr_t sparse_descr;
          TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
            &sparse_descr,
            dim_i,
            dim_j,
            sparse_nnz,
            reinterpret_cast<void*>(row_indices_ptr),
            reinterpret_cast<void*>(col_indices_ptr),
            reinterpret_cast<void*>(values_ptr),
            CUSPARSE_INDEX_32I,
            CUSPARSE_INDEX_BASE_ZERO,
            cuda_data_type
          ));
          // mat2 slice viewed column-major as (k x j); combined with the
          // OP_TRANSPOSE below this is the row-major (j x k) matrix.
          scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
          cusparseDnMatDescr_t dense_descr;
          TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
            &dense_descr,
            dim_k,
            dim_j,
            dim_k,
            reinterpret_cast<void*>(mat2_ptr),
            cuda_data_type,
            CUSPARSE_ORDER_COL
          ));
          // Output slice, column-major (i x k) == row-major (k x i).
          scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
          cusparseDnMatDescr_t result_descr;
          TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
            &result_descr,
            dim_i,
            dim_k,
            dim_i,
            reinterpret_cast<void*>(result_ptr),
            cuda_data_type,
            CUSPARSE_ORDER_COL
          ));
          // Query workspace, growing the shared buffer only when needed.
          size_t required_workspace_buffer_size = 0;
          TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
            cusparse_handle,
            CUSPARSE_OPERATION_NON_TRANSPOSE,
            CUSPARSE_OPERATION_TRANSPOSE,
            (void*)&alpha_val,
            sparse_descr,
            dense_descr,
            (void*)&beta_val,
            result_descr,
            cuda_data_type,
            mm_alg,
            &required_workspace_buffer_size
          ));
          if (required_workspace_buffer_size > workspace_buffer_size) {
            workspace_buffer_size = required_workspace_buffer_size;
            // dataPtr keeps the allocation alive across loop iterations.
            dataPtr = allocator.allocate(workspace_buffer_size);
            workspace_buffer = dataPtr.get();
          }
          TORCH_CUDASPARSE_CHECK(cusparseSpMM(
            cusparse_handle,
            CUSPARSE_OPERATION_NON_TRANSPOSE,
            CUSPARSE_OPERATION_TRANSPOSE,
            (void*)&alpha_val,
            sparse_descr,
            dense_descr,
            (void*)&beta_val,
            result_descr,
            cuda_data_type,
            mm_alg,
            workspace_buffer
          ));
          TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
          TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
          TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
          mat_el_begin_idx = mat_el_end_idx;
        } else {
          // No nonzeros in this batch member: its product is zero.
          tmp_result[cur_mat_num].zero_();
        }
      }
    }
  );
  if (need_copy_result) {
    result.copy_(tmp_result);
  }
  // Need to transpose the result matrices since cusparse stores
  // them in column-major order in memory
  result.transpose_(1,2);
#else
  TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
  return result;
}
}} // namespace at::native
|
23d772b4f6b410b8d73e047087cecb9f1d4eff88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define SIZE 300000
#define BLOCK_SIZE 128
// Per-block max reduction.
// Each block reduces up to blockDim.x elements of A in shared memory and
// writes its maximum to B[blockIdx.x]; the host finishes the reduction over
// the per-block partials. Requires blockDim.x to be a power of two and
// <= BLOCK_SIZE.
// Fixes two defects in the previous version:
//  * every block wrote its partials into the SAME B[0..blockDim.x-1] slots,
//    racing with all other blocks;
//  * threads past the end of A read out of bounds (no guard against SIZE).
__global__ void reduction(int *A, int *B){
    __shared__ int cache[BLOCK_SIZE];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Pad out-of-range lanes with INT_MIN so they never win the max.
    cache[threadIdx.x] = (tid < SIZE) ? A[tid] : (-2147483647 - 1);
    __syncthreads();
    // Tree reduction in shared memory; stride halves every step.
    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride && cache[threadIdx.x] < cache[threadIdx.x + stride]) {
            cache[threadIdx.x] = cache[threadIdx.x + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        B[blockIdx.x] = cache[0];
    }
}
// Fill A with random values, reduce per-block maxima on the GPU, finish the
// max on the host, and report the result plus elapsed time.
int main(){
    // ~1.2 MB buffer; static keeps it off the (potentially small) stack.
    static int A[SIZE];
    int * B;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    srand(time(NULL));
    int * d_A, * d_B;
    size_t size = SIZE*sizeof(int);
    // One partial result per block: ceil(SIZE / BLOCK_SIZE).
    // The previous computation divided by BLOCK_SIZE*2 and then tested the
    // wrong remainder, so over half the input was never covered by a thread.
    int GRIDSIZE = (SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
    B = (int *) malloc(sizeof(int)*GRIDSIZE);
    dim3 dimBlock(BLOCK_SIZE,1,1);
    dim3 dimGrid(GRIDSIZE,1,1);
    for(int i = 0; i < SIZE; i++){
        A[i] = rand()%10000;
        if(i<GRIDSIZE)
            B[i] = 0;
    }
    hipEventRecord(start);
    hipMalloc((void **)&d_A, size);
    hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
    hipMalloc((void **)&d_B, GRIDSIZE*sizeof(int));
    hipLaunchKernelGGL(( reduction), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B);
    hipEventRecord(stop);
    // Blocking copy: also synchronizes with the kernel before B is read.
    hipMemcpy(B, d_B, GRIDSIZE*sizeof(int), hipMemcpyDeviceToHost);
    // Finish the reduction over the per-block partial maxima on the host.
    for(int i = 1; i < GRIDSIZE; i++){
        if(B[0] < B[i])
            B[0] = B[i];
    }
    hipEventSynchronize(stop);
    float elapsed = 0;
    hipEventElapsedTime(&elapsed, start, stop);
    printf("Using Grid Size [%d, %d] and Block Size [%d, %d]..\n", dimGrid.x, dimGrid.y,dimBlock.x, dimBlock.y);
    printf("maximum : %d\n", B[0]);
    printf("Execution time : %f ms\n", elapsed);
    // Release resources (B and the two events were previously leaked).
    free(B);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_A);
    hipFree(d_B);
}
| 23d772b4f6b410b8d73e047087cecb9f1d4eff88.cu | #include <stdio.h>
#include <stdlib.h>
#define SIZE 300000
#define BLOCK_SIZE 128
// Per-block max reduction.
// Each block reduces up to blockDim.x elements of A in shared memory and
// writes its maximum to B[blockIdx.x]; the host finishes the reduction over
// the per-block partials. Requires blockDim.x to be a power of two and
// <= BLOCK_SIZE.
// Fixes two defects in the previous version:
//  * every block wrote its partials into the SAME B[0..blockDim.x-1] slots,
//    racing with all other blocks;
//  * threads past the end of A read out of bounds (no guard against SIZE).
__global__ void reduction(int *A, int *B){
    __shared__ int cache[BLOCK_SIZE];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Pad out-of-range lanes with INT_MIN so they never win the max.
    cache[threadIdx.x] = (tid < SIZE) ? A[tid] : (-2147483647 - 1);
    __syncthreads();
    // Tree reduction in shared memory; stride halves every step.
    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride && cache[threadIdx.x] < cache[threadIdx.x + stride]) {
            cache[threadIdx.x] = cache[threadIdx.x + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        B[blockIdx.x] = cache[0];
    }
}
// Fill A with random values, reduce per-block maxima on the GPU, finish the
// max on the host, and report the result plus elapsed time.
int main(){
    // ~1.2 MB buffer; static keeps it off the (potentially small) stack.
    static int A[SIZE];
    int * B;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    srand(time(NULL));
    int * d_A, * d_B;
    size_t size = SIZE*sizeof(int);
    // One partial result per block: ceil(SIZE / BLOCK_SIZE).
    // The previous computation divided by BLOCK_SIZE*2 and then tested the
    // wrong remainder, so over half the input was never covered by a thread.
    int GRIDSIZE = (SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
    B = (int *) malloc(sizeof(int)*GRIDSIZE);
    dim3 dimBlock(BLOCK_SIZE,1,1);
    dim3 dimGrid(GRIDSIZE,1,1);
    for(int i = 0; i < SIZE; i++){
        A[i] = rand()%10000;
        if(i<GRIDSIZE)
            B[i] = 0;
    }
    cudaEventRecord(start);
    cudaMalloc((void **)&d_A, size);
    cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_B, GRIDSIZE*sizeof(int));
    reduction<<<dimGrid,dimBlock>>>(d_A, d_B);
    cudaEventRecord(stop);
    // Blocking copy: also synchronizes with the kernel before B is read.
    cudaMemcpy(B, d_B, GRIDSIZE*sizeof(int), cudaMemcpyDeviceToHost);
    // Finish the reduction over the per-block partial maxima on the host.
    for(int i = 1; i < GRIDSIZE; i++){
        if(B[0] < B[i])
            B[0] = B[i];
    }
    cudaEventSynchronize(stop);
    float elapsed = 0;
    cudaEventElapsedTime(&elapsed, start, stop);
    printf("Using Grid Size [%d, %d] and Block Size [%d, %d]..\n", dimGrid.x, dimGrid.y,dimBlock.x, dimBlock.y);
    printf("maximum : %d\n", B[0]);
    printf("Execution time : %f ms\n", elapsed);
    // Release resources (B and the two events were previously leaked).
    free(B);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
}
|
20b65928777f140f48a33ede4091e1e685fd15ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// One-thread-per-element copy of g_idata into g_odata; threads past N exit.
// Index arithmetic is widened to size_t BEFORE the multiply: the previous
// form multiplied two unsigned ints and could wrap for grids whose total
// thread count exceeds 2^32, only then assigning into size_t.
__global__ void
simple_copy_kernel( float* g_idata, float* g_odata, size_t N)
{
    size_t gtid = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (gtid < N)
        g_odata[gtid] = g_idata[gtid];
}
// Baseline benchmark: allocate, upload, device-to-device copy via kernel,
// and download, entirely on one GPU (d1). d2 is accepted but unused so the
// three run_* variants share a single signature.
void run_device_mem_local_to_gpu(float* h_idata, size_t h_size, size_t d1, size_t d2) {
    hipDeviceReset();
    const int thread_count = 32;
    // ceil(h_size / thread_count): one thread per element; the kernel
    // bounds-checks the grid tail.
    size_t grid_size = (h_size + thread_count - 1) / thread_count;
    printf("GRID SIZE %zu\n", grid_size);
    /*
     * SWITCH TO DEVICE 1
     */
    hipSetDevice(d1);
    // size_t (was unsigned int): sizeof(float)*h_size overflows 32 bits past
    // ~1G elements, and the old "%lu" format on an unsigned int was UB.
    size_t mem_size = sizeof( float) * h_size;
    printf("TOTAL MEM PER MALLOC %zu\n\n", mem_size);
    printf("threads %d block_count %zu\n\n", thread_count, grid_size);
    // allocate device memory
    float* d_idata;
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size));
    hipError_t cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // copy host memory to device
    CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, mem_size, hipMemcpyHostToDevice) );
    cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // allocate device memory for result
    float* d_odata;
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size));
    cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    dim3 grid( grid_size, 1, 1);
    dim3 threads( thread_count, 1, 1);
    // execute the copy kernel on d1
    hipLaunchKernelGGL(( simple_copy_kernel), dim3(grid), dim3(threads), 0, 0, d_idata, d_odata,h_size);
    cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // check if kernel execution generated and error
    CUT_CHECK_ERROR("Kernel execution failed");
    // allocate mem for the result on host side
    float* h_odata = (float*) malloc( mem_size);
    // copy result from device to host
    CUDA_SAFE_CALL( hipMemcpy( h_odata, d_odata, mem_size,
                               hipMemcpyDeviceToHost) );
    cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // cleanup memory
    free( h_odata);
    CUDA_SAFE_CALL(hipFree(d_idata));
    CUDA_SAFE_CALL(hipFree(d_odata));
}
// Copies data with a kernel on device d1, then launches a second kernel on
// device d2 that reads d1's buffer directly over peer-to-peer mappings, and
// verifies both outputs match on the host.
void run_remote_peer_to_peer_memory_access(float* h_idata, size_t h_size,size_t d1, size_t d2) {
    const int thread_count = 32;
    // Proper ceil-div for the BLOCK count. The previous code multiplied the
    // rounded-up block count by thread_count again, launching 32x too many
    // blocks whenever h_size was not an exact multiple of 32.
    size_t grid_size = (h_size + thread_count - 1) / thread_count;
    // size_t (was unsigned int): avoids overflow past ~1G elements and
    // matches the %zu format below (the old %lu on unsigned int was UB).
    size_t mem_size = sizeof( float) * h_size;
    printf("TOTAL MEM PER MALLOC %zu\n\n", mem_size);
    hipSetDevice(d1);
    /*
     * DEVICE MEMORY ALLOCATIONS FOR DEVICE ONE
     */
    float* d_idata, *d_odata;
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size));
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size));
    /*
     * MEMORY COPIES FOR DEVICE ONE
     **/
    CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, mem_size,
                               hipMemcpyHostToDevice) );
    dim3 grid( grid_size, 1, 1);
    dim3 threads( thread_count, 1, 1);
    // local copy on d1
    hipLaunchKernelGGL(( simple_copy_kernel), dim3(grid), dim3(threads),0, 0, d_idata, d_odata,h_size);
    hipError_t cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    CUT_CHECK_ERROR("Kernel execution failed");
    // change GPU device
    hipSetDevice(d2);
    /*
     * DEVICE MEMORY ALLOCATIONS FOR DEVICE TWO
     **/
    float* d_odata_two;
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata_two, mem_size));
    // Allow d2 to map d1's memory. The previous call hard-coded peer device
    // 0, which faults the kernel below whenever d1 != 0.
    hipDeviceEnablePeerAccess(d1, 0);
    // Kernel on d2 reads d_idata (resident on d1) through the peer mapping.
    hipLaunchKernelGGL(( simple_copy_kernel), dim3(grid), dim3(threads),0, 0, d_idata, d_odata_two,h_size);
    cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // check if kernel execution generated and error
    CUT_CHECK_ERROR("Kernel execution failed");
    /*
     * HOST MEMORY COPIES FROM DEVICE ONE
     **/
    hipSetDevice(d1);
    float* h_odata = (float*) malloc( mem_size);
    CUDA_SAFE_CALL( hipMemcpy( h_odata, d_odata, mem_size,
                               hipMemcpyDeviceToHost) );
    /*
     * HOST MEMORY COPIES FROM DEVICE TWO
     **/
    hipSetDevice(d2);
    float* h_odata_two = (float*) malloc( mem_size);
    CUDA_SAFE_CALL( hipMemcpy( h_odata_two, d_odata_two, mem_size,
                               hipMemcpyDeviceToHost) );
    // Both paths copied the same input, so the outputs must agree.
    if(memcmp(h_odata,h_odata_two,mem_size) != 0) {
        printf("FAILED TO BE EQUAL\n");
    }
    /*
     * MEMORY CLEAN UP
     **/
    free( h_odata);
    free( h_odata_two);
    CUDA_SAFE_CALL(hipFree(d_idata));
    CUDA_SAFE_CALL(hipFree(d_odata));
    CUDA_SAFE_CALL(hipFree(d_odata_two));
}
// Copies data with a kernel on d1, stages the result through host memory,
// re-uploads it to d2, runs the copy kernel there, and verifies both device
// outputs match. (No peer-to-peer: all traffic goes via the host.)
void run_remote_memory_access_using_data_copy(float* h_idata, size_t h_size, size_t d1, size_t d2) {
    const int thread_count = 32;
    // Proper ceil-div for the BLOCK count. The previous code multiplied the
    // rounded-up block count by thread_count again, launching 32x too many
    // blocks whenever h_size was not an exact multiple of 32.
    size_t grid_size = (h_size + thread_count - 1) / thread_count;
    // size_t (was unsigned int): avoids overflow past ~1G elements and
    // matches the %zu format below (the old %lu on unsigned int was UB).
    size_t mem_size = sizeof( float) * h_size;
    printf("TOTAL MEM PER MALLOC %zu\n\n", mem_size);
    hipSetDevice(d1);
    // allocate device memory on d1
    float* d_idata, *d_odata;
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size));
    CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, mem_size,
                               hipMemcpyHostToDevice) );
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size));
    dim3 grid( grid_size, 1, 1);
    dim3 threads( thread_count, 1, 1);
    // local copy on d1
    hipLaunchKernelGGL(( simple_copy_kernel), dim3(grid), dim3(threads),0, 0, d_idata, d_odata, h_size);
    hipError_t cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
        printf("ERROR: %s\n\n",hipGetErrorString(cuerr));
    }
    // check if kernel execution generated and error
    CUT_CHECK_ERROR("Kernel execution failed");
    // stage d1's result through the host
    float* h_odata = (float*) malloc( mem_size);
    CUDA_SAFE_CALL( hipMemcpy( h_odata, d_odata, mem_size,
                               hipMemcpyDeviceToHost) );
    // change GPU device
    hipSetDevice(d2);
    float* h_odata_two = (float*) malloc( mem_size);
    // allocate device memory on d2 and re-upload the staged data
    float* d_idata_two, *d_odata_two;
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata_two, mem_size));
    CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata_two, mem_size));
    CUDA_SAFE_CALL( hipMemcpy( d_idata_two, h_odata, mem_size,
                               hipMemcpyHostToDevice) );
    hipLaunchKernelGGL(( simple_copy_kernel), dim3(grid), dim3(threads),0, 0, d_idata_two, d_odata_two, h_size);
    cuerr = hipGetLastError();
    if( cuerr != hipSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // check if kernel execution generated and error
    CUT_CHECK_ERROR("Kernel execution failed");
    CUDA_SAFE_CALL( hipMemcpy( h_odata_two, d_odata_two, mem_size,
                               hipMemcpyDeviceToHost) );
    // Both routes copied the same input, so the outputs must agree.
    if(memcmp(h_odata,h_odata_two,mem_size) != 0) {
        printf("FAILED TO BE EQUAL\n");
    }
    // cleanup memory
    free( h_odata);
    free( h_odata_two);
    CUDA_SAFE_CALL(hipFree(d_idata));
    CUDA_SAFE_CALL(hipFree(d_idata_two));
    CUDA_SAFE_CALL(hipFree(d_odata));
    CUDA_SAFE_CALL(hipFree(d_odata_two));
}
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: parse <mode> <num elements> <device1> <device2>, round the
// element count up to a warp multiple, zero-fill the host buffer, and
// dispatch to the selected benchmark variant.
int
main( int argc, char** argv)
{
    if (argc != 5) {
        printf("%s <memory access 1 local 2 peer-to-peer 3 peer-to-peer-memcpy> <num elements> <device 1 id> <device 2 id>\n", argv[0]);
        return 0;
    }
    CUT_DEVICE_INIT();
    const int mode = atoi(argv[1]);
    int element_count = atoi(argv[2]);
    const int dev_a = atoi(argv[3]);
    const int dev_b = atoi(argv[4]);
    // Round the element count up to the next multiple of the warp size.
    if ((element_count % 32) != 0) {
        element_count = (element_count / 32 + 1) * 32;
    }
    printf("number_elements: %d\n\n", element_count);
    // Zero-initialized host input shared by all three variants.
    float* h_idata = (float*) malloc( sizeof(float) * element_count);
    for( unsigned int i = 0; i < element_count; ++i)
    {
        h_idata[i] = 0;
    }
    if (mode == 1) {
        run_device_mem_local_to_gpu(h_idata, element_count, dev_a, dev_b);
    } else if (mode == 2) {
        run_remote_peer_to_peer_memory_access(h_idata, element_count, dev_a, dev_b);
    } else if (mode == 3) {
        run_remote_memory_access_using_data_copy(h_idata, element_count, dev_a, dev_b);
    } else {
        return -1;
    }
    free(h_idata);
    return 0;
}
| 20b65928777f140f48a33ede4091e1e685fd15ed.cu |
////////////////////////////////////////////////////////////////////////////////
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// Copy N floats from g_idata to g_odata, one element per thread.
// Launch with enough total threads to cover N; excess threads exit.
__global__ void
simple_copy_kernel( float* g_idata, float* g_odata, size_t N)
{
    // Widen to size_t BEFORE multiplying: the original computed
    // blockDim.x * blockIdx.x in 32-bit, which overflows for very
    // large grids even though the result was stored in a size_t.
    size_t gtid = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (gtid < N)
        g_odata[gtid] = g_idata[gtid];
}
// Mode 1: local device-memory copy on device d1.
// Allocates input/output buffers on d1, uploads h_idata (h_size floats),
// runs simple_copy_kernel on d1 and downloads the result.  d2 is unused.
void run_device_mem_local_to_gpu(float* h_idata, size_t h_size, size_t d1, size_t d2) {
    (void)d2; // this mode exercises a single device only
    cudaDeviceReset();
    // ceil-divide the element count by the 32-thread block size
    int thread_count = 32;
    int grid_size = (int)(h_size / thread_count);
    if ((h_size % thread_count) != 0) {
        grid_size += 1;
    }
    printf("GRID SIZE %d\n", grid_size);
    /*
     * SWITCH TO DEVICE 1
     */
    cudaSetDevice(d1);
    // size_t avoids overflow for large element counts; %zu matches the
    // type (the original printed an unsigned int with %lu — UB on LP64).
    size_t mem_size = sizeof( float) * h_size;
    printf("TOTAL MEM PER MALLOC %zu\n\n", mem_size);
    printf("threads %d block_count %d\n\n", thread_count, grid_size);
    // allocate device memory for the input
    float* d_idata;
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size));
    cudaError_t cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // copy host memory to device
    CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
    cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // allocate device memory for the result
    float* d_odata;
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size));
    cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    dim3 grid( grid_size, 1, 1);
    dim3 threads( thread_count, 1, 1);
    // execute the copy kernel on device d1
    printf("threads %d block_count %d\n\n", thread_count, grid_size);
    simple_copy_kernel<<< grid, threads>>>( d_idata, d_odata, h_size);
    cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // check if kernel execution generated an error
    CUT_CHECK_ERROR("Kernel execution failed");
    // allocate mem for the result on host side
    float* h_odata = (float*) malloc( mem_size);
    // copy result from device to host (synchronizes with the kernel)
    CUDA_SAFE_CALL( cudaMemcpy( h_odata, d_odata, mem_size,
                                cudaMemcpyDeviceToHost) );
    cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    // cleanup memory
    free( h_odata);
    CUDA_SAFE_CALL(cudaFree(d_idata));
    CUDA_SAFE_CALL(cudaFree(d_odata));
}
// Mode 2: peer-to-peer access.  The input is staged on device d1 and copied
// locally there; then a kernel launched on device d2 reads d1's memory
// directly over peer access into its own output buffer.  Both results are
// copied back to the host and compared for equality.
void run_remote_peer_to_peer_memory_access(float* h_idata, size_t h_size,size_t d1, size_t d2) {
    // ceil-divide the element count by the 32-thread block size.  The
    // original multiplied the rounded quotient by thread_count again,
    // launching 32x more blocks than needed (harmless because the kernel
    // bounds-checks, but wasteful and clearly unintended).
    int thread_count = 32;
    int grid_size = (int)(h_size / thread_count);
    if ((h_size % thread_count) != 0) {
        grid_size += 1;
    }
    // size_t + %zu fixes the old unsigned-int/%lu format mismatch
    size_t mem_size = sizeof( float) * h_size;
    printf("TOTAL MEM PER MALLOC %zu\n\n", mem_size);
    cudaSetDevice(d1);
    /*
     * DEVICE MEMORY ALLOCATIONS FOR DEVICE ONE
     */
    float* d_idata, *d_odata;
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size));
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size));
    /*
     * MEMORY COPIES FOR DEVICE ONE
     */
    CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, mem_size,
                                cudaMemcpyHostToDevice) );
    dim3 grid( grid_size, 1, 1);
    dim3 threads( thread_count, 1, 1);
    // local reference copy on device one
    simple_copy_kernel<<< grid, threads,0>>>( d_idata, d_odata, h_size);
    cudaError_t cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    CUT_CHECK_ERROR("Kernel execution failed");
    // change GPU device
    cudaSetDevice(d2);
    /*
     * DEVICE MEMORY ALLOCATIONS FOR DEVICE TWO
     */
    float* d_odata_two;
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata_two, mem_size));
    // Allow device two to read device one's memory directly.  The peer
    // must be d1 — the original hard-coded device 0, which is only
    // correct when d1 happens to be 0.
    cuerr = cudaDeviceEnablePeerAccess((int)d1, 0);
    if (cuerr != cudaSuccess && cuerr != cudaErrorPeerAccessAlreadyEnabled) {
        printf("CUDA ERROR enabling peer access: %d\n\n", cuerr);
    }
    // kernel on device two reads d_idata, which lives on device one
    simple_copy_kernel<<< grid, threads,0>>>( d_idata, d_odata_two, h_size);
    cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    CUT_CHECK_ERROR("Kernel execution failed");
    /*
     * HOST MEMORY COPIES FROM DEVICE ONE
     */
    cudaSetDevice(d1);
    float* h_odata = (float*) malloc( mem_size);
    CUDA_SAFE_CALL( cudaMemcpy( h_odata, d_odata, mem_size,
                                cudaMemcpyDeviceToHost) );
    /*
     * HOST MEMORY COPIES FROM DEVICE TWO
     */
    cudaSetDevice(d2);
    float* h_odata_two = (float*) malloc( mem_size);
    CUDA_SAFE_CALL( cudaMemcpy( h_odata_two, d_odata_two, mem_size,
                                cudaMemcpyDeviceToHost) );
    // both paths must have produced identical copies of the input
    if(memcmp(h_odata,h_odata_two,mem_size) != 0) {
        printf("FAILED TO BE EQUAL\n");
    }
    /*
     * MEMORY CLEAN UP
     */
    free( h_odata);
    free( h_odata_two);
    CUDA_SAFE_CALL(cudaFree(d_idata));
    CUDA_SAFE_CALL(cudaFree(d_odata));
    CUDA_SAFE_CALL(cudaFree(d_odata_two));
}
// Mode 3: transfer via explicit host round-trip.  Device d1 copies the
// input locally; the result is brought back to the host, uploaded to
// device d2, copied again there, downloaded, and compared with d1's output.
void run_remote_memory_access_using_data_copy(float* h_idata, size_t h_size, size_t d1, size_t d2) {
    // ceil-divide by the 32-thread block size.  The original multiplied
    // the rounded quotient by thread_count, launching 32x too many blocks.
    int thread_count = 32;
    int grid_size = (int)(h_size / thread_count);
    if ((h_size % thread_count) != 0) {
        grid_size += 1;
    }
    // size_t + %zu fixes the old unsigned-int/%lu format mismatch
    size_t mem_size = sizeof( float) * h_size;
    printf("TOTAL MEM PER MALLOC %zu\n\n", mem_size);
    cudaSetDevice(d1);
    // allocate device memory on device one
    float* d_idata, *d_odata;
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size));
    // copy host memory to device
    CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, mem_size,
                                cudaMemcpyHostToDevice) );
    // allocate device memory for the result
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size));
    dim3 grid( grid_size, 1, 1);
    dim3 threads( thread_count, 1, 1);
    // local copy on device one
    simple_copy_kernel<<< grid, threads,0>>>( d_idata, d_odata, h_size);
    cudaError_t cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
        printf("ERROR: %s\n\n",cudaGetErrorString(cuerr));
    }
    CUT_CHECK_ERROR("Kernel execution failed");
    // stage device one's result on the host
    float* h_odata = (float*) malloc( mem_size);
    CUDA_SAFE_CALL( cudaMemcpy( h_odata, d_odata, mem_size,
                                cudaMemcpyDeviceToHost) );
    // change GPU device
    cudaSetDevice(d2);
    float* h_odata_two = (float*) malloc( mem_size);
    // allocate device memory on device two and upload the staged result
    float* d_idata_two, *d_odata_two;
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata_two, mem_size));
    CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata_two, mem_size));
    CUDA_SAFE_CALL( cudaMemcpy( d_idata_two, h_odata, mem_size,
                                cudaMemcpyHostToDevice) );
    // local copy on device two
    simple_copy_kernel<<< grid, threads,0>>>( d_idata_two, d_odata_two, h_size);
    cuerr = cudaGetLastError();
    if( cuerr != cudaSuccess) {
        printf("CUDA ERROR %d\n\n", cuerr);
    }
    CUT_CHECK_ERROR("Kernel execution failed");
    // copy result from device two back to the host
    CUDA_SAFE_CALL( cudaMemcpy( h_odata_two, d_odata_two, mem_size,
                                cudaMemcpyDeviceToHost) );
    // the round trip must not have altered any byte
    if(memcmp(h_odata,h_odata_two,mem_size) != 0) {
        printf("FAILED TO BE EQUAL\n");
    }
    // cleanup memory
    free( h_odata);
    free( h_odata_two);
    CUDA_SAFE_CALL(cudaFree(d_idata));
    CUDA_SAFE_CALL(cudaFree(d_idata_two));
    CUDA_SAFE_CALL(cudaFree(d_odata));
    CUDA_SAFE_CALL(cudaFree(d_odata_two));
}
// Program main
////////////////////////////////////////////////////////////////////////////////
// Parses <mode> <num elements> <device 1 id> <device 2 id>, rounds the
// element count up to a whole number of 32-thread blocks, zero-fills a host
// buffer and dispatches to the selected memory-access benchmark.
int
main( int argc, char** argv)
{
    if (argc != 5) {
        printf("%s <memory access 1 local 2 peer-to-peer 3 peer-to-peer-memcpy> <num elements> <device 1 id> <device 2 id>\n", argv[0]);
        return 0;
    }
    CUT_DEVICE_INIT();
    int memory_access = atoi(argv[1]);
    int num_elements  = atoi(argv[2]);
    int device_one_id = atoi(argv[3]);
    int device_two_id = atoi(argv[4]);
    // reject non-positive counts up front (atoi returns 0 on garbage input)
    if (num_elements <= 0) {
        printf("invalid element count\n");
        return -1;
    }
    // round up to a multiple of the 32-thread block size
    if ((num_elements % 32) != 0) {
        num_elements = (num_elements / 32 + 1) * 32;
    }
    printf("number_elements: %d\n\n", num_elements);
    // allocate and zero-initialize the host input buffer
    float* h_idata = (float*) malloc( sizeof(float) * num_elements);
    if (h_idata == NULL) {
        printf("host allocation failed\n");
        return -1;
    }
    for( int i = 0; i < num_elements; ++i) {
        h_idata[i] = 0;
    }
    int rc = 0;
    switch (memory_access) {
    case 1:
        run_device_mem_local_to_gpu(h_idata, num_elements, device_one_id, device_two_id);
        break;
    case 2:
        run_remote_peer_to_peer_memory_access(h_idata, num_elements, device_one_id, device_two_id);
        break;
    case 3:
        run_remote_memory_access_using_data_copy(h_idata, num_elements, device_one_id, device_two_id);
        break;
    default:
        // unknown mode; fall through so h_idata is still freed (the
        // original `return -1` here leaked the buffer)
        rc = -1;
        break;
    }
    free(h_idata);
    return rc;
}
|
418409a3c1a6ae49ba5412774811254c00de28d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "resample.h"
#include <stk/common/assert.h>
#include <stk/cuda/cuda.h>
#include <stk/cuda/volume.h>
#include <stk/image/gpu_volume.h>
#include <stk/math/float3.h>
#include <stk/math/float4.h>
#include "gaussian_filter.h"
#include <algorithm>
#ifdef DF_ENABLE_DISPLACEMENT_FIELD_RESIDUALS
#error Displacement residuals not implemented for CUDA
#endif
namespace cuda = stk::cuda;
// Decimate a volume by 2 along every axis: out(x,y,z) = in(2x,2y,2z).
// One thread per OUTPUT voxel; launch a grid covering new_dims.
template<typename T>
__global__ void shrink_volume_by_2_kernel(
    const cuda::VolumePtr<T> in,
    dim3 new_dims,          // dimensions of the output volume
    cuda::VolumePtr<T> out
)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    // the launch grid may overshoot the output dimensions
    if (x >= new_dims.x ||
        y >= new_dims.y ||
        z >= new_dims.z)
    {
        return;
    }
    out(x, y, z) = in(int(2*x), int(2*y), int(2*z));
}
// Resample `src` (size src_dims) onto a new_dims grid: each output voxel
// samples src at its own coordinate scaled by inv_scale via
// cuda::linear_at_clamp (linear interpolation, coordinates clamped).
// One thread per output voxel.
__global__ void upsample_vectorfield_kernel(
    cuda::VolumePtr<float4> src,
    dim3 src_dims,
    dim3 new_dims,
    float3 inv_scale,       // output-to-source coordinate scale factor
    cuda::VolumePtr<float4> out
)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    // the launch grid may overshoot the output dimensions
    if (x >= new_dims.x ||
        y >= new_dims.y ||
        z >= new_dims.z)
    {
        return;
    }
    out(x, y, z) = cuda::linear_at_clamp<float4>(src, src_dims, x * inv_scale.x, y * inv_scale.y, z * inv_scale.z);
}
namespace {
/// Shrinks the volume by removing every other element.
/// Supports float and float4 pitched-pointer volumes; voxel spacing is
/// scaled so the physical extent of the volume is preserved.
stk::GpuVolume shrink_volume_by_2(const stk::GpuVolume& src)
{
    ASSERT(src.voxel_type() == stk::Type_Float ||
           src.voxel_type() == stk::Type_Float4);
    ASSERT(src.usage() == stk::gpu::Usage_PitchedPointer);
    dim3 old_dims = src.size();
    // ceil(old/2): odd dimensions keep their last sample
    dim3 new_dims {
        uint32_t(ceil(old_dims.x * 0.5f)),
        uint32_t(ceil(old_dims.y * 0.5f)),
        uint32_t(ceil(old_dims.z * 0.5f)),
    };
    stk::GpuVolume dest(new_dims, src.voxel_type());
    dest.copy_meta_from(src);
    // spacing grows by the actual (per-axis) shrink ratio
    float3 old_spacing = src.spacing();
    float3 new_spacing {
        old_spacing.x * (old_dims.x / float(new_dims.x)),
        old_spacing.y * (old_dims.y / float(new_dims.y)),
        old_spacing.z * (old_dims.z / float(new_dims.z))
    };
    dest.set_spacing(new_spacing);
    // one thread per destination voxel, ceil-divided grid
    dim3 block_size{32,32,1};
    dim3 grid_size {
        (new_dims.x + block_size.x - 1) / block_size.x,
        (new_dims.y + block_size.y - 1) / block_size.y,
        (new_dims.z + block_size.z - 1) / block_size.z
    };
    // dispatch on the voxel type (kernel is templated)
    if (src.voxel_type() == stk::Type_Float) {
        hipLaunchKernelGGL(( shrink_volume_by_2_kernel<float>), dim3(grid_size), dim3(block_size), 0, 0,
            src,
            new_dims,
            dest
        );
    }
    else if (src.voxel_type() == stk::Type_Float4) {
        hipLaunchKernelGGL(( shrink_volume_by_2_kernel<float4>), dim3(grid_size), dim3(block_size), 0, 0,
            src,
            new_dims,
            dest
        );
    }
    else {
        ASSERT(false);
    }
    // block until the kernel finishes and surface any execution error
    CUDA_CHECK_ERRORS(hipDeviceSynchronize());
    return dest;
}
}
namespace filters {
namespace gpu {
// Downsample a float volume by 2: Gaussian low-pass filter first (sigma =
// smallest voxel spacing), then decimate every other voxel.
stk::GpuVolume downsample_volume_by_2(const stk::GpuVolume& vol)
{
    ASSERT(vol.voxel_type() == stk::Type_Float);
    ASSERT(vol.usage() == stk::gpu::Usage_PitchedPointer);
    float3 spacing = vol.spacing();
    // smooth before decimating
    float unit_sigma = ::min(spacing.x, ::min(spacing.y, spacing.z));
    stk::GpuVolume filtered = gaussian_filter_3d(vol, unit_sigma);
    return shrink_volume_by_2(filtered);
}
// Downsample a float4 vector field by 2: Gaussian filter (sigma = smallest
// voxel spacing), then decimate every other voxel.
stk::GpuVolume downsample_vectorfield_by_2(const stk::GpuVolume& vol
#ifdef DF_ENABLE_DISPLACEMENT_FIELD_RESIDUALS
    , stk::GpuVolume& residual
#endif
)
{
    ASSERT(vol.voxel_type() == stk::Type_Float4);
    ASSERT(vol.usage() == stk::gpu::Usage_PitchedPointer);
    float3 spacing = vol.spacing();
    // smooth before decimating
    float unit_sigma = ::min(spacing.x, ::min(spacing.y, spacing.z));
    stk::GpuVolume filtered = gaussian_filter_3d(vol, unit_sigma);
    return shrink_volume_by_2(filtered);
}
// Resample a float4 vector field to `new_dims` by linear interpolation,
// scaling the voxel spacing so the physical extent is preserved.
stk::GpuVolume upsample_vectorfield(const stk::GpuVolume& vol, const dim3& new_dims
#ifdef DF_ENABLE_DISPLACEMENT_FIELD_RESIDUALS
    , const stk::GpuVolume& residual
#endif
)
{
    ASSERT(vol.voxel_type() == stk::Type_Float4); // No float3 in gpu volumes
    ASSERT(vol.usage() == stk::gpu::Usage_PitchedPointer);
    dim3 old_dims = vol.size();
    // maps an output coordinate back into source-volume coordinates
    float3 inv_scale{
        float(old_dims.x) / new_dims.x,
        float(old_dims.y) / new_dims.y,
        float(old_dims.z) / new_dims.z
    };
    stk::GpuVolume out(new_dims, stk::Type_Float4, stk::gpu::Usage_PitchedPointer);
    out.copy_meta_from(vol);
    // spacing shrinks by the upsampling factor per axis
    float3 old_spacing = vol.spacing();
    float3 new_spacing{
        old_spacing.x * inv_scale.x,
        old_spacing.y * inv_scale.y,
        old_spacing.z * inv_scale.z
    };
    out.set_spacing(new_spacing);
    // one thread per output voxel, ceil-divided grid
    dim3 block_size{8,8,1};
    dim3 grid_size {
        (new_dims.x + block_size.x - 1) / block_size.x,
        (new_dims.y + block_size.y - 1) / block_size.y,
        (new_dims.z + block_size.z - 1) / block_size.z
    };
    hipLaunchKernelGGL(( upsample_vectorfield_kernel), dim3(grid_size), dim3(block_size), 0, 0,
        vol,
        vol.size(),
        new_dims,
        inv_scale,
        out
    );
    // block until the kernel finishes and surface any execution error
    CUDA_CHECK_ERRORS(hipDeviceSynchronize());
    return out;
}
}
}
| 418409a3c1a6ae49ba5412774811254c00de28d7.cu | #include "resample.h"
#include <stk/common/assert.h>
#include <stk/cuda/cuda.h>
#include <stk/cuda/volume.h>
#include <stk/image/gpu_volume.h>
#include <stk/math/float3.h>
#include <stk/math/float4.h>
#include "gaussian_filter.h"
#include <algorithm>
#ifdef DF_ENABLE_DISPLACEMENT_FIELD_RESIDUALS
#error Displacement residuals not implemented for CUDA
#endif
namespace cuda = stk::cuda;
// Decimate a volume by 2 along every axis: out(x,y,z) = in(2x,2y,2z).
// One thread per OUTPUT voxel; launch a grid covering new_dims.
template<typename T>
__global__ void shrink_volume_by_2_kernel(
    const cuda::VolumePtr<T> in,
    dim3 new_dims,          // dimensions of the output volume
    cuda::VolumePtr<T> out
)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    // the launch grid may overshoot the output dimensions
    if (x >= new_dims.x ||
        y >= new_dims.y ||
        z >= new_dims.z)
    {
        return;
    }
    out(x, y, z) = in(int(2*x), int(2*y), int(2*z));
}
// Resample `src` (size src_dims) onto a new_dims grid: each output voxel
// samples src at its own coordinate scaled by inv_scale via
// cuda::linear_at_clamp (linear interpolation, coordinates clamped).
// One thread per output voxel.
__global__ void upsample_vectorfield_kernel(
    cuda::VolumePtr<float4> src,
    dim3 src_dims,
    dim3 new_dims,
    float3 inv_scale,       // output-to-source coordinate scale factor
    cuda::VolumePtr<float4> out
)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;
    // the launch grid may overshoot the output dimensions
    if (x >= new_dims.x ||
        y >= new_dims.y ||
        z >= new_dims.z)
    {
        return;
    }
    out(x, y, z) = cuda::linear_at_clamp<float4>(src, src_dims, x * inv_scale.x, y * inv_scale.y, z * inv_scale.z);
}
namespace {
/// Shrinks the volume by removing every other element.
/// Supports float and float4 pitched-pointer volumes; voxel spacing is
/// scaled so the physical extent of the volume is preserved.
stk::GpuVolume shrink_volume_by_2(const stk::GpuVolume& src)
{
    ASSERT(src.voxel_type() == stk::Type_Float ||
           src.voxel_type() == stk::Type_Float4);
    ASSERT(src.usage() == stk::gpu::Usage_PitchedPointer);
    dim3 old_dims = src.size();
    // ceil(old/2): odd dimensions keep their last sample
    dim3 new_dims {
        uint32_t(ceil(old_dims.x * 0.5f)),
        uint32_t(ceil(old_dims.y * 0.5f)),
        uint32_t(ceil(old_dims.z * 0.5f)),
    };
    stk::GpuVolume dest(new_dims, src.voxel_type());
    dest.copy_meta_from(src);
    // spacing grows by the actual (per-axis) shrink ratio
    float3 old_spacing = src.spacing();
    float3 new_spacing {
        old_spacing.x * (old_dims.x / float(new_dims.x)),
        old_spacing.y * (old_dims.y / float(new_dims.y)),
        old_spacing.z * (old_dims.z / float(new_dims.z))
    };
    dest.set_spacing(new_spacing);
    // one thread per destination voxel, ceil-divided grid
    dim3 block_size{32,32,1};
    dim3 grid_size {
        (new_dims.x + block_size.x - 1) / block_size.x,
        (new_dims.y + block_size.y - 1) / block_size.y,
        (new_dims.z + block_size.z - 1) / block_size.z
    };
    // dispatch on the voxel type (kernel is templated)
    if (src.voxel_type() == stk::Type_Float) {
        shrink_volume_by_2_kernel<float><<<grid_size, block_size>>>(
            src,
            new_dims,
            dest
        );
    }
    else if (src.voxel_type() == stk::Type_Float4) {
        shrink_volume_by_2_kernel<float4><<<grid_size, block_size>>>(
            src,
            new_dims,
            dest
        );
    }
    else {
        ASSERT(false);
    }
    // block until the kernel finishes and surface any execution error
    CUDA_CHECK_ERRORS(cudaDeviceSynchronize());
    return dest;
}
}
namespace filters {
namespace gpu {
// Downsample a float volume by 2: Gaussian low-pass filter first (sigma =
// smallest voxel spacing), then decimate every other voxel.
stk::GpuVolume downsample_volume_by_2(const stk::GpuVolume& vol)
{
    ASSERT(vol.voxel_type() == stk::Type_Float);
    ASSERT(vol.usage() == stk::gpu::Usage_PitchedPointer);
    float3 spacing = vol.spacing();
    // smooth before decimating
    float unit_sigma = std::min(spacing.x, std::min(spacing.y, spacing.z));
    stk::GpuVolume filtered = gaussian_filter_3d(vol, unit_sigma);
    return shrink_volume_by_2(filtered);
}
// Downsample a float4 vector field by 2: Gaussian filter (sigma = smallest
// voxel spacing), then decimate every other voxel.
stk::GpuVolume downsample_vectorfield_by_2(const stk::GpuVolume& vol
#ifdef DF_ENABLE_DISPLACEMENT_FIELD_RESIDUALS
    , stk::GpuVolume& residual
#endif
)
{
    ASSERT(vol.voxel_type() == stk::Type_Float4);
    ASSERT(vol.usage() == stk::gpu::Usage_PitchedPointer);
    float3 spacing = vol.spacing();
    // smooth before decimating
    float unit_sigma = std::min(spacing.x, std::min(spacing.y, spacing.z));
    stk::GpuVolume filtered = gaussian_filter_3d(vol, unit_sigma);
    return shrink_volume_by_2(filtered);
}
// Resample a float4 vector field to `new_dims` by linear interpolation,
// scaling the voxel spacing so the physical extent is preserved.
stk::GpuVolume upsample_vectorfield(const stk::GpuVolume& vol, const dim3& new_dims
#ifdef DF_ENABLE_DISPLACEMENT_FIELD_RESIDUALS
    , const stk::GpuVolume& residual
#endif
)
{
    ASSERT(vol.voxel_type() == stk::Type_Float4); // No float3 in gpu volumes
    ASSERT(vol.usage() == stk::gpu::Usage_PitchedPointer);
    dim3 old_dims = vol.size();
    // maps an output coordinate back into source-volume coordinates
    float3 inv_scale{
        float(old_dims.x) / new_dims.x,
        float(old_dims.y) / new_dims.y,
        float(old_dims.z) / new_dims.z
    };
    stk::GpuVolume out(new_dims, stk::Type_Float4, stk::gpu::Usage_PitchedPointer);
    out.copy_meta_from(vol);
    // spacing shrinks by the upsampling factor per axis
    float3 old_spacing = vol.spacing();
    float3 new_spacing{
        old_spacing.x * inv_scale.x,
        old_spacing.y * inv_scale.y,
        old_spacing.z * inv_scale.z
    };
    out.set_spacing(new_spacing);
    // one thread per output voxel, ceil-divided grid
    dim3 block_size{8,8,1};
    dim3 grid_size {
        (new_dims.x + block_size.x - 1) / block_size.x,
        (new_dims.y + block_size.y - 1) / block_size.y,
        (new_dims.z + block_size.z - 1) / block_size.z
    };
    upsample_vectorfield_kernel<<<grid_size, block_size>>>(
        vol,
        vol.size(),
        new_dims,
        inv_scale,
        out
    );
    // block until the kernel finishes and surface any execution error
    CUDA_CHECK_ERRORS(cudaDeviceSynchronize());
    return out;
}
}
}
|
7168258355240ba05141db0758297baec41ed605.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CS3700 Example matrix multpilcation using GPU
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define TILE_WIDTH 2
#define WIDTH 6
// Kernel function execute by the device (GPU)
// Dense n x n matrix multiply: d_c = d_a * d_b (row-major layout, one
// output element per thread).  Launch with enough threads to cover n x n;
// threads outside the matrix exit without writing.
__global__ void
product (float *d_a, float *d_b, float *d_c, const int n) {
  // global 2D coordinates of the output element this thread computes
  int col = blockIdx.x * blockDim.x + threadIdx.x ;
  int row = blockIdx.y * blockDim.y + threadIdx.y ;
  float sum = 0;
  if (row < n && col < n) {
    // dot product of row `row` of d_a with column `col` of d_b
    for (int i = 0 ; i<n ; ++i) {
      sum += d_a[row * n + i ] * d_b[i * n + col] ;
    }
    d_c[row * n + col] = sum;
  }
}
// Utility function to print the input matrix
// Print a WIDTH x WIDTH matrix, one tab-separated row per line.
// Entries are truncated to int for display.
void printMatrix (float m[][WIDTH]) {
  int row, col;
  for (row = 0; row < WIDTH; ++row) {
    for (col = 0; col < WIDTH; ++col)
      printf ("%d\t", (int) m[row][col]);
    printf ("\n");
  }
}
// Main function execute by the host (CPU)
// Host driver: fill two WIDTH x WIDTH matrices with random values in
// [0, 50), multiply them on the GPU and print operands and product.
int main () {
  // host matrices (stack-allocated)
  float host_a[WIDTH][WIDTH],
        host_b[WIDTH][WIDTH],
        host_c[WIDTH][WIDTH];
  // device arrays
  float *device_a, *device_b, *device_c;
  int i, j;
  // initialize host matrices using random numbers
  // (seeded from the clock, so runs are not reproducible)
  time_t t;
  srand ((unsigned) time(&t));
  for (i = 0; i<WIDTH; ++i) {
    for (j = 0; j<WIDTH; j++) {
      host_a[i][j] = (float) (rand() % 50);
      host_b[i][j] = (float) (rand() % 50);
    }
  }
  printf ("Matrix A:\n");
  printMatrix (host_a);
  printf ("\n");
  printf ("Matrix B:\n");
  printMatrix (host_b);
  printf ("\n");
  // allocate device memory for input matrices
  size_t deviceSize = WIDTH * WIDTH * sizeof (float);
  hipMalloc ((void **) &device_a, deviceSize);
  hipMalloc ((void **) &device_b, deviceSize);
  // copy host matrices to device
  hipMemcpy (device_a, host_a, deviceSize, hipMemcpyHostToDevice );
  hipMemcpy (device_b, host_b, deviceSize, hipMemcpyHostToDevice );
  // allocate device memory to store computed result
  hipMalloc((void **) &device_c, deviceSize) ;
  // NOTE(review): the block is WIDTH x WIDTH while the grid divides by
  // TILE_WIDTH, so only block (0,0) does useful work; dimBlock was
  // presumably meant to be TILE_WIDTH x TILE_WIDTH — verify.  The result
  // is still correct because the kernel bounds-checks.
  dim3 dimBlock (WIDTH, WIDTH);
  dim3 dimGrid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
  hipLaunchKernelGGL(( product), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, device_c, WIDTH);
  // copy result from device back to host (blocking copy: also waits for
  // the kernel to finish)
  hipMemcpy (host_c, device_c, deviceSize, hipMemcpyDeviceToHost);
  // output the computed result matrix
  printf ("A x B: \n");
  printMatrix (host_c);
  hipFree (device_a);
  hipFree (device_b);
  hipFree (device_c);
  return 0;
} | 7168258355240ba05141db0758297baec41ed605.cu | // CS3700 Example matrix multpilcation using GPU
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define TILE_WIDTH 2
#define WIDTH 6
// Kernel function execute by the device (GPU)
// Dense n x n matrix multiply: d_c = d_a * d_b (row-major layout, one
// output element per thread).  Launch with enough threads to cover n x n;
// threads outside the matrix exit without writing.
__global__ void
product (float *d_a, float *d_b, float *d_c, const int n) {
  // global 2D coordinates of the output element this thread computes
  int col = blockIdx.x * blockDim.x + threadIdx.x ;
  int row = blockIdx.y * blockDim.y + threadIdx.y ;
  float sum = 0;
  if (row < n && col < n) {
    // dot product of row `row` of d_a with column `col` of d_b
    for (int i = 0 ; i<n ; ++i) {
      sum += d_a[row * n + i ] * d_b[i * n + col] ;
    }
    d_c[row * n + col] = sum;
  }
}
// Utility function to print the input matrix
// Print a WIDTH x WIDTH matrix, one tab-separated row per line.
// Entries are truncated to int for display.
void printMatrix (float m[][WIDTH]) {
  int row, col;
  for (row = 0; row < WIDTH; ++row) {
    for (col = 0; col < WIDTH; ++col)
      printf ("%d\t", (int) m[row][col]);
    printf ("\n");
  }
}
// Main function execute by the host (CPU)
// Host driver: fill two WIDTH x WIDTH matrices with random values in
// [0, 50), multiply them on the GPU and print operands and product.
int main () {
  // host matrices (stack-allocated)
  float host_a[WIDTH][WIDTH],
        host_b[WIDTH][WIDTH],
        host_c[WIDTH][WIDTH];
  // device arrays
  float *device_a, *device_b, *device_c;
  int i, j;
  // initialize host matrices using random numbers
  // (seeded from the clock, so runs are not reproducible)
  time_t t;
  srand ((unsigned) time(&t));
  for (i = 0; i<WIDTH; ++i) {
    for (j = 0; j<WIDTH; j++) {
      host_a[i][j] = (float) (rand() % 50);
      host_b[i][j] = (float) (rand() % 50);
    }
  }
  printf ("Matrix A:\n");
  printMatrix (host_a);
  printf ("\n");
  printf ("Matrix B:\n");
  printMatrix (host_b);
  printf ("\n");
  // allocate device memory for input matrices
  size_t deviceSize = WIDTH * WIDTH * sizeof (float);
  cudaMalloc ((void **) &device_a, deviceSize);
  cudaMalloc ((void **) &device_b, deviceSize);
  // copy host matrices to device
  cudaMemcpy (device_a, host_a, deviceSize, cudaMemcpyHostToDevice );
  cudaMemcpy (device_b, host_b, deviceSize, cudaMemcpyHostToDevice );
  // allocate device memory to store computed result
  cudaMalloc ((void **) &device_c, deviceSize);
  // TILE_WIDTH x TILE_WIDTH threads per block with WIDTH/TILE_WIDTH blocks
  // per axis covers exactly WIDTH x WIDTH output elements.  (The previous
  // WIDTH x WIDTH block size left every block except (0,0) idle because
  // the kernel bounds-checks against WIDTH.)
  dim3 dimBlock (TILE_WIDTH, TILE_WIDTH);
  dim3 dimGrid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
  product<<<dimGrid, dimBlock>>> (device_a, device_b, device_c, WIDTH);
  // surface launch-configuration errors immediately
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf (stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
  }
  // copy result from device back to host (blocking copy: also waits for
  // the kernel to finish)
  cudaMemcpy (host_c, device_c, deviceSize, cudaMemcpyDeviceToHost);
  // output the computed result matrix
  printf ("A x B: \n");
  printMatrix (host_c);
  cudaFree (device_a);
  cudaFree (device_b);
  cudaFree (device_c);
  return 0;
}
30d2a0ac87464d8092565c75c855a595d9f945c4.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <hip/hip_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
// Sum-reduce a random (rows x cols) tensor to a scalar on both CPU and GPU
// and verify the two results agree.
template<typename Type, int DataLayout>
static void test_full_reductions() {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  // random sizes between 1K and 5K per dimension
  const int num_rows = internal::random<int>(1024, 5*1024);
  const int num_cols = internal::random<int>(1024, 5*1024);
  Tensor<Type, 2, DataLayout> in(num_rows, num_cols);
  in.setRandom();
  // CPU reference result
  Tensor<Type, 0, DataLayout> full_redux;
  full_redux = in.sum();
  std::size_t in_bytes = in.size() * sizeof(Type);
  std::size_t out_bytes = full_redux.size() * sizeof(Type);
  Type* gpu_in_ptr = static_cast<Type*>(gpu_device.allocate(in_bytes));
  Type* gpu_out_ptr = static_cast<Type*>(gpu_device.allocate(out_bytes));
  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
  TensorMap<Tensor<Type, 2, DataLayout> > in_gpu(gpu_in_ptr, num_rows, num_cols);
  TensorMap<Tensor<Type, 0, DataLayout> > out_gpu(gpu_out_ptr);
  // GPU reduction, then copy the scalar back and wait for completion
  out_gpu.device(gpu_device) = in_gpu.sum();
  Tensor<Type, 0, DataLayout> full_redux_gpu;
  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  gpu_device.synchronize();
  // Check that the CPU and GPU reductions return the same result.
  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
  gpu_device.deallocate(gpu_in_ptr);
  gpu_device.deallocate(gpu_out_ptr);
}
// Reduce a (33 x 1 x 128) tensor over its FIRST dimension on the GPU and
// compare against the CPU result.  The GPU evaluates the reduction twice
// (assign then accumulate), so the expected value is 2x the CPU sum.
template<typename Type, int DataLayout>
static void test_first_dim_reductions() {
  int dim_x = 33;
  int dim_y = 1;
  int dim_z = 128;
  Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
  in.setRandom();
  // reduce over dimension 0
  Eigen::array<int, 1> red_axis;
  red_axis[0] = 0;
  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
  // Create device
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice dev(&stream);
  // Create data(T)
  Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
  Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));
  Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
  Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);
  // Perform operation: sum twice (= then +=) so gpu_out holds 2 * redux
  dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
  gpu_out.device(dev) = gpu_in.sum(red_axis);
  gpu_out.device(dev) += gpu_in.sum(red_axis);
  Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);
  dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
  dev.synchronize();
  // Check that the CPU and GPU reductions return the same result.
  for (int i = 0; i < gpu_out.size(); ++i) {
    VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
  }
  dev.deallocate(in_data);
  dev.deallocate(out_data);
}
// Reduce a (128 x 1 x 33) tensor over its LAST dimension on the GPU and
// compare against the CPU result.  The GPU evaluates the reduction twice
// (assign then accumulate), so the expected value is 2x the CPU sum.
template<typename Type, int DataLayout>
static void test_last_dim_reductions() {
  int dim_x = 128;
  int dim_y = 1;
  int dim_z = 33;
  Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
  in.setRandom();
  // reduce over dimension 2
  Eigen::array<int, 1> red_axis;
  red_axis[0] = 2;
  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
  // Create device
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice dev(&stream);
  // Create data
  Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
  Type* out_data = (Type*)dev.allocate(dim_x*dim_y*sizeof(Type));
  Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
  Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_x, dim_y);
  // Perform operation: sum twice (= then +=) so gpu_out holds 2 * redux
  dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
  gpu_out.device(dev) = gpu_in.sum(red_axis);
  gpu_out.device(dev) += gpu_in.sum(red_axis);
  Tensor<Type, 2, DataLayout> redux_gpu(dim_x, dim_y);
  dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
  dev.synchronize();
  // Check that the CPU and GPU reductions return the same result.
  for (int i = 0; i < gpu_out.size(); ++i) {
    VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
  }
  dev.deallocate(in_data);
  dev.deallocate(out_data);
}
// Test driver: run full / first-dim / last-dim reductions for float and
// double in both layouts.  Some double outer reductions are commented out
// because they are not supported yet (see inline notes).
void test_cxx11_tensor_reduction_cuda() {
  CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
  CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
  CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
  CALL_SUBTEST_2((test_full_reductions<double, RowMajor>()));
  CALL_SUBTEST_3((test_first_dim_reductions<float, ColMajor>()));
  CALL_SUBTEST_3((test_first_dim_reductions<double, ColMajor>()));
  CALL_SUBTEST_4((test_first_dim_reductions<float, RowMajor>()));
  // Outer reductions of doubles aren't supported just yet.
  // CALL_SUBTEST_4((test_first_dim_reductions<double, RowMajor>()))
  CALL_SUBTEST_5((test_last_dim_reductions<float, ColMajor>()));
  // Outer reductions of doubles aren't supported just yet.
  // CALL_SUBTEST_5((test_last_dim_reductions<double, ColMajor>()));
  CALL_SUBTEST_6((test_last_dim_reductions<float, RowMajor>()));
  CALL_SUBTEST_6((test_last_dim_reductions<double, RowMajor>()));
}
| 30d2a0ac87464d8092565c75c855a595d9f945c4.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <cuda_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
// Sum-reduce a random (rows x cols) tensor to a scalar on both CPU and GPU
// and verify the two results agree.
template<typename Type, int DataLayout>
static void test_full_reductions() {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  // random sizes between 1K and 5K per dimension
  const int num_rows = internal::random<int>(1024, 5*1024);
  const int num_cols = internal::random<int>(1024, 5*1024);
  Tensor<Type, 2, DataLayout> in(num_rows, num_cols);
  in.setRandom();
  // CPU reference result
  Tensor<Type, 0, DataLayout> full_redux;
  full_redux = in.sum();
  std::size_t in_bytes = in.size() * sizeof(Type);
  std::size_t out_bytes = full_redux.size() * sizeof(Type);
  Type* gpu_in_ptr = static_cast<Type*>(gpu_device.allocate(in_bytes));
  Type* gpu_out_ptr = static_cast<Type*>(gpu_device.allocate(out_bytes));
  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
  TensorMap<Tensor<Type, 2, DataLayout> > in_gpu(gpu_in_ptr, num_rows, num_cols);
  TensorMap<Tensor<Type, 0, DataLayout> > out_gpu(gpu_out_ptr);
  // GPU reduction, then copy the scalar back and wait for completion
  out_gpu.device(gpu_device) = in_gpu.sum();
  Tensor<Type, 0, DataLayout> full_redux_gpu;
  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  gpu_device.synchronize();
  // Check that the CPU and GPU reductions return the same result.
  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
  gpu_device.deallocate(gpu_in_ptr);
  gpu_device.deallocate(gpu_out_ptr);
}
// Reduce a (33 x 1 x 128) tensor over its FIRST dimension on the GPU and
// compare against the CPU result.  The GPU evaluates the reduction twice
// (assign then accumulate), so the expected value is 2x the CPU sum.
template<typename Type, int DataLayout>
static void test_first_dim_reductions() {
  int dim_x = 33;
  int dim_y = 1;
  int dim_z = 128;
  Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);
  in.setRandom();
  // reduce over dimension 0
  Eigen::array<int, 1> red_axis;
  red_axis[0] = 0;
  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);
  // Create device
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice dev(&stream);
  // Create data(T)
  Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));
  Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));
  Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);
  Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);
  // Perform operation: sum twice (= then +=) so gpu_out holds 2 * redux
  dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type));
  gpu_out.device(dev) = gpu_in.sum(red_axis);
  gpu_out.device(dev) += gpu_in.sum(red_axis);
  Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);
  dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type));
  dev.synchronize();
  // Check that the CPU and GPU reductions return the same result.
  for (int i = 0; i < gpu_out.size(); ++i) {
    VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));
  }
  dev.deallocate(in_data);
  dev.deallocate(out_data);
}
template<typename Type, int DataLayout>
static void test_last_dim_reductions() {
  // Reduce a (128 x 1 x 33) random tensor over its last dimension on both
  // the CPU and the GPU.  The GPU expression is evaluated twice (= then +=),
  // so the downloaded result must match exactly twice the host reduction.
  const int dim_x = 128;
  const int dim_y = 1;
  const int dim_z = 33;

  Tensor<Type, 3, DataLayout> input(dim_x, dim_y, dim_z);
  input.setRandom();

  // Host-side reference reduction over axis 2.
  Eigen::array<int, 1> red_axis;
  red_axis[0] = 2;
  Tensor<Type, 2, DataLayout> host_result = input.sum(red_axis);

  // Set up the GPU device and the device-side buffers.
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  Type* d_in = (Type*)gpu_device.allocate(dim_x * dim_y * dim_z * sizeof(Type));
  Type* d_out = (Type*)gpu_device.allocate(dim_x * dim_y * sizeof(Type));
  Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > in_map(d_in, dim_x, dim_y, dim_z);
  Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > out_map(d_out, dim_x, dim_y);

  // Upload, reduce twice on the device (assignment then accumulation),
  // then download the accumulated result.
  gpu_device.memcpyHostToDevice(d_in, input.data(), input.size() * sizeof(Type));
  out_map.device(gpu_device) = in_map.sum(red_axis);
  out_map.device(gpu_device) += in_map.sum(red_axis);
  Tensor<Type, 2, DataLayout> device_result(dim_x, dim_y);
  gpu_device.memcpyDeviceToHost(device_result.data(), d_out, out_map.size() * sizeof(Type));
  gpu_device.synchronize();

  // The device accumulated the reduction twice, hence the factor of 2.
  for (int i = 0; i < out_map.size(); ++i) {
    VERIFY_IS_APPROX(2 * host_result(i), device_result(i));
  }

  gpu_device.deallocate(d_in);
  gpu_device.deallocate(d_out);
}
// Test entry point: runs full, first-dimension (inner) and last-dimension
// (outer) reductions for float and double, in both storage orders, except
// for the combinations explicitly noted as unsupported below.
void test_cxx11_tensor_reduction_cuda() {
CALL_SUBTEST_1((test_full_reductions<float, ColMajor>()));
CALL_SUBTEST_1((test_full_reductions<double, ColMajor>()));
CALL_SUBTEST_2((test_full_reductions<float, RowMajor>()));
CALL_SUBTEST_2((test_full_reductions<double, RowMajor>()));
CALL_SUBTEST_3((test_first_dim_reductions<float, ColMajor>()));
CALL_SUBTEST_3((test_first_dim_reductions<double, ColMajor>()));
CALL_SUBTEST_4((test_first_dim_reductions<float, RowMajor>()));
// Outer reductions of doubles aren't supported just yet.
// CALL_SUBTEST_4((test_first_dim_reductions<double, RowMajor>()))
CALL_SUBTEST_5((test_last_dim_reductions<float, ColMajor>()));
// Outer reductions of doubles aren't supported just yet.
// CALL_SUBTEST_5((test_last_dim_reductions<double, ColMajor>()));
CALL_SUBTEST_6((test_last_dim_reductions<float, RowMajor>()));
CALL_SUBTEST_6((test_last_dim_reductions<double, RowMajor>()));
}
|
84a25f18cb474f79010286f594c9e7ec9b8fd302.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**************************************************************************
**
** svd3
**
** Quick singular value decomposition as described by:
** A. McAdams, A. Selle, R. Tamstorf, J. Teran and E. Sifakis,
** Computing the Singular Value Decomposition of 3x3 matrices
** with minimal branching and elementary floating point operations,
** University of Wisconsin - Madison technical report TR1690, May 2011
**
** Identical GPU version
** Implementated by: Kui Wu
** kwu@cs.utah.edu
**
** May 2018
**
**************************************************************************/
// IEEE-754 bit patterns used by the branch-free mask arithmetic in svd():
// gone is 0x3F800000, the bit pattern of 1.0f.
#define gone 1065353216
// Bit patterns of sin(pi/8) and cos(pi/8): the fixed Givens rotation angle
// used when the off-diagonal element dominates (McAdams et al., TR1690).
#define gsine_pi_over_eight 1053028117
#define gcosine_pi_over_eight 1064076127
#define gone_half 0.5f
#define gsmall_number 1.e-12f
#define gtiny_number 1.e-20f
// Threshold constant from the paper (approximately 4*gamma^2 = 3 + 2*sqrt(2)).
#define gfour_gamma_squared 5.8284273147583007813f
// Map the CUDA round-to-nearest intrinsics onto plain arithmetic so the same
// source also builds as host code.  NOTE(review): this also replaces the
// intrinsics in device compilation - confirm that is intended.
#define __fadd_rn(a,b) ((a)+(b))
#define __fsub_rn(a,b) ((a)-(b))
#define __frsqrt_rn(a) (1.f / sqrtf(a))
// Type-punning union: gives simultaneous access to a float value and its raw
// IEEE-754 bits, enabling the mask/XOR tricks used throughout svd().
union un { float f; unsigned int ui; };
// ---------------------------------------------------------------------------
// Branch-free singular value decomposition of a 3x3 matrix:  A = U * S * V^T
// (A. McAdams, A. Selle, R. Tamstorf, J. Teran, E. Sifakis, "Computing the
// Singular Value Decomposition of 3x3 matrices with minimal branching and
// elementary floating point operations", UW-Madison TR1690, May 2011).
//
// Inputs:  a11..a33             entries of A, row-major.
// Outputs: u11..u33, v11..v33   the rotation factors U and V.
//          s11, s22, s33        diagonal of S; the off-diagonal outputs of
//                               the original formulation are commented out.
//
// All branching is emulated with IEEE-754 bit masks through the `un` union,
// so the same instruction sequence executes for every input.  Usable from
// both host and device code (__device__ __host__).
// ---------------------------------------------------------------------------
__device__ __host__ __forceinline__
void svd(
float a11, float a12, float a13, float a21, float a22, float a23, float a31, float a32, float a33, // input A
float &u11, float &u12, float &u13, float &u21, float &u22, float &u23, float &u31, float &u32, float &u33, // output U
float &s11,
//float &s12, float &s13, float &s21,
float &s22,
//float &s23, float &s31, float &s32,
float &s33, // output S
float &v11, float &v12, float &v13, float &v21, float &v22, float &v23, float &v31, float &v32, float &v33 // output V
)
{
un Sa11, Sa21, Sa31, Sa12, Sa22, Sa32, Sa13, Sa23, Sa33;
un Su11, Su21, Su31, Su12, Su22, Su32, Su13, Su23, Su33;
un Sv11, Sv21, Sv31, Sv12, Sv22, Sv32, Sv13, Sv23, Sv33;
un Sc, Ss, Sch, Ssh;
un Stmp1, Stmp2, Stmp3, Stmp4, Stmp5;
un Ss11, Ss21, Ss31, Ss22, Ss32, Ss33;
un Sqvs, Sqvvx, Sqvvy, Sqvvz;
Sa11.f = a11; Sa12.f = a12; Sa13.f = a13;
Sa21.f = a21; Sa22.f = a22; Sa23.f = a23;
Sa31.f = a31; Sa32.f = a32; Sa33.f = a33;
//###########################################################
// Compute normal equations matrix (S = A^T * A, symmetric)
//###########################################################
Ss11.f = Sa11.f*Sa11.f;
Stmp1.f = Sa21.f*Sa21.f;
Ss11.f = __fadd_rn(Stmp1.f, Ss11.f);
Stmp1.f = Sa31.f*Sa31.f;
Ss11.f = __fadd_rn(Stmp1.f, Ss11.f);
Ss21.f = Sa12.f*Sa11.f;
Stmp1.f = Sa22.f*Sa21.f;
Ss21.f = __fadd_rn(Stmp1.f, Ss21.f);
Stmp1.f = Sa32.f*Sa31.f;
Ss21.f = __fadd_rn(Stmp1.f, Ss21.f);
Ss31.f = Sa13.f*Sa11.f;
Stmp1.f = Sa23.f*Sa21.f;
Ss31.f = __fadd_rn(Stmp1.f, Ss31.f);
Stmp1.f = Sa33.f*Sa31.f;
Ss31.f = __fadd_rn(Stmp1.f, Ss31.f);
Ss22.f = Sa12.f*Sa12.f;
Stmp1.f = Sa22.f*Sa22.f;
Ss22.f = __fadd_rn(Stmp1.f, Ss22.f);
Stmp1.f = Sa32.f*Sa32.f;
Ss22.f = __fadd_rn(Stmp1.f, Ss22.f);
Ss32.f = Sa13.f*Sa12.f;
Stmp1.f = Sa23.f*Sa22.f;
Ss32.f = __fadd_rn(Stmp1.f, Ss32.f);
Stmp1.f = Sa33.f*Sa32.f;
Ss32.f = __fadd_rn(Stmp1.f, Ss32.f);
Ss33.f = Sa13.f*Sa13.f;
Stmp1.f = Sa23.f*Sa23.f;
Ss33.f = __fadd_rn(Stmp1.f, Ss33.f);
Stmp1.f = Sa33.f*Sa33.f;
Ss33.f = __fadd_rn(Stmp1.f, Ss33.f);
// Accumulated rotation V, maintained as a quaternion (scalar Sqvs,
// vector part Sqvvx/Sqvvy/Sqvvz), starting at identity.
Sqvs.f = 1.f; Sqvvx.f = 0.f; Sqvvy.f = 0.f; Sqvvz.f = 0.f;
//###########################################################
// Solve symmetric eigenproblem using Jacobi iteration
// (fixed 4 sweeps; each sweep conjugates the (2,1), (3,2)
// and (3,1) off-diagonal pairs in turn)
//###########################################################
for (int i = 0; i < 4; i++)
{
Ssh.f = Ss21.f * 0.5f;
Stmp5.f = __fsub_rn(Ss11.f, Ss22.f);
Stmp2.f = Ssh.f*Ssh.f;
// Mask: all-ones when the off-diagonal term is significant, else zero.
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui&Ssh.ui;
Sch.ui = Stmp1.ui&Stmp5.ui;
Stmp2.ui = ~Stmp1.ui&gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f*Ssh.f;
Stmp2.f = Sch.f*Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f*Ssh.f;
Sch.f = Stmp4.f*Sch.f;
// If the rotation angle would exceed pi/8, clamp to the fixed
// sin(pi/8)/cos(pi/8) rotation (bit patterns from the header).
Stmp1.f = gfour_gamma_squared*Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight&Stmp1.ui;
Ssh.ui = ~Stmp1.ui&Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui;
Sch.ui = ~Stmp1.ui&Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
// Full-angle sine/cosine from the half-angle values.
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f * Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss33.f = Ss33.f * Stmp3.f;
Ss31.f = Ss31.f * Stmp3.f;
Ss32.f = Ss32.f * Stmp3.f;
Ss33.f = Ss33.f * Stmp3.f;
Stmp1.f = Ss.f * Ss31.f;
Stmp2.f = Ss.f * Ss32.f;
Ss31.f = Sc.f * Ss31.f;
Ss32.f = Sc.f * Ss32.f;
Ss31.f = __fadd_rn(Stmp2.f, Ss31.f);
Ss32.f = __fsub_rn(Ss32.f, Stmp1.f);
Stmp2.f = Ss.f*Ss.f;
Stmp1.f = Ss22.f*Stmp2.f;
Stmp3.f = Ss11.f*Stmp2.f;
Stmp4.f = Sc.f*Sc.f;
Ss11.f = Ss11.f*Stmp4.f;
Ss22.f = Ss22.f*Stmp4.f;
Ss11.f = __fadd_rn(Ss11.f, Stmp1.f);
Ss22.f = __fadd_rn(Ss22.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss21.f, Ss21.f);
Ss21.f = Ss21.f*Stmp4.f;
Stmp4.f = Sc.f*Ss.f;
Stmp2.f = Stmp2.f*Stmp4.f;
Stmp5.f = Stmp5.f*Stmp4.f;
Ss11.f = __fadd_rn(Ss11.f, Stmp2.f);
Ss21.f = __fsub_rn(Ss21.f, Stmp5.f);
Ss22.f = __fsub_rn(Ss22.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f*Sqvvx.f;
Stmp2.f = Ssh.f*Sqvvy.f;
Stmp3.f = Ssh.f*Sqvvz.f;
Ssh.f = Ssh.f*Sqvs.f;
Sqvs.f = Sch.f*Sqvs.f;
Sqvvx.f = Sch.f*Sqvvx.f;
Sqvvy.f = Sch.f*Sqvvy.f;
Sqvvz.f = Sch.f*Sqvvz.f;
Sqvvz.f = __fadd_rn(Sqvvz.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp3.f);
Sqvvx.f = __fadd_rn(Sqvvx.f, Stmp2.f);
Sqvvy.f = __fsub_rn(Sqvvy.f, Stmp1.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f);
#endif
//////////////////////////////////////////////////////////////////////////
// (1->3)
//////////////////////////////////////////////////////////////////////////
Ssh.f = Ss32.f * 0.5f;
Stmp5.f = __fsub_rn(Ss22.f, Ss33.f);
Stmp2.f = Ssh.f * Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui&Ssh.ui;
Sch.ui = Stmp1.ui&Stmp5.ui;
Stmp2.ui = ~Stmp1.ui&gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f * Ssh.f;
Sch.f = Stmp4.f * Sch.f;
Stmp1.f = gfour_gamma_squared * Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight&Stmp1.ui;
Ssh.ui = ~Stmp1.ui&Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui;
Sch.ui = ~Stmp1.ui&Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f*Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss11.f = Ss11.f * Stmp3.f;
Ss21.f = Ss21.f * Stmp3.f;
Ss31.f = Ss31.f * Stmp3.f;
Ss11.f = Ss11.f * Stmp3.f;
Stmp1.f = Ss.f*Ss21.f;
Stmp2.f = Ss.f*Ss31.f;
Ss21.f = Sc.f*Ss21.f;
Ss31.f = Sc.f*Ss31.f;
Ss21.f = __fadd_rn(Stmp2.f, Ss21.f);
Ss31.f = __fsub_rn(Ss31.f, Stmp1.f);
Stmp2.f = Ss.f*Ss.f;
Stmp1.f = Ss33.f*Stmp2.f;
Stmp3.f = Ss22.f*Stmp2.f;
Stmp4.f = Sc.f * Sc.f;
Ss22.f = Ss22.f * Stmp4.f;
Ss33.f = Ss33.f * Stmp4.f;
Ss22.f = __fadd_rn(Ss22.f, Stmp1.f);
Ss33.f = __fadd_rn(Ss33.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss32.f, Ss32.f);
Ss32.f = Ss32.f*Stmp4.f;
Stmp4.f = Sc.f*Ss.f;
Stmp2.f = Stmp2.f*Stmp4.f;
Stmp5.f = Stmp5.f*Stmp4.f;
Ss22.f = __fadd_rn(Ss22.f, Stmp2.f);
Ss32.f = __fsub_rn(Ss32.f, Stmp5.f);
Ss33.f = __fsub_rn(Ss33.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f*Sqvvx.f;
Stmp2.f = Ssh.f*Sqvvy.f;
Stmp3.f = Ssh.f*Sqvvz.f;
Ssh.f = Ssh.f*Sqvs.f;
Sqvs.f = Sch.f*Sqvs.f;
Sqvvx.f = Sch.f*Sqvvx.f;
Sqvvy.f = Sch.f*Sqvvy.f;
Sqvvz.f = Sch.f*Sqvvz.f;
Sqvvx.f = __fadd_rn(Sqvvx.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp1.f);
Sqvvy.f = __fadd_rn(Sqvvy.f, Stmp3.f);
Sqvvz.f = __fsub_rn(Sqvvz.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f);
#endif
// Third pair of the sweep: conjugate the (3,1) element.
Ssh.f = Ss31.f * 0.5f;
Stmp5.f = __fsub_rn(Ss33.f, Ss11.f);
Stmp2.f = Ssh.f*Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui&Ssh.ui;
Sch.ui = Stmp1.ui&Stmp5.ui;
Stmp2.ui = ~Stmp1.ui&gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f*Ssh.f;
Stmp2.f = Sch.f*Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f*Ssh.f;
Sch.f = Stmp4.f*Sch.f;
Stmp1.f = gfour_gamma_squared*Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight&Stmp1.ui;
Ssh.ui = ~Stmp1.ui&Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui;
Sch.ui = ~Stmp1.ui&Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f*Ssh.f;
Stmp2.f = Sch.f*Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f*Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss22.f = Ss22.f * Stmp3.f;
Ss32.f = Ss32.f * Stmp3.f;
Ss21.f = Ss21.f * Stmp3.f;
Ss22.f = Ss22.f * Stmp3.f;
Stmp1.f = Ss.f*Ss32.f;
Stmp2.f = Ss.f*Ss21.f;
Ss32.f = Sc.f*Ss32.f;
Ss21.f = Sc.f*Ss21.f;
Ss32.f = __fadd_rn(Stmp2.f, Ss32.f);
Ss21.f = __fsub_rn(Ss21.f, Stmp1.f);
Stmp2.f = Ss.f*Ss.f;
Stmp1.f = Ss11.f*Stmp2.f;
Stmp3.f = Ss33.f*Stmp2.f;
Stmp4.f = Sc.f*Sc.f;
Ss33.f = Ss33.f*Stmp4.f;
Ss11.f = Ss11.f*Stmp4.f;
Ss33.f = __fadd_rn(Ss33.f, Stmp1.f);
Ss11.f = __fadd_rn(Ss11.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss31.f, Ss31.f);
Ss31.f = Ss31.f*Stmp4.f;
Stmp4.f = Sc.f*Ss.f;
Stmp2.f = Stmp2.f*Stmp4.f;
Stmp5.f = Stmp5.f*Stmp4.f;
Ss33.f = __fadd_rn(Ss33.f, Stmp2.f);
Ss31.f = __fsub_rn(Ss31.f, Stmp5.f);
Ss11.f = __fsub_rn(Ss11.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f*Sqvvx.f;
Stmp2.f = Ssh.f*Sqvvy.f;
Stmp3.f = Ssh.f*Sqvvz.f;
Ssh.f = Ssh.f*Sqvs.f;
Sqvs.f = Sch.f*Sqvs.f;
Sqvvx.f = Sch.f*Sqvvx.f;
Sqvvy.f = Sch.f*Sqvvy.f;
Sqvvz.f = Sch.f*Sqvvz.f;
Sqvvy.f = __fadd_rn(Sqvvy.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp2.f);
Sqvvz.f = __fadd_rn(Sqvvz.f, Stmp1.f);
Sqvvx.f = __fsub_rn(Sqvvx.f, Stmp3.f);
}
//###########################################################
// Normalize quaternion for matrix V
// (rsqrt approximation refined with one Newton-Raphson step)
//###########################################################
Stmp2.f = Sqvs.f*Sqvs.f;
Stmp1.f = Sqvvx.f*Sqvvx.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = Sqvvy.f*Sqvvy.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = Sqvvz.f*Sqvvz.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sqvs.f = Sqvs.f*Stmp1.f;
Sqvvx.f = Sqvvx.f*Stmp1.f;
Sqvvy.f = Sqvvy.f*Stmp1.f;
Sqvvz.f = Sqvvz.f*Stmp1.f;
//###########################################################
// Transform quaternion to matrix V
//###########################################################
Stmp1.f = Sqvvx.f*Sqvvx.f;
Stmp2.f = Sqvvy.f*Sqvvy.f;
Stmp3.f = Sqvvz.f*Sqvvz.f;
Sv11.f = Sqvs.f*Sqvs.f;
Sv22.f = __fsub_rn(Sv11.f, Stmp1.f);
Sv33.f = __fsub_rn(Sv22.f, Stmp2.f);
Sv33.f = __fadd_rn(Sv33.f, Stmp3.f);
Sv22.f = __fadd_rn(Sv22.f, Stmp2.f);
Sv22.f = __fsub_rn(Sv22.f, Stmp3.f);
Sv11.f = __fadd_rn(Sv11.f, Stmp1.f);
Sv11.f = __fsub_rn(Sv11.f, Stmp2.f);
Sv11.f = __fsub_rn(Sv11.f, Stmp3.f);
Stmp1.f = __fadd_rn(Sqvvx.f, Sqvvx.f);
Stmp2.f = __fadd_rn(Sqvvy.f, Sqvvy.f);
Stmp3.f = __fadd_rn(Sqvvz.f, Sqvvz.f);
Sv32.f = Sqvs.f*Stmp1.f;
Sv13.f = Sqvs.f*Stmp2.f;
Sv21.f = Sqvs.f*Stmp3.f;
Stmp1.f = Sqvvy.f*Stmp1.f;
Stmp2.f = Sqvvz.f*Stmp2.f;
Stmp3.f = Sqvvx.f*Stmp3.f;
Sv12.f = __fsub_rn(Stmp1.f, Sv21.f);
Sv23.f = __fsub_rn(Stmp2.f, Sv32.f);
Sv31.f = __fsub_rn(Stmp3.f, Sv13.f);
Sv21.f = __fadd_rn(Stmp1.f, Sv21.f);
Sv32.f = __fadd_rn(Stmp2.f, Sv32.f);
Sv13.f = __fadd_rn(Stmp3.f, Sv13.f);
///###########################################################
// Multiply (from the right) with V
//###########################################################
Stmp2.f = Sa12.f;
Stmp3.f = Sa13.f;
Sa12.f = Sv12.f*Sa11.f;
Sa13.f = Sv13.f*Sa11.f;
Sa11.f = Sv11.f*Sa11.f;
Stmp1.f = Sv21.f*Stmp2.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp1.f);
Stmp1.f = Sv31.f*Stmp3.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp1.f);
Stmp1.f = Sv22.f*Stmp2.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp1.f);
Stmp1.f = Sv32.f*Stmp3.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp1.f);
Stmp1.f = Sv23.f*Stmp2.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp1.f);
Stmp1.f = Sv33.f*Stmp3.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp1.f);
Stmp2.f = Sa22.f;
Stmp3.f = Sa23.f;
Sa22.f = Sv12.f*Sa21.f;
Sa23.f = Sv13.f*Sa21.f;
Sa21.f = Sv11.f*Sa21.f;
Stmp1.f = Sv21.f*Stmp2.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp1.f);
Stmp1.f = Sv31.f*Stmp3.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp1.f);
Stmp1.f = Sv22.f*Stmp2.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp1.f);
Stmp1.f = Sv32.f*Stmp3.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp1.f);
Stmp1.f = Sv23.f*Stmp2.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp1.f);
Stmp1.f = Sv33.f*Stmp3.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp1.f);
Stmp2.f = Sa32.f;
Stmp3.f = Sa33.f;
Sa32.f = Sv12.f*Sa31.f;
Sa33.f = Sv13.f*Sa31.f;
Sa31.f = Sv11.f*Sa31.f;
Stmp1.f = Sv21.f*Stmp2.f;
Sa31.f = __fadd_rn(Sa31.f, Stmp1.f);
Stmp1.f = Sv31.f*Stmp3.f;
Sa31.f = __fadd_rn(Sa31.f, Stmp1.f);
Stmp1.f = Sv22.f*Stmp2.f;
Sa32.f = __fadd_rn(Sa32.f, Stmp1.f);
Stmp1.f = Sv32.f*Stmp3.f;
Sa32.f = __fadd_rn(Sa32.f, Stmp1.f);
Stmp1.f = Sv23.f*Stmp2.f;
Sa33.f = __fadd_rn(Sa33.f, Stmp1.f);
Stmp1.f = Sv33.f*Stmp3.f;
Sa33.f = __fadd_rn(Sa33.f, Stmp1.f);
//###########################################################
// Permute columns such that the singular values are sorted
// (column norms computed, then branch-free conditional swaps
// via masked XOR on the raw bits)
//###########################################################
Stmp1.f = Sa11.f*Sa11.f;
Stmp4.f = Sa21.f*Sa21.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp4.f = Sa31.f*Sa31.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp2.f = Sa12.f*Sa12.f;
Stmp4.f = Sa22.f*Sa22.f;
Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f);
Stmp4.f = Sa32.f*Sa32.f;
Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f);
Stmp3.f = Sa13.f*Sa13.f;
Stmp4.f = Sa23.f*Sa23.f;
Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f);
Stmp4.f = Sa33.f*Sa33.f;
Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f);
// Swap columns 1-2 if necessary
Stmp4.ui = (Stmp1.f < Stmp2.f) ? 0xffffffff : 0;
Stmp5.ui = Sa11.ui^Sa12.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa11.ui = Sa11.ui^Stmp5.ui;
Sa12.ui = Sa12.ui^Stmp5.ui;
Stmp5.ui = Sa21.ui^Sa22.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa21.ui = Sa21.ui^Stmp5.ui;
Sa22.ui = Sa22.ui^Stmp5.ui;
Stmp5.ui = Sa31.ui^Sa32.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa31.ui = Sa31.ui^Stmp5.ui;
Sa32.ui = Sa32.ui^Stmp5.ui;
Stmp5.ui = Sv11.ui^Sv12.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv11.ui = Sv11.ui^Stmp5.ui;
Sv12.ui = Sv12.ui^Stmp5.ui;
Stmp5.ui = Sv21.ui^Sv22.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv21.ui = Sv21.ui^Stmp5.ui;
Sv22.ui = Sv22.ui^Stmp5.ui;
Stmp5.ui = Sv31.ui^Sv32.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv31.ui = Sv31.ui^Stmp5.ui;
Sv32.ui = Sv32.ui^Stmp5.ui;
Stmp5.ui = Stmp1.ui^Stmp2.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp1.ui = Stmp1.ui^Stmp5.ui;
Stmp2.ui = Stmp2.ui^Stmp5.ui;
// If columns 1-2 have been swapped, negate 2nd column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa12.f = Sa12.f*Stmp4.f;
Sa22.f = Sa22.f*Stmp4.f;
Sa32.f = Sa32.f*Stmp4.f;
Sv12.f = Sv12.f*Stmp4.f;
Sv22.f = Sv22.f*Stmp4.f;
Sv32.f = Sv32.f*Stmp4.f;
// Swap columns 1-3 if necessary
Stmp4.ui = (Stmp1.f < Stmp3.f) ? 0xffffffff : 0;
Stmp5.ui = Sa11.ui^Sa13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa11.ui = Sa11.ui^Stmp5.ui;
Sa13.ui = Sa13.ui^Stmp5.ui;
Stmp5.ui = Sa21.ui^Sa23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa21.ui = Sa21.ui^Stmp5.ui;
Sa23.ui = Sa23.ui^Stmp5.ui;
Stmp5.ui = Sa31.ui^Sa33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa31.ui = Sa31.ui^Stmp5.ui;
Sa33.ui = Sa33.ui^Stmp5.ui;
Stmp5.ui = Sv11.ui^Sv13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv11.ui = Sv11.ui^Stmp5.ui;
Sv13.ui = Sv13.ui^Stmp5.ui;
Stmp5.ui = Sv21.ui^Sv23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv21.ui = Sv21.ui^Stmp5.ui;
Sv23.ui = Sv23.ui^Stmp5.ui;
Stmp5.ui = Sv31.ui^Sv33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv31.ui = Sv31.ui^Stmp5.ui;
Sv33.ui = Sv33.ui^Stmp5.ui;
Stmp5.ui = Stmp1.ui^Stmp3.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp1.ui = Stmp1.ui^Stmp5.ui;
Stmp3.ui = Stmp3.ui^Stmp5.ui;
// If columns 1-3 have been swapped, negate 1st column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa11.f = Sa11.f*Stmp4.f;
Sa21.f = Sa21.f*Stmp4.f;
Sa31.f = Sa31.f*Stmp4.f;
Sv11.f = Sv11.f*Stmp4.f;
Sv21.f = Sv21.f*Stmp4.f;
Sv31.f = Sv31.f*Stmp4.f;
// Swap columns 2-3 if necessary
Stmp4.ui = (Stmp2.f < Stmp3.f) ? 0xffffffff : 0;
Stmp5.ui = Sa12.ui^Sa13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa12.ui = Sa12.ui^Stmp5.ui;
Sa13.ui = Sa13.ui^Stmp5.ui;
Stmp5.ui = Sa22.ui^Sa23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa22.ui = Sa22.ui^Stmp5.ui;
Sa23.ui = Sa23.ui^Stmp5.ui;
Stmp5.ui = Sa32.ui^Sa33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa32.ui = Sa32.ui^Stmp5.ui;
Sa33.ui = Sa33.ui^Stmp5.ui;
Stmp5.ui = Sv12.ui^Sv13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv12.ui = Sv12.ui^Stmp5.ui;
Sv13.ui = Sv13.ui^Stmp5.ui;
Stmp5.ui = Sv22.ui^Sv23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv22.ui = Sv22.ui^Stmp5.ui;
Sv23.ui = Sv23.ui^Stmp5.ui;
Stmp5.ui = Sv32.ui^Sv33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv32.ui = Sv32.ui^Stmp5.ui;
Sv33.ui = Sv33.ui^Stmp5.ui;
Stmp5.ui = Stmp2.ui^Stmp3.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp2.ui = Stmp2.ui^Stmp5.ui;
Stmp3.ui = Stmp3.ui^Stmp5.ui;
// If columns 2-3 have been swapped, negate 3rd column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa13.f = Sa13.f*Stmp4.f;
Sa23.f = Sa23.f*Stmp4.f;
Sa33.f = Sa33.f*Stmp4.f;
Sv13.f = Sv13.f*Stmp4.f;
Sv23.f = Sv23.f*Stmp4.f;
Sv33.f = Sv33.f*Stmp4.f;
//###########################################################
// Construct QR factorization of A*V (=U*D) using Givens rotations
//###########################################################
Su11.f = 1.f; Su12.f = 0.f; Su13.f = 0.f;
Su21.f = 0.f; Su22.f = 1.f; Su23.f = 0.f;
Su31.f = 0.f; Su32.f = 0.f; Su33.f = 1.f;
// First Givens rotation: annihilate the (2,1) entry.
Ssh.f = Sa21.f*Sa21.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui&Sa21.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa11.f);
Sch.f = fmaxf(Sch.f, Sa11.f);
Sch.f = fmaxf(Sch.f, gsmall_number);
Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f*Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui&Ssh.ui;
Stmp2.ui = ~Stmp5.ui&Sch.ui;
Sch.ui = Stmp5.ui&Sch.ui;
Ssh.ui = Stmp5.ui&Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f*Stmp1.f;
Ssh.f = Ssh.f*Stmp1.f;
Sc.f = Sch.f*Sch.f;
Ss.f = Ssh.f*Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f*Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f*Sa11.f;
Stmp2.f = Ss.f*Sa21.f;
Sa11.f = Sc.f*Sa11.f;
Sa21.f = Sc.f*Sa21.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp2.f);
Sa21.f = __fsub_rn(Sa21.f, Stmp1.f);
Stmp1.f = Ss.f*Sa12.f;
Stmp2.f = Ss.f*Sa22.f;
Sa12.f = Sc.f*Sa12.f;
Sa22.f = Sc.f*Sa22.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp2.f);
Sa22.f = __fsub_rn(Sa22.f, Stmp1.f);
Stmp1.f = Ss.f*Sa13.f;
Stmp2.f = Ss.f*Sa23.f;
Sa13.f = Sc.f*Sa13.f;
Sa23.f = Sc.f*Sa23.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp2.f);
Sa23.f = __fsub_rn(Sa23.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f*Su11.f;
Stmp2.f = Ss.f*Su12.f;
Su11.f = Sc.f*Su11.f;
Su12.f = Sc.f*Su12.f;
Su11.f = __fadd_rn(Su11.f, Stmp2.f);
Su12.f = __fsub_rn(Su12.f, Stmp1.f);
Stmp1.f = Ss.f*Su21.f;
Stmp2.f = Ss.f*Su22.f;
Su21.f = Sc.f*Su21.f;
Su22.f = Sc.f*Su22.f;
Su21.f = __fadd_rn(Su21.f, Stmp2.f);
Su22.f = __fsub_rn(Su22.f, Stmp1.f);
Stmp1.f = Ss.f*Su31.f;
Stmp2.f = Ss.f*Su32.f;
Su31.f = Sc.f*Su31.f;
Su32.f = Sc.f*Su32.f;
Su31.f = __fadd_rn(Su31.f, Stmp2.f);
Su32.f = __fsub_rn(Su32.f, Stmp1.f);
// Second Givens rotation: annihilate the (3,1) entry.
Ssh.f = Sa31.f*Sa31.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui&Sa31.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa11.f);
Sch.f = fmaxf(Sch.f, Sa11.f);
Sch.f = fmaxf(Sch.f, gsmall_number);
Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f; // was 0.5 (double literal): keep single precision like the other rotations
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f*Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui&Ssh.ui;
Stmp2.ui = ~Stmp5.ui&Sch.ui;
Sch.ui = Stmp5.ui&Sch.ui;
Ssh.ui = Stmp5.ui&Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f*Stmp1.f;
Ssh.f = Ssh.f*Stmp1.f;
Sc.f = Sch.f*Sch.f;
Ss.f = Ssh.f*Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f*Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f*Sa11.f;
Stmp2.f = Ss.f*Sa31.f;
Sa11.f = Sc.f*Sa11.f;
Sa31.f = Sc.f*Sa31.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp2.f);
Sa31.f = __fsub_rn(Sa31.f, Stmp1.f);
Stmp1.f = Ss.f*Sa12.f;
Stmp2.f = Ss.f*Sa32.f;
Sa12.f = Sc.f*Sa12.f;
Sa32.f = Sc.f*Sa32.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp2.f);
Sa32.f = __fsub_rn(Sa32.f, Stmp1.f);
Stmp1.f = Ss.f*Sa13.f;
Stmp2.f = Ss.f*Sa33.f;
Sa13.f = Sc.f*Sa13.f;
Sa33.f = Sc.f*Sa33.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp2.f);
Sa33.f = __fsub_rn(Sa33.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f*Su11.f;
Stmp2.f = Ss.f*Su13.f;
Su11.f = Sc.f*Su11.f;
Su13.f = Sc.f*Su13.f;
Su11.f = __fadd_rn(Su11.f, Stmp2.f);
Su13.f = __fsub_rn(Su13.f, Stmp1.f);
Stmp1.f = Ss.f*Su21.f;
Stmp2.f = Ss.f*Su23.f;
Su21.f = Sc.f*Su21.f;
Su23.f = Sc.f*Su23.f;
Su21.f = __fadd_rn(Su21.f, Stmp2.f);
Su23.f = __fsub_rn(Su23.f, Stmp1.f);
Stmp1.f = Ss.f*Su31.f;
Stmp2.f = Ss.f*Su33.f;
Su31.f = Sc.f*Su31.f;
Su33.f = Sc.f*Su33.f;
Su31.f = __fadd_rn(Su31.f, Stmp2.f);
Su33.f = __fsub_rn(Su33.f, Stmp1.f);
// Third Givens rotation: annihilate the (3,2) entry.
Ssh.f = Sa32.f*Sa32.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui&Sa32.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa22.f);
Sch.f = fmaxf(Sch.f, Sa22.f);
Sch.f = fmaxf(Sch.f, gsmall_number);
Stmp5.ui = (Sa22.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f*Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui&Ssh.ui;
Stmp2.ui = ~Stmp5.ui&Sch.ui;
Sch.ui = Stmp5.ui&Sch.ui;
Ssh.ui = Stmp5.ui&Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f*Stmp1.f;
Ssh.f = Ssh.f*Stmp1.f;
Sc.f = Sch.f*Sch.f;
Ss.f = Ssh.f*Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f*Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f*Sa21.f;
Stmp2.f = Ss.f*Sa31.f;
Sa21.f = Sc.f*Sa21.f;
Sa31.f = Sc.f*Sa31.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp2.f);
Sa31.f = __fsub_rn(Sa31.f, Stmp1.f);
Stmp1.f = Ss.f*Sa22.f;
Stmp2.f = Ss.f*Sa32.f;
Sa22.f = Sc.f*Sa22.f;
Sa32.f = Sc.f*Sa32.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp2.f);
Sa32.f = __fsub_rn(Sa32.f, Stmp1.f);
Stmp1.f = Ss.f*Sa23.f;
Stmp2.f = Ss.f*Sa33.f;
Sa23.f = Sc.f*Sa23.f;
Sa33.f = Sc.f*Sa33.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp2.f);
Sa33.f = __fsub_rn(Sa33.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f*Su12.f;
Stmp2.f = Ss.f*Su13.f;
Su12.f = Sc.f*Su12.f;
Su13.f = Sc.f*Su13.f;
Su12.f = __fadd_rn(Su12.f, Stmp2.f);
Su13.f = __fsub_rn(Su13.f, Stmp1.f);
Stmp1.f = Ss.f*Su22.f;
Stmp2.f = Ss.f*Su23.f;
Su22.f = Sc.f*Su22.f;
Su23.f = Sc.f*Su23.f;
Su22.f = __fadd_rn(Su22.f, Stmp2.f);
Su23.f = __fsub_rn(Su23.f, Stmp1.f);
Stmp1.f = Ss.f*Su32.f;
Stmp2.f = Ss.f*Su33.f;
Su32.f = Sc.f*Su32.f;
Su33.f = Sc.f*Su33.f;
Su32.f = __fadd_rn(Su32.f, Stmp2.f);
Su33.f = __fsub_rn(Su33.f, Stmp1.f);
// Write back results: after the QR step, A holds U^T * A * V = S.
v11 = Sv11.f; v12 = Sv12.f; v13 = Sv13.f;
v21 = Sv21.f; v22 = Sv22.f; v23 = Sv23.f;
v31 = Sv31.f; v32 = Sv32.f; v33 = Sv33.f;
u11 = Su11.f; u12 = Su12.f; u13 = Su13.f;
u21 = Su21.f; u22 = Su22.f; u23 = Su23.f;
u31 = Su31.f; u32 = Su32.f; u33 = Su33.f;
s11 = Sa11.f;
//s12 = Sa12.f; s13 = Sa13.f; s21 = Sa21.f;
s22 = Sa22.f;
//s23 = Sa23.f; s31 = Sa31.f; s32 = Sa32.f;
s33 = Sa33.f;
}
// One thread per 3x3 matrix, structure-of-arrays layout: element k of
// matrix `gid` lives at index [gid + k * testsize].  Input supplies the 9
// entries of A; output receives 9 entries of U, then 9 of V, then the 3
// singular values (21 floats total per matrix).
__global__ void svd3_SOA(const float*__restrict__ input,
    float*__restrict__ output,
    const int testsize)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < testsize) {
        // Per-matrix base pointers; successive SOA slots are testsize apart.
        const float* a = input + gid;
        float* r = output + gid;
        svd(
            a[0 * testsize], a[1 * testsize], a[2 * testsize],
            a[3 * testsize], a[4 * testsize], a[5 * testsize],
            a[6 * testsize], a[7 * testsize], a[8 * testsize],
            r[0 * testsize], r[1 * testsize], r[2 * testsize],
            r[3 * testsize], r[4 * testsize], r[5 * testsize],
            r[6 * testsize], r[7 * testsize], r[8 * testsize],
            r[9 * testsize], r[10 * testsize], r[11 * testsize],
            r[12 * testsize], r[13 * testsize], r[14 * testsize],
            r[15 * testsize], r[16 * testsize], r[17 * testsize],
            r[18 * testsize], r[19 * testsize], r[20 * testsize]
        );
    }
}
| 84a25f18cb474f79010286f594c9e7ec9b8fd302.cu | /**************************************************************************
**
** svd3
**
** Quick singular value decomposition as described by:
** A. McAdams, A. Selle, R. Tamstorf, J. Teran and E. Sifakis,
** Computing the Singular Value Decomposition of 3x3 matrices
** with minimal branching and elementary floating point operations,
** University of Wisconsin - Madison technical report TR1690, May 2011
**
** Identical GPU version
** Implementated by: Kui Wu
** kwu@cs.utah.edu
**
** May 2018
**
**************************************************************************/
#define gone 1065353216
#define gsine_pi_over_eight 1053028117
#define gcosine_pi_over_eight 1064076127
#define gone_half 0.5f
#define gsmall_number 1.e-12f
#define gtiny_number 1.e-20f
#define gfour_gamma_squared 5.8284273147583007813f
#define __fadd_rn(a,b) ((a)+(b))
#define __fsub_rn(a,b) ((a)-(b))
#define __frsqrt_rn(a) (1.f / sqrtf(a))
union un { float f; unsigned int ui; };
__device__ __host__ __forceinline__
void svd(
float a11, float a12, float a13, float a21, float a22, float a23, float a31, float a32, float a33, // input A
float &u11, float &u12, float &u13, float &u21, float &u22, float &u23, float &u31, float &u32, float &u33, // output U
float &s11,
//float &s12, float &s13, float &s21,
float &s22,
//float &s23, float &s31, float &s32,
float &s33, // output S
float &v11, float &v12, float &v13, float &v21, float &v22, float &v23, float &v31, float &v32, float &v33 // output V
)
{
un Sa11, Sa21, Sa31, Sa12, Sa22, Sa32, Sa13, Sa23, Sa33;
un Su11, Su21, Su31, Su12, Su22, Su32, Su13, Su23, Su33;
un Sv11, Sv21, Sv31, Sv12, Sv22, Sv32, Sv13, Sv23, Sv33;
un Sc, Ss, Sch, Ssh;
un Stmp1, Stmp2, Stmp3, Stmp4, Stmp5;
un Ss11, Ss21, Ss31, Ss22, Ss32, Ss33;
un Sqvs, Sqvvx, Sqvvy, Sqvvz;
Sa11.f = a11; Sa12.f = a12; Sa13.f = a13;
Sa21.f = a21; Sa22.f = a22; Sa23.f = a23;
Sa31.f = a31; Sa32.f = a32; Sa33.f = a33;
//###########################################################
// Compute normal equations matrix
//###########################################################
Ss11.f = Sa11.f*Sa11.f;
Stmp1.f = Sa21.f*Sa21.f;
Ss11.f = __fadd_rn(Stmp1.f, Ss11.f);
Stmp1.f = Sa31.f*Sa31.f;
Ss11.f = __fadd_rn(Stmp1.f, Ss11.f);
Ss21.f = Sa12.f*Sa11.f;
Stmp1.f = Sa22.f*Sa21.f;
Ss21.f = __fadd_rn(Stmp1.f, Ss21.f);
Stmp1.f = Sa32.f*Sa31.f;
Ss21.f = __fadd_rn(Stmp1.f, Ss21.f);
Ss31.f = Sa13.f*Sa11.f;
Stmp1.f = Sa23.f*Sa21.f;
Ss31.f = __fadd_rn(Stmp1.f, Ss31.f);
Stmp1.f = Sa33.f*Sa31.f;
Ss31.f = __fadd_rn(Stmp1.f, Ss31.f);
Ss22.f = Sa12.f*Sa12.f;
Stmp1.f = Sa22.f*Sa22.f;
Ss22.f = __fadd_rn(Stmp1.f, Ss22.f);
Stmp1.f = Sa32.f*Sa32.f;
Ss22.f = __fadd_rn(Stmp1.f, Ss22.f);
Ss32.f = Sa13.f*Sa12.f;
Stmp1.f = Sa23.f*Sa22.f;
Ss32.f = __fadd_rn(Stmp1.f, Ss32.f);
Stmp1.f = Sa33.f*Sa32.f;
Ss32.f = __fadd_rn(Stmp1.f, Ss32.f);
Ss33.f = Sa13.f*Sa13.f;
Stmp1.f = Sa23.f*Sa23.f;
Ss33.f = __fadd_rn(Stmp1.f, Ss33.f);
Stmp1.f = Sa33.f*Sa33.f;
Ss33.f = __fadd_rn(Stmp1.f, Ss33.f);
Sqvs.f = 1.f; Sqvvx.f = 0.f; Sqvvy.f = 0.f; Sqvvz.f = 0.f;
//###########################################################
// Solve symmetric eigenproblem using Jacobi iteration
//###########################################################
for (int i = 0; i < 4; i++)
{
Ssh.f = Ss21.f * 0.5f;
Stmp5.f = __fsub_rn(Ss11.f, Ss22.f);
Stmp2.f = Ssh.f*Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui&Ssh.ui;
Sch.ui = Stmp1.ui&Stmp5.ui;
Stmp2.ui = ~Stmp1.ui&gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f*Ssh.f;
Stmp2.f = Sch.f*Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f*Ssh.f;
Sch.f = Stmp4.f*Sch.f;
Stmp1.f = gfour_gamma_squared*Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight&Stmp1.ui;
Ssh.ui = ~Stmp1.ui&Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui;
Sch.ui = ~Stmp1.ui&Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f * Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss33.f = Ss33.f * Stmp3.f;
Ss31.f = Ss31.f * Stmp3.f;
Ss32.f = Ss32.f * Stmp3.f;
Ss33.f = Ss33.f * Stmp3.f;
Stmp1.f = Ss.f * Ss31.f;
Stmp2.f = Ss.f * Ss32.f;
Ss31.f = Sc.f * Ss31.f;
Ss32.f = Sc.f * Ss32.f;
Ss31.f = __fadd_rn(Stmp2.f, Ss31.f);
Ss32.f = __fsub_rn(Ss32.f, Stmp1.f);
Stmp2.f = Ss.f*Ss.f;
Stmp1.f = Ss22.f*Stmp2.f;
Stmp3.f = Ss11.f*Stmp2.f;
Stmp4.f = Sc.f*Sc.f;
Ss11.f = Ss11.f*Stmp4.f;
Ss22.f = Ss22.f*Stmp4.f;
Ss11.f = __fadd_rn(Ss11.f, Stmp1.f);
Ss22.f = __fadd_rn(Ss22.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss21.f, Ss21.f);
Ss21.f = Ss21.f*Stmp4.f;
Stmp4.f = Sc.f*Ss.f;
Stmp2.f = Stmp2.f*Stmp4.f;
Stmp5.f = Stmp5.f*Stmp4.f;
Ss11.f = __fadd_rn(Ss11.f, Stmp2.f);
Ss21.f = __fsub_rn(Ss21.f, Stmp5.f);
Ss22.f = __fsub_rn(Ss22.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f*Sqvvx.f;
Stmp2.f = Ssh.f*Sqvvy.f;
Stmp3.f = Ssh.f*Sqvvz.f;
Ssh.f = Ssh.f*Sqvs.f;
Sqvs.f = Sch.f*Sqvs.f;
Sqvvx.f = Sch.f*Sqvvx.f;
Sqvvy.f = Sch.f*Sqvvy.f;
Sqvvz.f = Sch.f*Sqvvz.f;
Sqvvz.f = __fadd_rn(Sqvvz.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp3.f);
Sqvvx.f = __fadd_rn(Sqvvx.f, Stmp2.f);
Sqvvy.f = __fsub_rn(Sqvvy.f, Stmp1.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f);
#endif
//////////////////////////////////////////////////////////////////////////
// (1->3)
//////////////////////////////////////////////////////////////////////////
Ssh.f = Ss32.f * 0.5f;
Stmp5.f = __fsub_rn(Ss22.f, Ss33.f);
Stmp2.f = Ssh.f * Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui&Ssh.ui;
Sch.ui = Stmp1.ui&Stmp5.ui;
Stmp2.ui = ~Stmp1.ui&gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f * Ssh.f;
Sch.f = Stmp4.f * Sch.f;
Stmp1.f = gfour_gamma_squared * Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight&Stmp1.ui;
Ssh.ui = ~Stmp1.ui&Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui;
Sch.ui = ~Stmp1.ui&Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f*Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss11.f = Ss11.f * Stmp3.f;
Ss21.f = Ss21.f * Stmp3.f;
Ss31.f = Ss31.f * Stmp3.f;
Ss11.f = Ss11.f * Stmp3.f;
Stmp1.f = Ss.f*Ss21.f;
Stmp2.f = Ss.f*Ss31.f;
Ss21.f = Sc.f*Ss21.f;
Ss31.f = Sc.f*Ss31.f;
Ss21.f = __fadd_rn(Stmp2.f, Ss21.f);
Ss31.f = __fsub_rn(Ss31.f, Stmp1.f);
Stmp2.f = Ss.f*Ss.f;
Stmp1.f = Ss33.f*Stmp2.f;
Stmp3.f = Ss22.f*Stmp2.f;
Stmp4.f = Sc.f * Sc.f;
Ss22.f = Ss22.f * Stmp4.f;
Ss33.f = Ss33.f * Stmp4.f;
Ss22.f = __fadd_rn(Ss22.f, Stmp1.f);
Ss33.f = __fadd_rn(Ss33.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss32.f, Ss32.f);
Ss32.f = Ss32.f*Stmp4.f;
Stmp4.f = Sc.f*Ss.f;
Stmp2.f = Stmp2.f*Stmp4.f;
Stmp5.f = Stmp5.f*Stmp4.f;
Ss22.f = __fadd_rn(Ss22.f, Stmp2.f);
Ss32.f = __fsub_rn(Ss32.f, Stmp5.f);
Ss33.f = __fsub_rn(Ss33.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f*Sqvvx.f;
Stmp2.f = Ssh.f*Sqvvy.f;
Stmp3.f = Ssh.f*Sqvvz.f;
Ssh.f = Ssh.f*Sqvs.f;
Sqvs.f = Sch.f*Sqvs.f;
Sqvvx.f = Sch.f*Sqvvx.f;
Sqvvy.f = Sch.f*Sqvvy.f;
Sqvvz.f = Sch.f*Sqvvz.f;
Sqvvx.f = __fadd_rn(Sqvvx.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp1.f);
Sqvvy.f = __fadd_rn(Sqvvy.f, Stmp3.f);
Sqvvz.f = __fsub_rn(Sqvvz.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f);
#endif
Ssh.f = Ss31.f * 0.5f;
Stmp5.f = __fsub_rn(Ss33.f, Ss11.f);
Stmp2.f = Ssh.f*Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui&Ssh.ui;
Sch.ui = Stmp1.ui&Stmp5.ui;
Stmp2.ui = ~Stmp1.ui&gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f*Ssh.f;
Stmp2.f = Sch.f*Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f*Ssh.f;
Sch.f = Stmp4.f*Sch.f;
Stmp1.f = gfour_gamma_squared*Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight&Stmp1.ui;
Ssh.ui = ~Stmp1.ui&Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui;
Sch.ui = ~Stmp1.ui&Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f*Ssh.f;
Stmp2.f = Sch.f*Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f*Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss22.f = Ss22.f * Stmp3.f;
Ss32.f = Ss32.f * Stmp3.f;
Ss21.f = Ss21.f * Stmp3.f;
Ss22.f = Ss22.f * Stmp3.f;
Stmp1.f = Ss.f*Ss32.f;
Stmp2.f = Ss.f*Ss21.f;
Ss32.f = Sc.f*Ss32.f;
Ss21.f = Sc.f*Ss21.f;
Ss32.f = __fadd_rn(Stmp2.f, Ss32.f);
Ss21.f = __fsub_rn(Ss21.f, Stmp1.f);
Stmp2.f = Ss.f*Ss.f;
Stmp1.f = Ss11.f*Stmp2.f;
Stmp3.f = Ss33.f*Stmp2.f;
Stmp4.f = Sc.f*Sc.f;
Ss33.f = Ss33.f*Stmp4.f;
Ss11.f = Ss11.f*Stmp4.f;
Ss33.f = __fadd_rn(Ss33.f, Stmp1.f);
Ss11.f = __fadd_rn(Ss11.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss31.f, Ss31.f);
Ss31.f = Ss31.f*Stmp4.f;
Stmp4.f = Sc.f*Ss.f;
Stmp2.f = Stmp2.f*Stmp4.f;
Stmp5.f = Stmp5.f*Stmp4.f;
Ss33.f = __fadd_rn(Ss33.f, Stmp2.f);
Ss31.f = __fsub_rn(Ss31.f, Stmp5.f);
Ss11.f = __fsub_rn(Ss11.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f*Sqvvx.f;
Stmp2.f = Ssh.f*Sqvvy.f;
Stmp3.f = Ssh.f*Sqvvz.f;
Ssh.f = Ssh.f*Sqvs.f;
Sqvs.f = Sch.f*Sqvs.f;
Sqvvx.f = Sch.f*Sqvvx.f;
Sqvvy.f = Sch.f*Sqvvy.f;
Sqvvz.f = Sch.f*Sqvvz.f;
Sqvvy.f = __fadd_rn(Sqvvy.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp2.f);
Sqvvz.f = __fadd_rn(Sqvvz.f, Stmp1.f);
Sqvvx.f = __fsub_rn(Sqvvx.f, Stmp3.f);
}
//###########################################################
// Normalize quaternion for matrix V
//###########################################################
Stmp2.f = Sqvs.f*Sqvs.f;
Stmp1.f = Sqvvx.f*Sqvvx.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = Sqvvy.f*Sqvvy.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = Sqvvz.f*Sqvvz.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sqvs.f = Sqvs.f*Stmp1.f;
Sqvvx.f = Sqvvx.f*Stmp1.f;
Sqvvy.f = Sqvvy.f*Stmp1.f;
Sqvvz.f = Sqvvz.f*Stmp1.f;
//###########################################################
// Transform quaternion to matrix V
//###########################################################
Stmp1.f = Sqvvx.f*Sqvvx.f;
Stmp2.f = Sqvvy.f*Sqvvy.f;
Stmp3.f = Sqvvz.f*Sqvvz.f;
Sv11.f = Sqvs.f*Sqvs.f;
Sv22.f = __fsub_rn(Sv11.f, Stmp1.f);
Sv33.f = __fsub_rn(Sv22.f, Stmp2.f);
Sv33.f = __fadd_rn(Sv33.f, Stmp3.f);
Sv22.f = __fadd_rn(Sv22.f, Stmp2.f);
Sv22.f = __fsub_rn(Sv22.f, Stmp3.f);
Sv11.f = __fadd_rn(Sv11.f, Stmp1.f);
Sv11.f = __fsub_rn(Sv11.f, Stmp2.f);
Sv11.f = __fsub_rn(Sv11.f, Stmp3.f);
Stmp1.f = __fadd_rn(Sqvvx.f, Sqvvx.f);
Stmp2.f = __fadd_rn(Sqvvy.f, Sqvvy.f);
Stmp3.f = __fadd_rn(Sqvvz.f, Sqvvz.f);
Sv32.f = Sqvs.f*Stmp1.f;
Sv13.f = Sqvs.f*Stmp2.f;
Sv21.f = Sqvs.f*Stmp3.f;
Stmp1.f = Sqvvy.f*Stmp1.f;
Stmp2.f = Sqvvz.f*Stmp2.f;
Stmp3.f = Sqvvx.f*Stmp3.f;
Sv12.f = __fsub_rn(Stmp1.f, Sv21.f);
Sv23.f = __fsub_rn(Stmp2.f, Sv32.f);
Sv31.f = __fsub_rn(Stmp3.f, Sv13.f);
Sv21.f = __fadd_rn(Stmp1.f, Sv21.f);
Sv32.f = __fadd_rn(Stmp2.f, Sv32.f);
Sv13.f = __fadd_rn(Stmp3.f, Sv13.f);
///###########################################################
// Multiply (from the right) with V
//###########################################################
Stmp2.f = Sa12.f;
Stmp3.f = Sa13.f;
Sa12.f = Sv12.f*Sa11.f;
Sa13.f = Sv13.f*Sa11.f;
Sa11.f = Sv11.f*Sa11.f;
Stmp1.f = Sv21.f*Stmp2.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp1.f);
Stmp1.f = Sv31.f*Stmp3.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp1.f);
Stmp1.f = Sv22.f*Stmp2.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp1.f);
Stmp1.f = Sv32.f*Stmp3.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp1.f);
Stmp1.f = Sv23.f*Stmp2.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp1.f);
Stmp1.f = Sv33.f*Stmp3.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp1.f);
Stmp2.f = Sa22.f;
Stmp3.f = Sa23.f;
Sa22.f = Sv12.f*Sa21.f;
Sa23.f = Sv13.f*Sa21.f;
Sa21.f = Sv11.f*Sa21.f;
Stmp1.f = Sv21.f*Stmp2.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp1.f);
Stmp1.f = Sv31.f*Stmp3.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp1.f);
Stmp1.f = Sv22.f*Stmp2.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp1.f);
Stmp1.f = Sv32.f*Stmp3.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp1.f);
Stmp1.f = Sv23.f*Stmp2.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp1.f);
Stmp1.f = Sv33.f*Stmp3.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp1.f);
Stmp2.f = Sa32.f;
Stmp3.f = Sa33.f;
Sa32.f = Sv12.f*Sa31.f;
Sa33.f = Sv13.f*Sa31.f;
Sa31.f = Sv11.f*Sa31.f;
Stmp1.f = Sv21.f*Stmp2.f;
Sa31.f = __fadd_rn(Sa31.f, Stmp1.f);
Stmp1.f = Sv31.f*Stmp3.f;
Sa31.f = __fadd_rn(Sa31.f, Stmp1.f);
Stmp1.f = Sv22.f*Stmp2.f;
Sa32.f = __fadd_rn(Sa32.f, Stmp1.f);
Stmp1.f = Sv32.f*Stmp3.f;
Sa32.f = __fadd_rn(Sa32.f, Stmp1.f);
Stmp1.f = Sv23.f*Stmp2.f;
Sa33.f = __fadd_rn(Sa33.f, Stmp1.f);
Stmp1.f = Sv33.f*Stmp3.f;
Sa33.f = __fadd_rn(Sa33.f, Stmp1.f);
//###########################################################
// Permute columns such that the singular values are sorted
//###########################################################
Stmp1.f = Sa11.f*Sa11.f;
Stmp4.f = Sa21.f*Sa21.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp4.f = Sa31.f*Sa31.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp2.f = Sa12.f*Sa12.f;
Stmp4.f = Sa22.f*Sa22.f;
Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f);
Stmp4.f = Sa32.f*Sa32.f;
Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f);
Stmp3.f = Sa13.f*Sa13.f;
Stmp4.f = Sa23.f*Sa23.f;
Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f);
Stmp4.f = Sa33.f*Sa33.f;
Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f);
// Swap columns 1-2 if necessary
Stmp4.ui = (Stmp1.f < Stmp2.f) ? 0xffffffff : 0;
Stmp5.ui = Sa11.ui^Sa12.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa11.ui = Sa11.ui^Stmp5.ui;
Sa12.ui = Sa12.ui^Stmp5.ui;
Stmp5.ui = Sa21.ui^Sa22.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa21.ui = Sa21.ui^Stmp5.ui;
Sa22.ui = Sa22.ui^Stmp5.ui;
Stmp5.ui = Sa31.ui^Sa32.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa31.ui = Sa31.ui^Stmp5.ui;
Sa32.ui = Sa32.ui^Stmp5.ui;
Stmp5.ui = Sv11.ui^Sv12.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv11.ui = Sv11.ui^Stmp5.ui;
Sv12.ui = Sv12.ui^Stmp5.ui;
Stmp5.ui = Sv21.ui^Sv22.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv21.ui = Sv21.ui^Stmp5.ui;
Sv22.ui = Sv22.ui^Stmp5.ui;
Stmp5.ui = Sv31.ui^Sv32.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv31.ui = Sv31.ui^Stmp5.ui;
Sv32.ui = Sv32.ui^Stmp5.ui;
Stmp5.ui = Stmp1.ui^Stmp2.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp1.ui = Stmp1.ui^Stmp5.ui;
Stmp2.ui = Stmp2.ui^Stmp5.ui;
// If columns 1-2 have been swapped, negate 2nd column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa12.f = Sa12.f*Stmp4.f;
Sa22.f = Sa22.f*Stmp4.f;
Sa32.f = Sa32.f*Stmp4.f;
Sv12.f = Sv12.f*Stmp4.f;
Sv22.f = Sv22.f*Stmp4.f;
Sv32.f = Sv32.f*Stmp4.f;
// Swap columns 1-3 if necessary
Stmp4.ui = (Stmp1.f < Stmp3.f) ? 0xffffffff : 0;
Stmp5.ui = Sa11.ui^Sa13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa11.ui = Sa11.ui^Stmp5.ui;
Sa13.ui = Sa13.ui^Stmp5.ui;
Stmp5.ui = Sa21.ui^Sa23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa21.ui = Sa21.ui^Stmp5.ui;
Sa23.ui = Sa23.ui^Stmp5.ui;
Stmp5.ui = Sa31.ui^Sa33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa31.ui = Sa31.ui^Stmp5.ui;
Sa33.ui = Sa33.ui^Stmp5.ui;
Stmp5.ui = Sv11.ui^Sv13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv11.ui = Sv11.ui^Stmp5.ui;
Sv13.ui = Sv13.ui^Stmp5.ui;
Stmp5.ui = Sv21.ui^Sv23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv21.ui = Sv21.ui^Stmp5.ui;
Sv23.ui = Sv23.ui^Stmp5.ui;
Stmp5.ui = Sv31.ui^Sv33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv31.ui = Sv31.ui^Stmp5.ui;
Sv33.ui = Sv33.ui^Stmp5.ui;
Stmp5.ui = Stmp1.ui^Stmp3.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp1.ui = Stmp1.ui^Stmp5.ui;
Stmp3.ui = Stmp3.ui^Stmp5.ui;
// If columns 1-3 have been swapped, negate 1st column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa11.f = Sa11.f*Stmp4.f;
Sa21.f = Sa21.f*Stmp4.f;
Sa31.f = Sa31.f*Stmp4.f;
Sv11.f = Sv11.f*Stmp4.f;
Sv21.f = Sv21.f*Stmp4.f;
Sv31.f = Sv31.f*Stmp4.f;
// Swap columns 2-3 if necessary
Stmp4.ui = (Stmp2.f < Stmp3.f) ? 0xffffffff : 0;
Stmp5.ui = Sa12.ui^Sa13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa12.ui = Sa12.ui^Stmp5.ui;
Sa13.ui = Sa13.ui^Stmp5.ui;
Stmp5.ui = Sa22.ui^Sa23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa22.ui = Sa22.ui^Stmp5.ui;
Sa23.ui = Sa23.ui^Stmp5.ui;
Stmp5.ui = Sa32.ui^Sa33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sa32.ui = Sa32.ui^Stmp5.ui;
Sa33.ui = Sa33.ui^Stmp5.ui;
Stmp5.ui = Sv12.ui^Sv13.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv12.ui = Sv12.ui^Stmp5.ui;
Sv13.ui = Sv13.ui^Stmp5.ui;
Stmp5.ui = Sv22.ui^Sv23.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv22.ui = Sv22.ui^Stmp5.ui;
Sv23.ui = Sv23.ui^Stmp5.ui;
Stmp5.ui = Sv32.ui^Sv33.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Sv32.ui = Sv32.ui^Stmp5.ui;
Sv33.ui = Sv33.ui^Stmp5.ui;
Stmp5.ui = Stmp2.ui^Stmp3.ui;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp2.ui = Stmp2.ui^Stmp5.ui;
Stmp3.ui = Stmp3.ui^Stmp5.ui;
// If columns 2-3 have been swapped, negate 3rd column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui&Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa13.f = Sa13.f*Stmp4.f;
Sa23.f = Sa23.f*Stmp4.f;
Sa33.f = Sa33.f*Stmp4.f;
Sv13.f = Sv13.f*Stmp4.f;
Sv23.f = Sv23.f*Stmp4.f;
Sv33.f = Sv33.f*Stmp4.f;
//###########################################################
// Construct QR factorization of A*V (=U*D) using Givens rotations
//###########################################################
Su11.f = 1.f; Su12.f = 0.f; Su13.f = 0.f;
Su21.f = 0.f; Su22.f = 1.f; Su23.f = 0.f;
Su31.f = 0.f; Su32.f = 0.f; Su33.f = 1.f;
Ssh.f = Sa21.f*Sa21.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui&Sa21.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa11.f);
Sch.f = fmaxf(Sch.f, Sa11.f);
Sch.f = fmaxf(Sch.f, gsmall_number);
Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f*Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui&Ssh.ui;
Stmp2.ui = ~Stmp5.ui&Sch.ui;
Sch.ui = Stmp5.ui&Sch.ui;
Ssh.ui = Stmp5.ui&Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f*Stmp1.f;
Ssh.f = Ssh.f*Stmp1.f;
Sc.f = Sch.f*Sch.f;
Ss.f = Ssh.f*Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f*Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f*Sa11.f;
Stmp2.f = Ss.f*Sa21.f;
Sa11.f = Sc.f*Sa11.f;
Sa21.f = Sc.f*Sa21.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp2.f);
Sa21.f = __fsub_rn(Sa21.f, Stmp1.f);
Stmp1.f = Ss.f*Sa12.f;
Stmp2.f = Ss.f*Sa22.f;
Sa12.f = Sc.f*Sa12.f;
Sa22.f = Sc.f*Sa22.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp2.f);
Sa22.f = __fsub_rn(Sa22.f, Stmp1.f);
Stmp1.f = Ss.f*Sa13.f;
Stmp2.f = Ss.f*Sa23.f;
Sa13.f = Sc.f*Sa13.f;
Sa23.f = Sc.f*Sa23.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp2.f);
Sa23.f = __fsub_rn(Sa23.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f*Su11.f;
Stmp2.f = Ss.f*Su12.f;
Su11.f = Sc.f*Su11.f;
Su12.f = Sc.f*Su12.f;
Su11.f = __fadd_rn(Su11.f, Stmp2.f);
Su12.f = __fsub_rn(Su12.f, Stmp1.f);
Stmp1.f = Ss.f*Su21.f;
Stmp2.f = Ss.f*Su22.f;
Su21.f = Sc.f*Su21.f;
Su22.f = Sc.f*Su22.f;
Su21.f = __fadd_rn(Su21.f, Stmp2.f);
Su22.f = __fsub_rn(Su22.f, Stmp1.f);
Stmp1.f = Ss.f*Su31.f;
Stmp2.f = Ss.f*Su32.f;
Su31.f = Sc.f*Su31.f;
Su32.f = Sc.f*Su32.f;
Su31.f = __fadd_rn(Su31.f, Stmp2.f);
Su32.f = __fsub_rn(Su32.f, Stmp1.f);
// Second Givens rotation
Ssh.f = Sa31.f*Sa31.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui&Sa31.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa11.f);
Sch.f = fmaxf(Sch.f, Sa11.f);
Sch.f = fmaxf(Sch.f, gsmall_number);
Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f*Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui&Ssh.ui;
Stmp2.ui = ~Stmp5.ui&Sch.ui;
Sch.ui = Stmp5.ui&Sch.ui;
Ssh.ui = Stmp5.ui&Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f*Stmp1.f;
Ssh.f = Ssh.f*Stmp1.f;
Sc.f = Sch.f*Sch.f;
Ss.f = Ssh.f*Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f*Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f*Sa11.f;
Stmp2.f = Ss.f*Sa31.f;
Sa11.f = Sc.f*Sa11.f;
Sa31.f = Sc.f*Sa31.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp2.f);
Sa31.f = __fsub_rn(Sa31.f, Stmp1.f);
Stmp1.f = Ss.f*Sa12.f;
Stmp2.f = Ss.f*Sa32.f;
Sa12.f = Sc.f*Sa12.f;
Sa32.f = Sc.f*Sa32.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp2.f);
Sa32.f = __fsub_rn(Sa32.f, Stmp1.f);
Stmp1.f = Ss.f*Sa13.f;
Stmp2.f = Ss.f*Sa33.f;
Sa13.f = Sc.f*Sa13.f;
Sa33.f = Sc.f*Sa33.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp2.f);
Sa33.f = __fsub_rn(Sa33.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f*Su11.f;
Stmp2.f = Ss.f*Su13.f;
Su11.f = Sc.f*Su11.f;
Su13.f = Sc.f*Su13.f;
Su11.f = __fadd_rn(Su11.f, Stmp2.f);
Su13.f = __fsub_rn(Su13.f, Stmp1.f);
Stmp1.f = Ss.f*Su21.f;
Stmp2.f = Ss.f*Su23.f;
Su21.f = Sc.f*Su21.f;
Su23.f = Sc.f*Su23.f;
Su21.f = __fadd_rn(Su21.f, Stmp2.f);
Su23.f = __fsub_rn(Su23.f, Stmp1.f);
Stmp1.f = Ss.f*Su31.f;
Stmp2.f = Ss.f*Su33.f;
Su31.f = Sc.f*Su31.f;
Su33.f = Sc.f*Su33.f;
Su31.f = __fadd_rn(Su31.f, Stmp2.f);
Su33.f = __fsub_rn(Su33.f, Stmp1.f);
// Third Givens Rotation
Ssh.f = Sa32.f*Sa32.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui&Sa32.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa22.f);
Sch.f = fmaxf(Sch.f, Sa22.f);
Sch.f = fmaxf(Sch.f, gsmall_number);
Stmp5.ui = (Sa22.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f*Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui&Ssh.ui;
Stmp2.ui = ~Stmp5.ui&Sch.ui;
Sch.ui = Stmp5.ui&Sch.ui;
Ssh.ui = Stmp5.ui&Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f*Sch.f;
Stmp2.f = Ssh.f*Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f*0.5f;
Stmp3.f = Stmp1.f*Stmp4.f;
Stmp3.f = Stmp1.f*Stmp3.f;
Stmp3.f = Stmp2.f*Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f*Stmp1.f;
Ssh.f = Ssh.f*Stmp1.f;
Sc.f = Sch.f*Sch.f;
Ss.f = Ssh.f*Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f*Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f*Sa21.f;
Stmp2.f = Ss.f*Sa31.f;
Sa21.f = Sc.f*Sa21.f;
Sa31.f = Sc.f*Sa31.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp2.f);
Sa31.f = __fsub_rn(Sa31.f, Stmp1.f);
Stmp1.f = Ss.f*Sa22.f;
Stmp2.f = Ss.f*Sa32.f;
Sa22.f = Sc.f*Sa22.f;
Sa32.f = Sc.f*Sa32.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp2.f);
Sa32.f = __fsub_rn(Sa32.f, Stmp1.f);
Stmp1.f = Ss.f*Sa23.f;
Stmp2.f = Ss.f*Sa33.f;
Sa23.f = Sc.f*Sa23.f;
Sa33.f = Sc.f*Sa33.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp2.f);
Sa33.f = __fsub_rn(Sa33.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f*Su12.f;
Stmp2.f = Ss.f*Su13.f;
Su12.f = Sc.f*Su12.f;
Su13.f = Sc.f*Su13.f;
Su12.f = __fadd_rn(Su12.f, Stmp2.f);
Su13.f = __fsub_rn(Su13.f, Stmp1.f);
Stmp1.f = Ss.f*Su22.f;
Stmp2.f = Ss.f*Su23.f;
Su22.f = Sc.f*Su22.f;
Su23.f = Sc.f*Su23.f;
Su22.f = __fadd_rn(Su22.f, Stmp2.f);
Su23.f = __fsub_rn(Su23.f, Stmp1.f);
Stmp1.f = Ss.f*Su32.f;
Stmp2.f = Ss.f*Su33.f;
Su32.f = Sc.f*Su32.f;
Su33.f = Sc.f*Su33.f;
Su32.f = __fadd_rn(Su32.f, Stmp2.f);
Su33.f = __fsub_rn(Su33.f, Stmp1.f);
v11 = Sv11.f; v12 = Sv12.f; v13 = Sv13.f;
v21 = Sv21.f; v22 = Sv22.f; v23 = Sv23.f;
v31 = Sv31.f; v32 = Sv32.f; v33 = Sv33.f;
u11 = Su11.f; u12 = Su12.f; u13 = Su13.f;
u21 = Su21.f; u22 = Su22.f; u23 = Su23.f;
u31 = Su31.f; u32 = Su32.f; u33 = Su33.f;
s11 = Sa11.f;
//s12 = Sa12.f; s13 = Sa13.f; s21 = Sa21.f;
s22 = Sa22.f;
//s23 = Sa23.f; s31 = Sa31.f; s32 = Sa32.f;
s33 = Sa33.f;
}
__global__ void svd3_SOA(const float*__restrict__ input,
float*__restrict__ output,
const int testsize)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= testsize) return;
svd(
input[tid + 0 * testsize], input[tid + 1 * testsize], input[tid + 2 * testsize],
input[tid + 3 * testsize], input[tid + 4 * testsize], input[tid + 5 * testsize],
input[tid + 6 * testsize], input[tid + 7 * testsize], input[tid + 8 * testsize],
output[tid + 0 * testsize], output[tid + 1 * testsize], output[tid + 2 * testsize],
output[tid + 3 * testsize], output[tid + 4 * testsize], output[tid + 5 * testsize],
output[tid + 6 * testsize], output[tid + 7 * testsize], output[tid + 8 * testsize],
output[tid + 9 * testsize], output[tid + 10 * testsize], output[tid + 11 * testsize],
output[tid + 12 * testsize], output[tid + 13 * testsize], output[tid + 14 * testsize],
output[tid + 15 * testsize], output[tid + 16 * testsize], output[tid + 17 * testsize],
output[tid + 18 * testsize], output[tid + 19 * testsize], output[tid + 20 * testsize]
);
}
|
262987dc00bea7748203acab0793de8dbfb03704.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <map>
#include <vector>
#include <iostream>
#include <cmath>
#include <stdlib.h> /* srand, rand */
#include <time.h>
#include "CycleTimer.h"
#define IDX(x,y) ((x) + (width * (y)))
#define GRID(x,y) grid[((x) + (width * (y)))]
#define BOMB(x,y) (GRID((x),(y)) == 10 || GRID((x),(y)) == (-10))
#define HIDDEN(x,y) (GRID((x),(y)) < 0)
#define IX(i) ((i)%width)
#define IY(i) ((i)/width)
void printGrid(int width, int height, int *grid) {
int i,j;
std::cout<<" ";
for(i=0; i<width; i++)
std::cout<<(i%10)<<" ";
std::cout<<"\n";
for(j=0; j<height; j++) {
std::cout<<(j%10)<<" ";
for(i=0; i<width; i++) {
if(grid[IDX(i,j)]==0){
std::cout<<" ";
continue;
}
std::cout<<grid[IDX(i,j)]<<" ";
if(grid[IDX(i,j)]>=0)
std::cout<<" ";
if(abs(grid[IDX(i,j)])<10)
std::cout<<" ";
}
std::cout<<"\n";
}
std::cout<<" ";
for(i=0; i<width; i++)
std::cout<<(i%10)<<" ";
std::cout<<"\n";
}
void printGridCompact(int width, int height, int *grid) {
int i,j;
std::cout<<" ";
for(i=0; i<width; i++)
std::cout<<(i%10)<<" ";
std::cout<<"\n";
for(j=0; j<height; j++) {
std::cout<<(j%10)<<" ";
for(i=0; i<width; i++) {
if(grid[IDX(i,j)]==0){
std::cout<<" ";
continue;
}
std::cout<<grid[IDX(i,j)]<<" ";
}
std::cout<<"\n";
}
std::cout<<" ";
for(i=0; i<width; i++)
std::cout<<(i%10)<<" ";
std::cout<<"\n";
}
/*
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
*/
/**
* If on a border will set to index number, 0 otherwise.
* If the cell is on the grid border but not on a real border will be negative
*/
__global__ void identify_boundaries(int width, int height, int *grid, int *borders) {
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//int index = threadIdx.x;
//int x = IX(index), y = IY(index);
int x = threadIdx.x, y = blockIdx.x;
int index = x + (y*width);
int i,j;
borders[index] = 0;
if(!HIDDEN(x,y))
return;
if(x==0 || x==(width-1) || y==0 || y==(height-1)) {
borders[index] = -(index + 1);
//return;
}
for(i=max(0, x-1); i<=min(width-1, x+1); i++) {
for(j=max(0, y-1); j<=min(height-1, y+1) ; j++) {
if(!HIDDEN(i,j)) {
borders[index] = index + 1;
return;
}
}
}
}
/**
* If a tile is on a boundary it will attempty to make itself a smaller boundary
* by looking at it's neighbors. If it finds a smaller neighbor then set done to true.
* If the tile is on the grid border but not the solving border then the value will be negative.
*/
__global__ void consolidate_boundaries(int width, int height,
int *grid, int *borders, bool *done) {
//int index = threadIdx.x;
//int x = IX(index), y = IY(index);
int x = threadIdx.x, y = blockIdx.x;
int index = x + (y*width);
int i,j;
int newMin = borders[index];
int other;
bool border = newMin < 0;
if(border)
newMin = -newMin;
for(j=max(0, y-2); j<=min(height-1, y+2); j++) {
for(i=max(0, x-2); i<=min(width-1, x+2); i++) {
other = abs(borders[IDX(i,j)]);
if(other != 0 && other<newMin)
newMin = other;
}
}
if(border)
newMin = -newMin;
if(newMin != borders[index]) {
borders[index] = newMin;
*done = false;
}
}
/**
* Every revealed cell next to a border cell will become negative of that border id.
* All other cells will become 0 (except other border cells).
* When called *border should have positive values on borders, negative on grid
* borders, and 0 otherwise.
*/
__global__ void update_borders(int width, int height, int *border, int *grid, bool *flags) {
//int index = threadIdx.x;
//int x = IX(index), y = IY(index);
int x = threadIdx.x, y = blockIdx.x;
int index = x + (y*width);
int i, j, xt, yt;
// if a cell is flagged then automatically becomes 0
if(flags[index]) {
border[index] = 0;
return;
}
// if on a border do nothing
if(border[index] > 0) {
return;
}
// if a cell is negative then it was on a grid border and not an actual border
// if hidden then don't do anything with the cell
if(border[index]<0 || HIDDEN(x,y)) {
border[index] = 0;
return;
}
// each known cell tries to identify with a border
for(j=-1; j<=1; j++) {
yt = y+j;
if(yt<0 || yt>=height)
continue;
for(i=-1; i<=1; i++) {
xt = x+i;
if(xt<0 || xt>=width)
continue;
if(border[IDX(xt,yt)]>0 && !flags[IDX(xt,yt)]) {
border[index] = -border[IDX(xt,yt)];
return;
}
}
}
}
/**
 * Zeroes every cell of arr.  Launched as <<<height, width>>>; each thread
 * clears the one cell it owns.  (height is unused; the flat index comes
 * from blockIdx/threadIdx alone.)
 */
__global__ void grid_borders_zero(int width, int height, int *arr) {
    const int cell = (blockIdx.x * width) + threadIdx.x;
    arr[cell] = 0;
}
/**
 * Drops undecided solver results.  A knowns value of 3 means both the
 * "can be a bomb" bit (1) and the "can be empty" bit (2) were set, so
 * nothing was proven; such cells are reset to 0.
 * Launched as <<<height, width>>>, one thread per cell.
 */
__global__ void finalize_knowns(int width, int height, int *knowns) {
    const int cell = (blockIdx.x * width) + threadIdx.x;
    if (knowns[cell] == 3)
        knowns[cell] = 0;
}
/**
 * Resets knowns to 0 for every cell that is flagged or already revealed
 * (grid value >= 0); only still-hidden, unflagged cells keep their solver
 * state.  Launched as <<<height, width>>>, one thread per cell.
 */
__global__ void clean_knowns(int width, int height, int *grid, bool *flags, int *knowns) {
    const int cell = (blockIdx.x * width) + threadIdx.x;
    const bool revealed = grid[cell] >= 0;
    if (revealed || flags[cell])
        knowns[cell] = 0;
}
/**
 * Builds the constraint values used by the solver; launched as
 * <<<height, width>>> (threadIdx.x = x, blockIdx.x = y).
 * For each revealed, unflagged cell: temp_grid = the cell's number minus
 * bombs already accounted for in its 3x3 neighborhood (flagged neighbors or
 * neighbors already proven to be bombs, old_knowns == 1).  Hidden or
 * flagged cells get 0.
 */
__global__ void make_temp_grid(int width, int height, int *grid, bool *flags,
                                int *old_knowns, int *temp_grid) {
    //int index = threadIdx.x;
    //int x = IX(index), y = IY(index);
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    int i, j, xt, yt;
    // hidden or flagged cells carry no constraint
    if(flags[index] || grid[index]<0) {
        temp_grid[index] = 0;
        return;
    }
    int grid_num = grid[index];
    // subtract every neighbor already known to be a bomb
    for(j=-1; j<=1; j++) {
        yt = y+j;
        if(yt<0 || yt>=height)
            continue;
        for(i=-1; i<=1; i++) {
            xt = x+i;
            if(xt<0 || xt>=width)
                continue;
            if(flags[IDX(xt,yt)] || old_knowns[IDX(xt,yt)]==1)
                grid_num--;
        }
    }
    temp_grid[index] = grid_num;
}
/**
 * Decodes the candidate bomb placement encoded in the launch coordinates.
 * (threadIdx.x, blockIdx.x, blockIdx.y, blockIdx.z) together form one long
 * bit vector; returns true when bit `index` of that vector is set, i.e. the
 * index-th border cell holds a bomb in this candidate solution.
 * thread_max / x_block_max / y_block_max are the bit counts contributed by
 * threadIdx.x, blockIdx.x and blockIdx.y; any remaining bits come from
 * blockIdx.z.
 */
__device__ bool is_bomb(int index, int thread_max, int x_block_max, int y_block_max) {
    // low-order bits live in threadIdx.x
    if(index < thread_max) {
        return ((threadIdx.x>>index) & 1) == 1;
    }
    // there's probably a cleaner method to do this, but its pretty short and not necessarily slow
    index -= thread_max;
    if(index < x_block_max) {
        return ((blockIdx.x>>index) & 1) == 1;
    }
    index -= x_block_max;
    if(index < y_block_max) {
        return ((blockIdx.y>>index) & 1) == 1;
    }
    index -= y_block_max;
    // whatever bits remain are taken from blockIdx.z
    //if(index < max_block_dim) {
    return ((blockIdx.z>>index) & 1) == 1;
    //}
    //return true;
}
/**
 * Brute-force solver: each thread tests ONE candidate bomb placement,
 * encoded by its threadIdx/blockIdx bits (see is_bomb).
 * Validation: for every revealed border cell in known_border, the number of
 * candidate bombs among its 3x3 neighbors must equal its temp_grid value;
 * otherwise the placement is inconsistent and the thread exits.
 * Valid placements are merged into knowns with atomicOr: bit 0 set means
 * the cell can be a bomb, bit 1 set means it can be empty.  All touched
 * knowns entries must be 0 on entry.  temp_grid encodes hidden border cells
 * as -(bit position)-1, which is the index handed to is_bomb.
 * (grid is unused here; kept for signature uniformity with the other
 * kernels — TODO confirm before removing.)
 */
__global__ void find_solvable_tiles(int width, int height, int *grid, int *knowns,
                                    int *border, int border_size, int *temp_grid,
                                    int *known_border, int known_border_size,
                                    int thread_max, int x_block_max, int y_block_max) {
    // in the for loop keeps track of which known cell we are checking against
    int c, i, j, x, y, cell, bombs;
    for(c = 0; c<known_border_size; c++) {
        cell = known_border[c];
        bombs = 0;
        x = IX(cell);
        y = IY(cell);
        // count candidate bombs surrounding this revealed cell
        for(i=max(x-1,0); i<=min(x+1,width-1); i++) {
            for(j=max(y-1,0); j<=min(y+1,height-1); j++) {
                if(temp_grid[IDX(i,j)]<0)
                    if(is_bomb((-1-temp_grid[IDX(i,j)]), thread_max, x_block_max, y_block_max))
                        bombs++;
            }
        }
        // constraint violated -> this candidate placement is impossible
        if(temp_grid[cell]!=bombs)
            return;
    }
    // if the program gets here then this is a valid solution
    // commit into knowns (first bit->1 if bomb, second bit->1 if not)
    for(c=0; c<border_size; c++) {
        cell = border[c];
        //atomicAdd(&knowns[cell], 1);
        if(is_bomb(c, thread_max, x_block_max, y_block_max)) {
            atomicOr(&knowns[cell], 1);
        } else {
            atomicOr(&knowns[cell], 2);
        }
    }
}
/**
 * Raises *solution if any cell of the border identified by border_id holds
 * a determined value (knowns == 1: must be a bomb, knowns == 2: must be
 * clear).  Callable right after find_solvable_tiles with the same buffers;
 * launched as <<<height, width>>>, one thread per cell.
 */
__global__ void border_has_solution(int width, int *knowns, int *border,
                                    int border_id, bool *solution) {
    const int cell = threadIdx.x + (blockIdx.x * width);
    if (border[cell] == border_id) {
        const int verdict = knowns[cell];
        if (verdict == 1 || verdict == 2)
            *solution = true;
    }
}
/**
 * Cheap deterministic deduction pass run before the brute-force search;
 * launched as <<<height, width>>> (threadIdx.x = x, blockIdx.x = y).
 * Only revealed border cells (border[index] < 0) participate.  For such a
 * cell: if its remaining bomb count equals its number of unknown neighbors,
 * every unknown neighbor must be a bomb (knowns |= 1); if all its bombs are
 * already accounted for, every unknown neighbor must be clear (knowns |= 2).
 * atomicOr merges deductions made about the same neighbor by different
 * constraint cells.
 */
__global__ void preliminary_knowns(int width, int height, int *grid, bool *flags,
                                   int *border, int *knowns) {
    //int index = threadIdx.x;
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    if(index >= (width*height) || border[index]>=0)
        return;
    //int x = IX(index), y = IY(index);
    int bomb_count = 0, unknown_count = 0;
    // tally known bombs and still-unknown neighbors in the 3x3 window
    for(int j=max(0,y-1); j<=min(height-1, y+1); j++)
        for(int i=max(0,x-1); i<=min(width-1, x+1); i++) {
            int c = IDX(i,j);
            if(knowns[c]==1 || flags[c])
                bomb_count++;
            else if(knowns[c]==0 && grid[c]<0)
                unknown_count++;
        }
    if(unknown_count == 0)
        return;
    int all_bombs = 0; // will be 1 if all should be bombs, -1 if clear, 0 ow
    if((grid[index]-bomb_count) == unknown_count)
        all_bombs = 1;
    else if(bomb_count == grid[index])
        all_bombs = -1;
    // if know neighbors then fill them
    if(all_bombs!=0) {
        for(int j=max(0,y-1); j<=min(height-1, y+1); j++)
            for(int i=max(0,x-1); i<=min(width-1, x+1); i++) {
                int c = IDX(i,j);
                // only touch hidden, unflagged, undecided border cells
                if(flags[c] || grid[c]>=0 || border[c]<=0 || knowns[c]!=0)
                    continue;
                if(all_bombs<0)//all clear
                    atomicOr(&knowns[c], 2);
                else //all bombs
                    atomicOr(&knowns[c], 1);
            }
    }
}
/**
 * Reveals the cell at (x, y) and, when its value is zero (no adjacent
 * bombs), recursively flood-reveals the hidden neighbors.  Hidden cells are
 * stored as -(value+1); revealing maps them back to the positive value.
 */
void hint_display_cell(int x, int y, int width, int height, int *grid) {
    if (GRID(x, y) >= 0)
        return;  // already revealed
    grid[IDX(x, y)] = -(GRID(x, y) + 1);
    if (GRID(x, y) != 0)
        return;  // cell touches a bomb: stop the flood here
    for (int nx = (x > 0 ? x - 1 : 0); nx <= x + 1 && nx < width; nx++) {
        for (int ny = (y > 0 ? y - 1 : 0); ny <= y + 1 && ny < height; ny++) {
            if (HIDDEN(nx, ny))
                hint_display_cell(nx, ny, width, height, grid);
        }
    }
}
/**
 * Computes which hidden cells are certainly bombs / certainly clear.
 * knowns is the working array this function overwrites: on return 1 means
 * "must be a bomb", 2 means "must be clear", 0 means undecided.
 * Assumes that all flags are correct.  Takes ownership of `border`
 * (allocated by the caller with new[]) and releases it before returning.
 * Returns true when the grid itself was changed (a random tile had to be
 * revealed because some border admitted no consistent solution).
 */
bool known_cells(int width, int height, int *grid, bool *flags, int *knowns,
                 int* border, bool random_sol, bool display_time, bool display_knowns,
                 bool display_debug_grids) {
    double totalStartTime = CycleTimer::currentSeconds();
    // Device limits bound how many candidate-solution bits one launch can
    // encode.  Stack storage: no reason to malloc/free the prop struct.
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);
    bool changed_grid = false;
    int size = width*height;
    int *device_border;
    int *device_grid;
    bool *device_flags;
    int *device_knowns;
    int *device_temp_grid;
    bool *device_border_done;
    bool *device_solution;
    int bool_array_size = sizeof(bool)*size;
    int int_array_size = sizeof(int)*size;
    hipMalloc(&device_grid, int_array_size);
    hipMalloc(&device_flags, bool_array_size);
    hipMalloc(&device_knowns, int_array_size);
    hipMalloc(&device_temp_grid, int_array_size);
    hipMalloc(&device_border, int_array_size);
    hipMalloc(&device_border_done, sizeof(bool));
    hipMalloc(&device_solution, sizeof(bool));
    hipMemcpy(device_grid, grid, int_array_size, hipMemcpyHostToDevice);
    hipMemcpy(device_knowns, knowns, int_array_size, hipMemcpyHostToDevice);
    hipMemcpy(device_flags, flags, bool_array_size, hipMemcpyHostToDevice);
    // each cell identifies itself as border or grid boundary
    hipLaunchKernelGGL(( identify_boundaries), dim3(height), dim3(width), 0, 0, width, height, device_grid, device_border);
    hipDeviceSynchronize();
    // make all numbers on the same border the same (lowest) number;
    // iterate until a full consolidation pass changes nothing
    bool border_done = false;
    while(!border_done) {
        border_done = true;
        hipMemcpy(device_border_done, &border_done, sizeof(bool), hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( consolidate_boundaries), dim3(height), dim3(width), 0, 0, width, height, device_grid,
                           device_border, device_border_done);
        hipDeviceSynchronize();
        hipMemcpy(&border_done, device_border_done,
                  sizeof(bool), hipMemcpyDeviceToHost);
    }
    hipLaunchKernelGGL(( update_borders), dim3(height), dim3(width), 0, 0, width, height, device_border, device_grid, device_flags);
    hipDeviceSynchronize();
    // borders are now identified by numbers
    hipMemcpy(border, device_border, int_array_size, hipMemcpyDeviceToHost);
    if(display_debug_grids) {
        std::cout<<"\nBorder:\n";
        printGrid(width, height, border);
    }
    // start filling out temp grid: uncovered cells will hold the number of
    // unflagged mines touching them
    hipLaunchKernelGGL(( clean_knowns), dim3(height), dim3(width), 0, 0, width, height, device_grid, device_flags, device_knowns);
    hipDeviceSynchronize();
    double prelimStartTime = CycleTimer::currentSeconds();
    // two passes: the second can build on deductions made by the first
    hipLaunchKernelGGL(( preliminary_knowns), dim3(height), dim3(width), 0, 0, width, height, device_grid, device_flags,
                       device_border, device_knowns);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( preliminary_knowns), dim3(height), dim3(width), 0, 0, width, height, device_grid, device_flags,
                       device_border, device_knowns);
    hipDeviceSynchronize();
    double prelimEndTime = CycleTimer::currentSeconds();
    if(display_debug_grids) {
        hipMemcpy(knowns, device_knowns, int_array_size, hipMemcpyDeviceToHost);
        std::cout<<"After prelim:\n";
        printGrid(width, height, knowns);
    }
    hipLaunchKernelGGL(( make_temp_grid), dim3(height), dim3(width), 0, 0, width, height, device_grid, device_flags,
                       device_knowns, device_temp_grid);
    hipDeviceSynchronize();
    hipMemcpy(knowns, device_knowns, int_array_size, hipMemcpyDeviceToHost);
    int *temp_grid = new int[size];
    // get the temp grid onto the CPU
    hipMemcpy(temp_grid, device_temp_grid, int_array_size, hipMemcpyDeviceToHost);
    // Group cells by border id: `borders` collects the hidden border cells,
    // `known_borders` the revealed cells constraining that same border.
    std::map<int, std::vector<int> > borders;
    std::map<int, std::vector<int> > known_borders;
    for(int i=0; i<size; i++) {
        // if border is 0 or there is a previous solution, don't be part of the border
        if(border[i]==0 || knowns[i]!=0)
            continue;
        // fill in vector for the known boundary
        if(border[i]<0) {
            if(known_borders.count(-border[i])==0)
                known_borders[-border[i]] = std::vector<int>(1,i);
            else
                known_borders[-border[i]].push_back(i);
            continue;
        }
        if(borders.count(border[i])==0)
            borders[border[i]] = std::vector<int>(1,i);
        else
            borders[border[i]].push_back(i);
        // temp grid will have -index-1 when the cell has an unknown; this
        // value indexes into the border array (see is_bomb)
        temp_grid[i] = -(borders[border[i]].size());
    }
    // changed temp_grid above, now put back into device_temp and release it
    hipMemcpy(device_temp_grid, temp_grid, int_array_size, hipMemcpyHostToDevice);
    if(display_debug_grids) {
        std::cout<<"\nTemp Grid:\n";
        printGrid(width, height, temp_grid);
    }
    delete[] temp_grid; // allocated with new[], so delete[] (was plain delete)
    // Max number of solution bits each launch dimension can carry: log2 of
    // the device's max threads per block and max grid sizes.
    int thread_max = 0, x_block_max = 0, y_block_max = 0, z_block_max = 0;
    int temp = prop.maxThreadsPerBlock;
    while (temp >>= 1) ++thread_max;
    temp = prop.maxGridSize[0];
    while (temp >>= 1) ++x_block_max;
    temp = prop.maxGridSize[1];
    while (temp >>= 1) ++y_block_max;
    temp = prop.maxGridSize[2];
    while (temp >>= 1) ++z_block_max;
    // back off one bit per dimension for safety margin
    z_block_max--;
    x_block_max--;
    y_block_max--;
    if(display_time)
        std::cout<<"Max Threads: "<<thread_max<<" Max Blocks: "<<x_block_max<<" "<<
            y_block_max<<" "<<z_block_max<<"\n";
    double startTime = CycleTimer::currentSeconds();
    double calcTime = 0;
    // for each boundary enumerate possible solutions; the actual work is here
    for (std::map<int,std::vector<int> >::iterator it=borders.begin();
         it!=borders.end(); ++it) {
        std::vector<int> border_vec = it->second;
        std::vector<int> known_border_vec = known_borders[it->first];
        int *border_elements;
        hipMalloc(&border_elements, sizeof(int)*border_vec.size());
        hipMemcpy(border_elements, &border_vec[0], sizeof(int)*border_vec.size(),
                  hipMemcpyHostToDevice);
        int *known_border_elements;
        hipMalloc(&known_border_elements, sizeof(int)*known_border_vec.size());
        hipMemcpy(known_border_elements, &known_border_vec[0],
                  sizeof(int)*known_border_vec.size(), hipMemcpyHostToDevice);
        // Split the border size (in bits) across threads, then grid x/y/z.
        // Actual numbers sent to the GPU will be 2^value.
        int block_threads = thread_max;
        int block_x = 0;
        int block_y = 0;
        int block_z = 0;
        int border_left = border_vec.size();
        bool skip = false;
        if(border_left <= thread_max) {
            block_threads = border_left;
        } else {
            border_left -= thread_max;
            // set block_x
            if(border_left <= x_block_max) {
                block_x = border_left;
            } else {
                block_x = x_block_max;
                border_left -= x_block_max;
                // if still more left set block_y
                if(border_left <= y_block_max) {
                    block_y = border_left;
                } else {
                    block_y = y_block_max;
                    border_left -= y_block_max;
                    // if still more left set block_z
                    if(border_left <= z_block_max) {
                        block_z = border_left;
                    } else {
                        // the border is too large to enumerate; give up on it
                        skip = true;
                    }
                }
            }
        }
        if(!skip) {
            dim3 grid_block(1<<block_x, 1<<block_y, 1<<block_z);
            if(display_time)
                std::cout<<"Border size: "<<border_vec.size()<<"\nThreads: "<<
                    pow(2.0,block_threads)<< " Grid: "<<grid_block.x<<" "<<
                    grid_block.y<<" "<<grid_block.z<<"\n";
            double calcStartTime = CycleTimer::currentSeconds();
            // NOTE: the hipify output passed a stray leading 0 argument here
            // (13 args for a 12-parameter kernel); arguments now start at
            // `width` to match find_solvable_tiles' signature.
            hipLaunchKernelGGL(( find_solvable_tiles), dim3(grid_block),
                               dim3( 1<<block_threads), 0, 0, width, height, device_grid, device_knowns,
                               border_elements, border_vec.size(), device_temp_grid,
                               known_border_elements, known_border_vec.size(),
                               thread_max, x_block_max, y_block_max);
            hipDeviceSynchronize();
            double calcEndTime = CycleTimer::currentSeconds();
            if(display_time)
                std::cout<<"Calc Time: "<<(calcEndTime-calcStartTime)<<"\n\n";
            calcTime += (calcEndTime-calcStartTime);
            // check if there is a solution for this border; if not, unveil a
            // random safe tile so the player can keep going
            if(random_sol) {
                bool solution=false;
                hipMemcpy(device_solution, &solution, sizeof(bool), hipMemcpyHostToDevice);
                hipLaunchKernelGGL(( border_has_solution), dim3(height), dim3(width), 0, 0, width, device_knowns, device_border, it->first,
                                   device_solution);
                hipDeviceSynchronize();
                hipMemcpy(&solution, device_solution, sizeof(bool), hipMemcpyDeviceToHost);
                if(!solution) {
                    changed_grid = true;
                }
                srand(time(NULL));
                int c = 0; // bounded so we don't retry forever
                while(!solution && c<(int)border_vec.size()) {
                    int t = border_vec[rand()%border_vec.size()];
                    int x = IX(t), y = IY(t);
                    if(!BOMB(x,y)) {
                        std::cout<<"Revealed grid "<<x<<" "<<y<<"\n";
                        hint_display_cell(x, y, width, height, grid);
                        solution = true;
                    }
                    c++;
                }
            }
        }
        // per-border device buffers are freed even on the skip path
        hipFree(border_elements);
        hipFree(known_border_elements);
    }
    if(display_time) {
        std::cout<<"\nTotal Calc Time: "<<calcTime<<"\n";
        double endTime = CycleTimer::currentSeconds();
        std::cout<<"Prelim run time: "<<(prelimEndTime-prelimStartTime)<<
            "\nMain work loop: "<<(endTime-startTime)<<"\n";
    }
    // collapse ambiguous cells (both bits set) and clear revealed/flagged ones
    hipLaunchKernelGGL(( finalize_knowns), dim3(height), dim3(width), 0, 0, width, height, device_knowns);
    hipLaunchKernelGGL(( clean_knowns), dim3(height), dim3(width), 0, 0, width, height, device_grid, device_flags, device_knowns);
    hipDeviceSynchronize();
    hipMemcpy(knowns, device_knowns, int_array_size, hipMemcpyDeviceToHost);
    hipFree(device_grid);
    hipFree(device_flags);
    hipFree(device_knowns);
    hipFree(device_temp_grid);
    hipFree(device_border_done);
    hipFree(device_border);
    hipFree(device_solution);
    delete[] border; // caller allocated with new[]; delete[] matches (was plain delete)
    if(display_time) {
        double totalEndTime = CycleTimer::currentSeconds();
        std::cout<<"Total execution time: "<<(totalEndTime-totalStartTime)<<"\n";
    }
    if(display_knowns || display_debug_grids) {
        std::cout<<"\nFinal knowns:\n";
        printGridCompact(width,height,knowns);
    }
    return changed_grid;
}
/**
 * SAXPY driver (result = alpha * x + y) kept for reference/benchmarking.
 * The entire implementation below is commented out, so this function is
 * currently a no-op and all parameters are ignored.
 */
void
saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
    /*
    int totalBytes = sizeof(float) * 3 * N;
    // compute number of blocks and threads per block
    const int threadsPerBlock = 512;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    float* device_x;
    float* device_y;
    float* device_result;
    //
    // TODO allocate device memory buffers on the GPU using hipMalloc
    //
    size_t size = sizeof(float)*N;
    hipMalloc(&device_x, size);
    hipMalloc(&device_y, size);
    hipMalloc(&device_result, size);
    //
    // TODO copy input arrays to the GPU using hipMemcpy
    //
    hipMemcpy(device_x, xarray, size, hipMemcpyHostToDevice);
    hipMemcpy(device_y, yarray, size, hipMemcpyHostToDevice);
    // start timing after allocation of device memory
    double startTime = CycleTimer::currentSeconds();
    // run kernel
    saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result);
    hipDeviceSynchronize();
    // end timing after result has been copied back into host memory
    double endTime = CycleTimer::currentSeconds();
    //
    // TODO copy result from GPU using hipMemcpy
    //
    hipMemcpy(resultarray, device_result, size, hipMemcpyDeviceToHost);
    hipError_t errCode = hipPeekAtLastError();
    if (errCode != hipSuccess) {
        fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
    }
    double overallDuration = endTime - startTime;
    printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
    // TODO free memory buffers on the GPU
    hipFree(device_x);
    hipFree(device_y);
    hipFree(device_result);
    */
}
/**
 * Prints basic information (SM count, global memory size, compute
 * capability) for every GPU device visible to the runtime.
 */
void
printCudaInfo() {
    // for fun, just print out some stats on the machine
    int deviceCount = 0;
    hipError_t err = hipGetDeviceCount(&deviceCount);
    if (err != hipSuccess) {
        // The original captured err but never checked it; report the failure
        // instead of silently claiming zero devices.
        fprintf(stderr, "hipGetDeviceCount failed: %s\n", hipGetErrorString(err));
        return;
    }
    printf("Found %d CUDA devices\n", deviceCount);
    for (int i=0; i<deviceCount; i++) {
        hipDeviceProp_t deviceProps;
        hipGetDeviceProperties(&deviceProps, i);
        printf("Device %d: %s\n", i, deviceProps.name);
        printf("   SMs:        %d\n", deviceProps.multiProcessorCount);
        printf("   Global mem: %.0f MB\n",
               static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf("   CUDA Cap:   %d.%d\n", deviceProps.major, deviceProps.minor);
    }
}
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <map>
#include <vector>
#include <iostream>
#include <cmath>
#include <stdlib.h> /* srand, rand */
#include <time.h>
#include "CycleTimer.h"
#define IDX(x,y) ((x) + (width * (y)))
#define GRID(x,y) grid[((x) + (width * (y)))]
#define BOMB(x,y) (GRID((x),(y)) == 10 || GRID((x),(y)) == (-10))
#define HIDDEN(x,y) (GRID((x),(y)) < 0)
#define IX(i) ((i)%width)
#define IY(i) ((i)/width)
/**
 * Pretty-prints the grid to stdout.  Rows and columns are framed by
 * modulo-10 index rulers; zero cells print as blanks, and each cell is
 * padded (extra space for non-negative and for single-digit magnitudes) so
 * columns stay aligned for values up to two digits plus a sign.
 */
void printGrid(int width, int height, int *grid) {
    int i,j;
    std::cout<<"  ";
    for(i=0; i<width; i++)
        std::cout<<(i%10)<<"   ";
    std::cout<<"\n";
    for(j=0; j<height; j++) {
        std::cout<<(j%10)<<" ";
        for(i=0; i<width; i++) {
            if(grid[IDX(i,j)]==0){
                // zero prints as an empty cell
                std::cout<<"    ";
                continue;
            }
            std::cout<<grid[IDX(i,j)]<<" ";
            if(grid[IDX(i,j)]>=0)
                std::cout<<" ";
            if(abs(grid[IDX(i,j)])<10)
                std::cout<<" ";
        }
        std::cout<<"\n";
    }
    std::cout<<"  ";
    for(i=0; i<width; i++)
        std::cout<<(i%10)<<"   ";
    std::cout<<"\n";
}
/**
 * Compact variant of printGrid: zero cells print as blanks and no per-cell
 * width padding is applied, so columns line up only for single-digit
 * values.  Framed by the same modulo-10 rulers.
 */
void printGridCompact(int width, int height, int *grid) {
    int i,j;
    std::cout<<"  ";
    for(i=0; i<width; i++)
        std::cout<<(i%10)<<" ";
    std::cout<<"\n";
    for(j=0; j<height; j++) {
        std::cout<<(j%10)<<" ";
        for(i=0; i<width; i++) {
            if(grid[IDX(i,j)]==0){
                // zero prints as an empty cell
                std::cout<<"  ";
                continue;
            }
            std::cout<<grid[IDX(i,j)]<<" ";
        }
        std::cout<<"\n";
    }
    std::cout<<"  ";
    for(i=0; i<width; i++)
        std::cout<<(i%10)<<" ";
    std::cout<<"\n";
}
/*
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
*/
/**
 * Classifies every cell; launched as <<<height, width>>> (threadIdx.x = x,
 * blockIdx.x = y, one thread per cell).
 * Hidden cells touching at least one revealed cell get the unique positive
 * id (index + 1).  Hidden cells on the outer grid edge that touch no
 * revealed cell keep the negated id.  Everything else gets 0.
 */
__global__ void identify_boundaries(int width, int height, int *grid, int *borders) {
    //int index = blockIdx.x * blockDim.x + threadIdx.x;
    //int index = threadIdx.x;
    //int x = IX(index), y = IY(index);
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    int i,j;
    borders[index] = 0;
    // revealed cells are never border members
    if(!HIDDEN(x,y))
        return;
    // outer-edge cells start with a negated id; the positive id below
    // overwrites it if a revealed neighbor is found
    if(x==0 || x==(width-1) || y==0 || y==(height-1)) {
        borders[index] = -(index + 1);
        //return;
    }
    // hidden cell adjacent to any revealed cell -> border cell
    for(i=max(0, x-1); i<=min(width-1, x+1); i++) {
        for(j=max(0, y-1); j<=min(height-1, y+1) ; j++) {
            if(!HIDDEN(i,j)) {
                borders[index] = index + 1;
                return;
            }
        }
    }
}
/**
 * One iteration of border-label consolidation; launched as
 * <<<height, width>>> (threadIdx.x = x, blockIdx.x = y, one thread per
 * cell).  Each labelled cell adopts the smallest non-zero label magnitude
 * found in its 5x5 neighborhood; grid-edge cells (negative label) keep
 * their negative sign.  Any change clears *done so the host keeps
 * iterating until a fixed point is reached.
 */
__global__ void consolidate_boundaries(int width, int height,
                                       int *grid, int *borders, bool *done) {
    //int index = threadIdx.x;
    //int x = IX(index), y = IY(index);
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    int i,j;
    int newMin = borders[index];
    int other;
    // negative label marks a grid-edge cell; minimize the magnitude only
    bool border = newMin < 0;
    if(border)
        newMin = -newMin;
    for(j=max(0, y-2); j<=min(height-1, y+2); j++) {
        for(i=max(0, x-2); i<=min(width-1, x+2); i++) {
            other = abs(borders[IDX(i,j)]);
            if(other != 0 && other<newMin)
                newMin = other;
        }
    }
    if(border)
        newMin = -newMin;
    if(newMin != borders[index]) {
        borders[index] = newMin;
        *done = false;
    }
}
/**
 * Final labelling pass; launched as <<<height, width>>> (threadIdx.x = x,
 * blockIdx.x = y, one thread per cell).
 * On entry *border holds positive ids on border cells, negative ids on bare
 * grid-edge cells, and 0 elsewhere.  On exit: border cells keep their
 * positive id; every revealed cell adjacent to a border cell holds the
 * negated id of that border (tying the constraint cell to it); all other
 * cells become 0.
 */
__global__ void update_borders(int width, int height, int *border, int *grid, bool *flags) {
    //int index = threadIdx.x;
    //int x = IX(index), y = IY(index);
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    int i, j, xt, yt;
    // if a cell is flagged then automatically becomes 0
    if(flags[index]) {
        border[index] = 0;
        return;
    }
    // if on a border (positive id) do nothing
    if(border[index] > 0) {
        return;
    }
    // if a cell is negative then it was on a grid border and not an actual border
    // if hidden then don't do anything with the cell
    if(border[index]<0 || HIDDEN(x,y)) {
        border[index] = 0;
        return;
    }
    // each revealed cell tries to identify with an adjacent border: scan the
    // 3x3 neighborhood and adopt the first border id found, negated
    for(j=-1; j<=1; j++) {
        yt = y+j;
        if(yt<0 || yt>=height)
            continue;
        for(i=-1; i<=1; i++) {
            xt = x+i;
            if(xt<0 || xt>=width)
                continue;
            if(border[IDX(xt,yt)]>0 && !flags[IDX(xt,yt)]) {
                border[index] = -border[IDX(xt,yt)];
                return;
            }
        }
    }
}
/**
 * Zeroes every cell of arr (not only border cells).  Launched as
 * <<<height, width>>>, one thread per cell; height is unused.
 */
__global__ void grid_borders_zero(int width, int height, int *arr) {
    //int index = threadIdx.x;
    int index = threadIdx.x + (width*blockIdx.x);
    arr[index]=0;
}
/**
 * Drops undecided solver results: a knowns value of 3 means both the
 * "can be a bomb" bit (1) and the "can be empty" bit (2) were set, so
 * nothing was proven; such cells are reset to 0.
 * Launched as <<<height, width>>>, one thread per cell.
 */
__global__ void finalize_knowns(int width, int height, int *knowns) {
    //int index = threadIdx.x;
    int index = threadIdx.x + (width*blockIdx.x);
    if(knowns[index]==3)
        knowns[index]=0;
}
/**
 * Resets knowns to 0 for every cell that is flagged or already revealed
 * (grid value >= 0); only still-hidden, unflagged cells keep their solver
 * state.  Launched as <<<height, width>>>, one thread per cell.
 */
__global__ void clean_knowns(int width, int height, int *grid, bool *flags, int *knowns) {
    //int index = threadIdx.x;
    int index = threadIdx.x + (width*blockIdx.x);
    if(flags[index] || grid[index]>=0)
        knowns[index]=0;
}
/**
 * Builds the constraint values used by the solver; launched as
 * <<<height, width>>> (threadIdx.x = x, blockIdx.x = y).
 * For each revealed, unflagged cell: temp_grid = the cell's number minus
 * bombs already accounted for in its 3x3 neighborhood (flagged neighbors or
 * neighbors already proven to be bombs, old_knowns == 1).  Hidden or
 * flagged cells get 0.
 */
__global__ void make_temp_grid(int width, int height, int *grid, bool *flags,
                                int *old_knowns, int *temp_grid) {
    //int index = threadIdx.x;
    //int x = IX(index), y = IY(index);
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    int i, j, xt, yt;
    // hidden or flagged cells carry no constraint
    if(flags[index] || grid[index]<0) {
        temp_grid[index] = 0;
        return;
    }
    int grid_num = grid[index];
    // subtract every neighbor already known to be a bomb
    for(j=-1; j<=1; j++) {
        yt = y+j;
        if(yt<0 || yt>=height)
            continue;
        for(i=-1; i<=1; i++) {
            xt = x+i;
            if(xt<0 || xt>=width)
                continue;
            if(flags[IDX(xt,yt)] || old_knowns[IDX(xt,yt)]==1)
                grid_num--;
        }
    }
    temp_grid[index] = grid_num;
}
/**
 * Decodes the candidate bomb placement encoded in the launch coordinates.
 * (threadIdx.x, blockIdx.x, blockIdx.y, blockIdx.z) together form one long
 * bit vector; returns true when bit `index` of that vector is set, i.e. the
 * index-th border cell holds a bomb in this candidate solution.
 * thread_max / x_block_max / y_block_max are the bit counts contributed by
 * threadIdx.x, blockIdx.x and blockIdx.y; any remaining bits come from
 * blockIdx.z.
 */
__device__ bool is_bomb(int index, int thread_max, int x_block_max, int y_block_max) {
    // low-order bits live in threadIdx.x
    if(index < thread_max) {
        return ((threadIdx.x>>index) & 1) == 1;
    }
    // there's probably a cleaner method to do this, but its pretty short and not necessarily slow
    index -= thread_max;
    if(index < x_block_max) {
        return ((blockIdx.x>>index) & 1) == 1;
    }
    index -= x_block_max;
    if(index < y_block_max) {
        return ((blockIdx.y>>index) & 1) == 1;
    }
    index -= y_block_max;
    // whatever bits remain are taken from blockIdx.z
    //if(index < max_block_dim) {
    return ((blockIdx.z>>index) & 1) == 1;
    //}
    //return true;
}
/**
 * Brute-force solver: each thread tests ONE candidate bomb placement,
 * encoded by its threadIdx/blockIdx bits (see is_bomb).
 * Validation: for every revealed border cell in known_border, the number of
 * candidate bombs among its 3x3 neighbors must equal its temp_grid value;
 * otherwise the placement is inconsistent and the thread exits.
 * Valid placements are merged into knowns with atomicOr: bit 0 set means
 * the cell can be a bomb, bit 1 set means it can be empty.  All touched
 * knowns entries must be 0 on entry.  temp_grid encodes hidden border cells
 * as -(bit position)-1, which is the index handed to is_bomb.
 * (grid is unused here; kept for signature uniformity with the other
 * kernels — TODO confirm before removing.)
 */
__global__ void find_solvable_tiles(int width, int height, int *grid, int *knowns,
                                    int *border, int border_size, int *temp_grid,
                                    int *known_border, int known_border_size,
                                    int thread_max, int x_block_max, int y_block_max) {
    // in the for loop keeps track of which known cell we are checking against
    int c, i, j, x, y, cell, bombs;
    for(c = 0; c<known_border_size; c++) {
        cell = known_border[c];
        bombs = 0;
        x = IX(cell);
        y = IY(cell);
        // count candidate bombs surrounding this revealed cell
        for(i=max(x-1,0); i<=min(x+1,width-1); i++) {
            for(j=max(y-1,0); j<=min(y+1,height-1); j++) {
                if(temp_grid[IDX(i,j)]<0)
                    if(is_bomb((-1-temp_grid[IDX(i,j)]), thread_max, x_block_max, y_block_max))
                        bombs++;
            }
        }
        // constraint violated -> this candidate placement is impossible
        if(temp_grid[cell]!=bombs)
            return;
    }
    // if the program gets here then this is a valid solution
    // commit into knowns (first bit->1 if bomb, second bit->1 if not)
    for(c=0; c<border_size; c++) {
        cell = border[c];
        //atomicAdd(&knowns[cell], 1);
        if(is_bomb(c, thread_max, x_block_max, y_block_max)) {
            atomicOr(&knowns[cell], 1);
        } else {
            atomicOr(&knowns[cell], 2);
        }
    }
}
/**
 * Raises *solution if any cell of the border identified by border_id holds
 * a determined value (knowns == 1: must be a bomb, knowns == 2: must be
 * clear).  Callable right after find_solvable_tiles with the same buffers;
 * launched as <<<height, width>>>, one thread per cell.
 */
__global__ void border_has_solution(int width, int *knowns, int *border,
                                    int border_id, bool *solution) {
    //int index = threadIdx.x;
    int index = threadIdx.x + (width * blockIdx.x);
    if(border[index] != border_id)
        return;
    int c = knowns[index];
    if(c==1 || c==2)
        *solution = true;
}
/**
 * Cheap deterministic deduction pass run before the brute-force search;
 * launched as <<<height, width>>> (threadIdx.x = x, blockIdx.x = y).
 * Only revealed border cells (border[index] < 0) participate.  For such a
 * cell: if its remaining bomb count equals its number of unknown neighbors,
 * every unknown neighbor must be a bomb (knowns |= 1); if all its bombs are
 * already accounted for, every unknown neighbor must be clear (knowns |= 2).
 * atomicOr merges deductions made about the same neighbor by different
 * constraint cells.
 */
__global__ void preliminary_knowns(int width, int height, int *grid, bool *flags,
                                   int *border, int *knowns) {
    //int index = threadIdx.x;
    int x = threadIdx.x, y = blockIdx.x;
    int index = x + (y*width);
    if(index >= (width*height) || border[index]>=0)
        return;
    //int x = IX(index), y = IY(index);
    int bomb_count = 0, unknown_count = 0;
    // tally known bombs and still-unknown neighbors in the 3x3 window
    for(int j=max(0,y-1); j<=min(height-1, y+1); j++)
        for(int i=max(0,x-1); i<=min(width-1, x+1); i++) {
            int c = IDX(i,j);
            if(knowns[c]==1 || flags[c])
                bomb_count++;
            else if(knowns[c]==0 && grid[c]<0)
                unknown_count++;
        }
    if(unknown_count == 0)
        return;
    int all_bombs = 0; // will be 1 if all should be bombs, -1 if clear, 0 ow
    if((grid[index]-bomb_count) == unknown_count)
        all_bombs = 1;
    else if(bomb_count == grid[index])
        all_bombs = -1;
    // if know neighbors then fill them
    if(all_bombs!=0) {
        for(int j=max(0,y-1); j<=min(height-1, y+1); j++)
            for(int i=max(0,x-1); i<=min(width-1, x+1); i++) {
                int c = IDX(i,j);
                // only touch hidden, unflagged, undecided border cells
                if(flags[c] || grid[c]>=0 || border[c]<=0 || knowns[c]!=0)
                    continue;
                if(all_bombs<0)//all clear
                    atomicOr(&knowns[c], 2);
                else //all bombs
                    atomicOr(&knowns[c], 1);
            }
    }
}
/**
 * Reveals the cell at (x, y) and, when its value is zero (no adjacent
 * bombs), recursively flood-reveals the hidden neighbors.  Hidden cells are
 * stored as -(value+1); revealing maps them back to the positive value.
 */
void hint_display_cell(int x, int y, int width, int height, int *grid) {
    if (GRID(x, y) >= 0)
        return;  // already revealed
    grid[IDX(x, y)] = -(GRID(x, y) + 1);
    if (GRID(x, y) != 0)
        return;  // cell touches a bomb: stop the flood here
    for (int nx = (x > 0 ? x - 1 : 0); nx <= x + 1 && nx < width; nx++) {
        for (int ny = (y > 0 ? y - 1 : 0); ny <= y + 1 && ny < height; ny++) {
            if (HIDDEN(nx, ny))
                hint_display_cell(nx, ny, width, height, grid);
        }
    }
}
/**
* Knowns is the working array that this function will overwrite with new data.
* Assumes that all flags are correct.
*/
bool known_cells(int width, int height, int *grid, bool *flags, int *knowns,
int* border, bool random_sol, bool display_time, bool display_knowns,
bool display_debug_grids) {
double totalStartTime = CycleTimer::currentSeconds();
cudaDeviceProp *prop = (cudaDeviceProp *)malloc(sizeof(cudaDeviceProp));
cudaGetDeviceProperties(prop, 0);
//std::cout<<"Threads: "<<prop->maxThreadsPerBlock<<" Blocks: "<<prop->maxGridSize[0]<<
//" "<<prop->maxGridSize[1]<<" "<<prop->maxGridSize[2]<<"\n";
bool changed_grid = false;
int size = width*height;
//int *border = new int[size];
int *device_border;
int *device_grid;
bool *device_flags;
int *device_knowns;
int *device_temp_grid;
bool *device_border_done;
bool *device_solution;
int bool_array_size = sizeof(bool)*size;
int int_array_size = sizeof(int)*size;
cudaMalloc(&device_grid, int_array_size);
cudaMalloc(&device_flags, bool_array_size);
cudaMalloc(&device_knowns, int_array_size);
cudaMalloc(&device_temp_grid, int_array_size);
cudaMalloc(&device_border, int_array_size);
cudaMalloc(&device_border_done, sizeof(bool));
cudaMalloc(&device_solution, sizeof(bool));
cudaMemcpy(device_grid, grid, int_array_size, cudaMemcpyHostToDevice);
cudaMemcpy(device_knowns, knowns, int_array_size, cudaMemcpyHostToDevice);
cudaMemcpy(device_flags, flags, bool_array_size, cudaMemcpyHostToDevice);
// each cell identifies itself as border or grid boundary
identify_boundaries<<<height, width>>>(width, height, device_grid, device_border);
cudaThreadSynchronize();
bool border_done = false;
// make all numbers on the same border the same (lowest) number
while(!border_done) {
border_done = true;
cudaMemcpy(device_border_done, &border_done, sizeof(bool), cudaMemcpyHostToDevice);
consolidate_boundaries<<<height, width>>>(width, height, device_grid,
device_border, device_border_done);
cudaThreadSynchronize();
cudaMemcpy(&border_done, device_border_done,
sizeof(bool), cudaMemcpyDeviceToHost);
}
//cudaThreadSynchronize();
update_borders<<<height, width>>>(width, height, device_border, device_grid, device_flags);
cudaThreadSynchronize();
// borders are now identified by numbers
cudaMemcpy(border, device_border, int_array_size, cudaMemcpyDeviceToHost);
if(display_debug_grids) {
std::cout<<"\nBorder:\n";
printGrid(width, height, border);
}
// start filling out temp grid
// uncovered cells in device_temp_grid will now have the number of unflagged mines touching them
clean_knowns<<<height, width>>>(width, height, device_grid, device_flags, device_knowns);
cudaThreadSynchronize();
double prelimStartTime = CycleTimer::currentSeconds();
preliminary_knowns<<<height, width>>>(width, height, device_grid, device_flags,
device_border, device_knowns);
cudaThreadSynchronize();
preliminary_knowns<<<height, width>>>(width, height, device_grid, device_flags,
device_border, device_knowns);
cudaThreadSynchronize();
double prelimEndTime = CycleTimer::currentSeconds();
if(display_debug_grids) {
cudaMemcpy(knowns, device_knowns, int_array_size, cudaMemcpyDeviceToHost);
std::cout<<"After prelim:\n";
printGrid(width, height, knowns);
}
make_temp_grid<<<height, width>>>(width, height, device_grid, device_flags,
device_knowns, device_temp_grid);
cudaThreadSynchronize();
cudaMemcpy(knowns, device_knowns, int_array_size, cudaMemcpyDeviceToHost);
int *temp_grid = new int[size];
// get the temp grid from CPU
cudaMemcpy(temp_grid, device_temp_grid, int_array_size, cudaMemcpyDeviceToHost);
std::map<int, std::vector<int> > borders;
std::map<int, std::vector<int> > known_borders;
// fill in the lists for the borders
// One list for the known and another for the unknown borders
for(int i=0; i<size; i++) {
// if border is 0 or there is a previous solution, don't be part of the border
if(border[i]==0 || knowns[i]!=0)
continue;
// fill in vector for the known boundary
if(border[i]<0) {
if(known_borders.count(-border[i])==0)
known_borders[-border[i]] = std::vector<int>(1,i);
else
known_borders[-border[i]].push_back(i);
continue;
}
if(borders.count(border[i])==0)
borders[border[i]] = std::vector<int>(1,i);
else
borders[border[i]].push_back(i);
// temp grid will have the -index-1 when the cell has an unknown, will index into border array
temp_grid[i] = -(borders[border[i]].size());
}
// changed temp_grid above, now put back into device_temp and delete
cudaMemcpy(device_temp_grid, temp_grid, int_array_size, cudaMemcpyHostToDevice);
if(display_debug_grids) {
std::cout<<"\nTemp Grid:\n";
printGrid(width, height, temp_grid);
}
delete(temp_grid);
// Calculate the max number of cells each dimension can be responsible for.
// Do this by calculating log_2 of the max thread and difference max dimensions
// based on GPU information
int thread_max = 0, x_block_max = 0, y_block_max = 0, z_block_max = 0;
int temp = prop->maxThreadsPerBlock;
while (temp >>= 1) ++thread_max;
temp = prop->maxGridSize[0];
while (temp >>= 1) ++x_block_max;
temp = prop->maxGridSize[1];
while (temp >>= 1) ++y_block_max;
temp = prop->maxGridSize[2];
while (temp >>= 1) ++z_block_max;
z_block_max--;
x_block_max--;
y_block_max--;
if(display_time)
std::cout<<"Max Threads: "<<thread_max<<" Max Blocks: "<<x_block_max<<" "<<
y_block_max<<" "<<z_block_max<<"\n";
//std::cout<<prop->maxThreadsPerBlock<<" "<<prop->maxGridSize[0]<<" "<<
//prop->maxGridSize[1]<<" "<<prop->maxGridSize[2]<<"\n";
double startTime = CycleTimer::currentSeconds();
double calcTime = 0;
// for each boundary find possible solutions, actuall work here
for (std::map<int,std::vector<int> >::iterator it=borders.begin();
it!=borders.end(); ++it) {
std::vector<int> border_vec = it->second;
std::vector<int> known_border_vec = known_borders[it->first];
/*
std::cout<<"Border "<< it->first <<" size: "<<border_vec.size()<<
" known size: "<<known_border_vec.size()<<"\nBorder: ";
for(int i=0; i<border_vec.size(); i++)
std::cout<<border_vec[i]<<" ";
std::cout<<"\nKnown: ";
for(int i=0; i<known_border_vec.size(); i++)
std::cout<<known_border_vec[i]<<" ";
std::cout<<"\n";
*/
//std::cout << it->first << " => " << it->second << '\n';
int *border_elements;
cudaMalloc(&border_elements, sizeof(int)*border_vec.size());
cudaMemcpy(border_elements, &border_vec[0], sizeof(int)*border_vec.size(),
cudaMemcpyHostToDevice);
int *known_border_elements;
cudaMalloc(&known_border_elements, sizeof(int)*known_border_vec.size());
cudaMemcpy(known_border_elements, &known_border_vec[0],
sizeof(int)*known_border_vec.size(), cudaMemcpyHostToDevice);
// these are the number of grid cells the different dimensions will represent
// actual numbers that will be sent to the gpu will be 2^x
int block_threads = thread_max;
int block_x = 0;
int block_y = 0;
int block_z = 0;
int border_left = border_vec.size();
bool skip = false;
if(border_left <= thread_max) {
block_threads = border_left;
} else {
border_left -= thread_max;
// set block_x
if(border_left <= x_block_max) {
block_x = border_left;
} else {
block_x = x_block_max;
border_left -= x_block_max;
// if still more left set block_y
if(border_left <= y_block_max) {
block_y = border_left;
} else {
block_y = y_block_max;
border_left -= y_block_max;
// if still more left set block_z
if(border_left <= z_block_max) {
block_z = border_left;
} else {
//if you get to this point the border is quite large, just
// give up
skip = true;
}
}
}
}
if(!skip) {
dim3 grid_block(1<<block_x, 1<<block_y, 1<<block_z);
if(display_time)
std::cout<<"Border size: "<<border_vec.size()<<"\nThreads: "<<
pow(2.0,block_threads)<< " Grid: "<<grid_block.x<<" "<<
grid_block.y<<" "<<grid_block.z<<"\n";
double calcStartTime = CycleTimer::currentSeconds();
find_solvable_tiles<<< grid_block,
1<<block_threads >>>(width, height, device_grid, device_knowns,
border_elements, border_vec.size(), device_temp_grid,
known_border_elements, known_border_vec.size(),
thread_max, x_block_max, y_block_max);
cudaThreadSynchronize();
double calcEndTime = CycleTimer::currentSeconds();
if(display_time)
std::cout<<"Calc Time: "<<(calcEndTime-calcStartTime)<<"\n\n";
calcTime += (calcEndTime-calcStartTime);
// check if there is a solution for this border, if not unveil a random tile
//random_sol = false;
if(random_sol) {
bool solution=false;
cudaMemcpy(device_solution, &solution, sizeof(bool), cudaMemcpyHostToDevice);
border_has_solution<<<height, width>>>(width, device_knowns, device_border, it->first,
device_solution);
cudaThreadSynchronize();
cudaMemcpy(&solution, device_solution, sizeof(bool), cudaMemcpyDeviceToHost);
if(!solution) {
//std::cout<<"NO SOLUTION for border "<< it->first <<"\n";
changed_grid = true;
}
srand(time(NULL));
int c = 0;//don't want to repeat forever
while(!solution && c<border_vec.size()) {
int t = border_vec[rand()%border_vec.size()];
int x = IX(t), y = IY(t);
if(!BOMB(x,y)) {
std::cout<<"Revealed grid "<<x<<" "<<y<<"\n";
hint_display_cell(x, y, width, height, grid);
solution = true;
}
c++;
}
}
}
// clean up
cudaFree(border_elements);
cudaFree(known_border_elements);
}
if(display_time) {
std::cout<<"\nTotal Calc Time: "<<calcTime<<"\n";
double endTime = CycleTimer::currentSeconds();
std::cout<<"Prelim run time: "<<(prelimEndTime-prelimStartTime)<<
"\nMain work loop: "<<(endTime-startTime)<<"\n";
}
//cudaThreadSynchronize();
//cudaMemcpy(knowns, device_knowns, int_array_size, cudaMemcpyDeviceToHost);
//std::cout<<"\nBefore Clean:\n";
//printGrid(width,height,knowns);
finalize_knowns<<<height, width>>>(width, height, device_knowns);
clean_knowns<<<height, width>>>(width, height, device_grid, device_flags, device_knowns);
cudaThreadSynchronize();
cudaMemcpy(knowns, device_knowns, int_array_size, cudaMemcpyDeviceToHost);
cudaFree(device_grid);
cudaFree(device_flags);
cudaFree(device_knowns);
cudaFree(device_temp_grid);
cudaFree(device_border_done);
cudaFree(device_border);
cudaFree(device_solution);
free(prop);
delete(border);
if(display_time) {
double totalEndTime = CycleTimer::currentSeconds();
std::cout<<"Total execution time: "<<(totalEndTime-totalStartTime)<<"\n";
}
if(display_knowns || display_debug_grids) {
std::cout<<"\nFinal knowns:\n";
printGridCompact(width,height,knowns);
//std::cout<<"Done\n";
}
return changed_grid;
}
// Host wrapper for SAXPY (result = alpha * x + y) on the GPU.
// NOTE(review): the entire implementation below is commented out, so this
// function is currently an intentional no-op stub and all parameters are
// unused. The dead code is kept as a reference for the standard
// alloc / copy-in / launch / copy-out / free pattern; re-enable or delete it.
void
saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
/*
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block
const int threadsPerBlock = 512;
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
float* device_x;
float* device_y;
float* device_result;
//
// TODO allocate device memory buffers on the GPU using cudaMalloc
//
size_t size = sizeof(float)*N;
cudaMalloc(&device_x, size);
cudaMalloc(&device_y, size);
cudaMalloc(&device_result, size);
//
// TODO copy input arrays to the GPU using cudaMemcpy
//
cudaMemcpy(device_x, xarray, size, cudaMemcpyHostToDevice);
cudaMemcpy(device_y, yarray, size, cudaMemcpyHostToDevice);
// start timing after allocation of device memory
double startTime = CycleTimer::currentSeconds();
// run kernel
saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result);
cudaThreadSynchronize();
// end timing after result has been copied back into host memory
double endTime = CycleTimer::currentSeconds();
//
// TODO copy result from GPU using cudaMemcpy
//
cudaMemcpy(resultarray, device_result, size, cudaMemcpyDeviceToHost);
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
double overallDuration = endTime - startTime;
printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
// TODO free memory buffers on the GPU
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_result);
*/
}
// Print a short summary per visible CUDA device (name, SM count, global
// memory, compute capability) to stdout.
// Bug fix: runtime-API errors were previously assigned to `err` but never
// checked, so a failed query silently reported "Found 0 CUDA devices"; they
// are now reported to stderr.
void
printCudaInfo() {
    // for fun, just print out some stats on the machine
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return;
    }
    printf("Found %d CUDA devices\n", deviceCount);
    for (int i = 0; i < deviceCount; i++) {
        cudaDeviceProp deviceProps;
        err = cudaGetDeviceProperties(&deviceProps, i);
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaGetDeviceProperties(%d) failed: %s\n",
                    i, cudaGetErrorString(err));
            continue;
        }
        printf("Device %d: %s\n", i, deviceProps.name);
        printf(" SMs: %d\n", deviceProps.multiProcessorCount);
        printf(" Global mem: %.0f MB\n",
               static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
    }
}
|
3b4400d34d985f82f26d3d0c4f1f9cbc123ad631.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
namespace {
// Device-side math helpers and the LLTM forward/backward kernels.
// NOTE(review): the double literals (1.0, 0.0) force double-precision math
// even for scalar_t = float — confirm this precision/perf tradeoff is intended.

// Logistic sigmoid: 1 / (1 + e^-z).
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
return 1.0 / (1.0 + exp(-z));
}
// Derivative of the sigmoid, computed from sigmoid(z): s * (1 - s).
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
const auto s = sigmoid(z);
return (1.0 - s) * s;
}
// Derivative of tanh: 1 - tanh(z)^2.
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
const auto t = tanh(z);
return 1 - (t * t);
}
// Exponential linear unit: z for z > 0, alpha * (e^z - 1) otherwise.
// NOTE(review): fmaxf/fminf are the float overloads, so scalar_t = double
// inputs would be truncated to float here — confirm whether fmax/fmin were meant.
template <typename scalar_t>
__device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0));
}
// Derivative of the ELU above: 1 for z >= 0, alpha * e^z for z < 0.
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
const auto e = exp(z);
const auto d_relu = z < 0.0 ? 0.0 : 1.0;
return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
}
// LLTM forward kernel.
// Layout: blockIdx.y indexes the batch element; blockIdx.x * blockDim.x +
// threadIdx.x indexes the state column. Each batch row of `gates` holds three
// consecutive blocks of `state_size` pre-activations:
// [input_gate | output_gate | candidate_cell] (offsets 0, S, 2S below).
// Each thread writes one element of every output buffer; the bounds check
// guards the ragged last block.
template <typename scalar_t>
__global__ void lltm_cuda_forward_kernel(
const scalar_t* __restrict__ gates,
const scalar_t* __restrict__ old_cell,
scalar_t* __restrict__ new_h,
scalar_t* __restrict__ new_cell,
scalar_t* __restrict__ input_gate,
scalar_t* __restrict__ output_gate,
scalar_t* __restrict__ candidate_cell,
size_t state_size) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int index = blockIdx.y * state_size + column;
const int gates_row = blockIdx.y * (state_size * 3);
if (column < state_size) {
input_gate[index] = sigmoid(gates[gates_row + column]);
output_gate[index] = sigmoid(gates[gates_row + state_size + column]);
candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]);
new_cell[index] =
old_cell[index] + candidate_cell[index] * input_gate[index];
new_h[index] = tanh(new_cell[index]) * output_gate[index];
}
}
// LLTM backward kernel: from grad_h/grad_cell and the saved forward
// intermediates, computes d_old_cell and the three per-gate gradients
// (d_gates[n][0] = input gate, [1] = output gate, [2] = candidate), matching
// the forward kernel's gate ordering. blockIdx.y is the batch index `n`;
// the x dimension covers state columns `c`, guarded by d_gates.size(2).
template <typename scalar_t>
__global__ void lltm_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < d_gates.size(2)){
const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
const auto d_new_cell =
d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
d_old_cell[n][c] = d_new_cell;
const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
d_gates[n][0][c] =
d_input_gate * d_sigmoid(gate_weights[n][0][c]);
d_gates[n][1][c] =
d_output_gate * d_sigmoid(gate_weights[n][1][c]);
d_gates[n][2][c] =
d_candidate_cell * d_elu(gate_weights[n][2][c]);
}
}
} // namespace
// Host wrapper for the LLTM forward pass (HIP-generated launch syntax).
// Computes gate pre-activations with addmm (bias + [old_h, input] * W^T),
// then launches lltm_cuda_forward_kernel with one thread per state column
// and one grid row per batch element. Returns every intermediate the
// backward pass needs.
std::vector<torch::Tensor> lltm_cuda_forward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor bias,
torch::Tensor old_h,
torch::Tensor old_cell) {
// X = [old_h, input] concatenated along the feature dimension.
auto X = torch::cat({old_h, input}, /*dim=*/1);
// gates: shape [batch, 3 * state_size] of pre-activations.
auto gates = torch::addmm(bias, X, weights.transpose(0, 1));
const auto batch_size = old_cell.size(0);
const auto state_size = old_cell.size(1);
auto new_h = torch::zeros_like(old_cell);
auto new_cell = torch::zeros_like(old_cell);
auto input_gate = torch::zeros_like(old_cell);
auto output_gate = torch::zeros_like(old_cell);
auto candidate_cell = torch::zeros_like(old_cell);
// Launch config: ceil(state_size / threads) blocks in x, batch in y.
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
gates.data<scalar_t>(),
old_cell.data<scalar_t>(),
new_h.data<scalar_t>(),
new_cell.data<scalar_t>(),
input_gate.data<scalar_t>(),
output_gate.data<scalar_t>(),
candidate_cell.data<scalar_t>(),
state_size);
}));
return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
}
// Host wrapper for the LLTM backward pass (HIP-generated launch syntax).
// Launches lltm_cuda_backward_kernel to get per-gate gradients, then reduces
// them into weight, bias, input, and hidden-state gradients with tensor ops.
std::vector<torch::Tensor> lltm_cuda_backward(
    torch::Tensor grad_h,
    torch::Tensor grad_cell,
    torch::Tensor new_cell,
    torch::Tensor input_gate,
    torch::Tensor output_gate,
    torch::Tensor candidate_cell,
    torch::Tensor X,
    torch::Tensor gates,
    torch::Tensor weights) {
  auto d_old_cell = torch::zeros_like(new_cell);
  auto d_gates = torch::zeros_like(gates);
  const auto batch_size = new_cell.size(0);
  const auto state_size = new_cell.size(1);
  // Launch config mirrors the forward pass: one thread per state column,
  // grid.y indexes the batch.
  const int threads = 1024;
  const dim3 blocks((state_size + threads - 1) / threads, batch_size);
  // Fix: the dispatch label previously read "lltm_forward_cuda" (copy-paste),
  // which made dtype-dispatch error messages point at the wrong op.
  AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_backward_cuda", ([&] {
    hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
  }));
  // Collapse [batch, 3, state] gate gradients to [batch, 3*state] and
  // back-propagate through the forward pass's addmm.
  auto d_gate_weights = d_gates.flatten(1, 2);
  auto d_weights = d_gate_weights.t().mm(X);
  auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
  auto d_X = d_gate_weights.mm(weights);
  auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
  auto d_input = d_X.slice(/*dim=*/1, state_size);
  return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
} | 3b4400d34d985f82f26d3d0c4f1f9cbc123ad631.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
namespace {
// Device-side math helpers and the LLTM forward/backward kernels.
// NOTE(review): the double literals (1.0, 0.0) force double-precision math
// even for scalar_t = float — confirm this precision/perf tradeoff is intended.

// Logistic sigmoid: 1 / (1 + e^-z).
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
return 1.0 / (1.0 + exp(-z));
}
// Derivative of the sigmoid, computed from sigmoid(z): s * (1 - s).
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
const auto s = sigmoid(z);
return (1.0 - s) * s;
}
// Derivative of tanh: 1 - tanh(z)^2.
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
const auto t = tanh(z);
return 1 - (t * t);
}
// Exponential linear unit: z for z > 0, alpha * (e^z - 1) otherwise.
// NOTE(review): fmaxf/fminf are the float overloads, so scalar_t = double
// inputs would be truncated to float here — confirm whether fmax/fmin were meant.
template <typename scalar_t>
__device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0));
}
// Derivative of the ELU above: 1 for z >= 0, alpha * e^z for z < 0.
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
const auto e = exp(z);
const auto d_relu = z < 0.0 ? 0.0 : 1.0;
return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
}
// LLTM forward kernel.
// Layout: blockIdx.y indexes the batch element; blockIdx.x * blockDim.x +
// threadIdx.x indexes the state column. Each batch row of `gates` holds three
// consecutive blocks of `state_size` pre-activations:
// [input_gate | output_gate | candidate_cell] (offsets 0, S, 2S below).
// Each thread writes one element of every output buffer; the bounds check
// guards the ragged last block.
template <typename scalar_t>
__global__ void lltm_cuda_forward_kernel(
const scalar_t* __restrict__ gates,
const scalar_t* __restrict__ old_cell,
scalar_t* __restrict__ new_h,
scalar_t* __restrict__ new_cell,
scalar_t* __restrict__ input_gate,
scalar_t* __restrict__ output_gate,
scalar_t* __restrict__ candidate_cell,
size_t state_size) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int index = blockIdx.y * state_size + column;
const int gates_row = blockIdx.y * (state_size * 3);
if (column < state_size) {
input_gate[index] = sigmoid(gates[gates_row + column]);
output_gate[index] = sigmoid(gates[gates_row + state_size + column]);
candidate_cell[index] = elu(gates[gates_row + 2 * state_size + column]);
new_cell[index] =
old_cell[index] + candidate_cell[index] * input_gate[index];
new_h[index] = tanh(new_cell[index]) * output_gate[index];
}
}
// LLTM backward kernel: from grad_h/grad_cell and the saved forward
// intermediates, computes d_old_cell and the three per-gate gradients
// (d_gates[n][0] = input gate, [1] = output gate, [2] = candidate), matching
// the forward kernel's gate ordering. blockIdx.y is the batch index `n`;
// the x dimension covers state columns `c`, guarded by d_gates.size(2).
template <typename scalar_t>
__global__ void lltm_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < d_gates.size(2)){
const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
const auto d_new_cell =
d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
d_old_cell[n][c] = d_new_cell;
const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
d_gates[n][0][c] =
d_input_gate * d_sigmoid(gate_weights[n][0][c]);
d_gates[n][1][c] =
d_output_gate * d_sigmoid(gate_weights[n][1][c]);
d_gates[n][2][c] =
d_candidate_cell * d_elu(gate_weights[n][2][c]);
}
}
} // namespace
// Host wrapper for the LLTM forward pass.
// Computes gate pre-activations with addmm (bias + [old_h, input] * W^T),
// then launches lltm_cuda_forward_kernel with one thread per state column
// and one grid row per batch element. Returns every intermediate the
// backward pass needs.
std::vector<torch::Tensor> lltm_cuda_forward(
    torch::Tensor input,
    torch::Tensor weights,
    torch::Tensor bias,
    torch::Tensor old_h,
    torch::Tensor old_cell) {
  // X = [old_h, input] concatenated along the feature dimension.
  auto X = torch::cat({old_h, input}, /*dim=*/1);
  // gates: shape [batch, 3 * state_size] of pre-activations.
  auto gates = torch::addmm(bias, X, weights.transpose(0, 1));

  const auto batch_size = old_cell.size(0);
  const auto state_size = old_cell.size(1);

  // One output buffer per intermediate the backward pass will consume.
  auto new_h = torch::zeros_like(old_cell);
  auto new_cell = torch::zeros_like(old_cell);
  auto input_gate = torch::zeros_like(old_cell);
  auto output_gate = torch::zeros_like(old_cell);
  auto candidate_cell = torch::zeros_like(old_cell);

  // Launch config: ceil(state_size / kThreads) blocks in x, batch in y.
  const int kThreads = 1024;
  const dim3 grid((state_size + kThreads - 1) / kThreads, batch_size);

  AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
    lltm_cuda_forward_kernel<scalar_t><<<grid, kThreads>>>(
        gates.data<scalar_t>(),
        old_cell.data<scalar_t>(),
        new_h.data<scalar_t>(),
        new_cell.data<scalar_t>(),
        input_gate.data<scalar_t>(),
        output_gate.data<scalar_t>(),
        candidate_cell.data<scalar_t>(),
        state_size);
  }));

  return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
}
// Host wrapper for the LLTM backward pass.
// Launches lltm_cuda_backward_kernel to get per-gate gradients, then reduces
// them into weight, bias, input, and hidden-state gradients with tensor ops.
std::vector<torch::Tensor> lltm_cuda_backward(
    torch::Tensor grad_h,
    torch::Tensor grad_cell,
    torch::Tensor new_cell,
    torch::Tensor input_gate,
    torch::Tensor output_gate,
    torch::Tensor candidate_cell,
    torch::Tensor X,
    torch::Tensor gates,
    torch::Tensor weights) {
  auto d_old_cell = torch::zeros_like(new_cell);
  auto d_gates = torch::zeros_like(gates);
  const auto batch_size = new_cell.size(0);
  const auto state_size = new_cell.size(1);
  // Launch config mirrors the forward pass: one thread per state column,
  // grid.y indexes the batch.
  const int threads = 1024;
  const dim3 blocks((state_size + threads - 1) / threads, batch_size);
  // Fix: the dispatch label previously read "lltm_forward_cuda" (copy-paste),
  // which made dtype-dispatch error messages point at the wrong op.
  AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_backward_cuda", ([&] {
    lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
        d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
  }));
  // Collapse [batch, 3, state] gate gradients to [batch, 3*state] and
  // back-propagate through the forward pass's addmm.
  auto d_gate_weights = d_gates.flatten(1, 2);
  auto d_weights = d_gate_weights.t().mm(X);
  auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
  auto d_X = d_gate_weights.mm(weights);
  auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
  auto d_input = d_X.slice(/*dim=*/1, state_size);
  return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
} |
686c5e36d646abd1334772f7f8cf2c76c1e41e54.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/fused_multihead_attention_v2.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/flash_attention/fmha_flash_attention.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Type-punning helper: view a half2 (two packed fp16 values) as the raw
// 32-bit word that the fused-attention kernel parameters store for scales.
union __half2_uint32_t_union {
half2 fp162;
uint32_t u32;
};
// Replicates `norm` into both fp16 lanes of a half2 and stores the result's
// bit pattern in `alpha` — the packed format the fused kernels expect for
// their scale parameters.
void set_alpha_fp16(uint32_t& alpha, float norm) {
  __half2_uint32_t_union packed;
  packed.fp162 = __float2half2_rn(norm);
  alpha = packed.u32;
}
// Implementation detail (pimpl) of FusedMHARunnerFP16v2. Holds the fused
// multi-head attention kernel parameters and dispatches between the TensorRT
// fused MHA v2 ("xmma") kernels and the flash-attention kernels.
class FusedMHARunnerFP16v2::mhaImpl {
public:
// Loads the xmma kernel table for this SM; the flash-attention table is only
// loaded when the owning runner enabled it. Enforces the supported-SM whitelist.
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface),
sm(interface->mSm),
xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) {
ORT_ENFORCE((sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89),
"Unsupported architecture");
flash_attention_kernel = nullptr;
if (interface->mEnableFlashAttention) {
flash_attention_kernel = get_flash_attention_kernels(DATA_TYPE_FP16, sm);
}
params.clear();
}
~mhaImpl() {}
// Configure kernel params for the NON-causal path for (padded) sequence
// length S and batch B. Picks the warp tiling per (sm, S), then fills
// scales, sizes, and byte strides. ORT_ENFORCEs on sequence lengths with no
// fused-kernel tiling.
void setup(const int S, const int B) {
// For bert and vit, use flash attention when sequence length is larger than the threshold.
use_flash_attention = is_flash_attention(S);
params.force_unroll = use_flash_attention;
size_t warps_m = 2;
size_t warps_n = 2;
size_t warps_k = 1;
if (use_flash_attention) {
warps_m = 4;
warps_n = 1;
} else {
// Warp tiling differs between SM70 and newer architectures.
if (sm == 70) {
if (S == 64 || S == 96) {
warps_m = 2;
warps_n = 2;
} else if (S == 128) {
warps_m = 1;
warps_n = 4;
} else if (S == 256 || S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
} else {
if (S == 32 || S == 64 || S == 96 || S == 128) {
warps_m = 2;
warps_n = 2;
} else if (S == 192 || S == 256) {
warps_m = 1;
warps_n = 4;
} else if (S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
}
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const float scale_bmm1 = interface->mScale;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
// QKV is packed: 3 contiguous (heads * head_size) fp16 blocks per token.
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
has_causal_mask = false;
}
// Configure kernel params for the CAUSAL path. Unlike setup(), small
// problems (head_size <= 64, S <= 128) fall back from flash attention to the
// fmha_v2 kernels, padding S up to the 64/128 bucket.
void setup_causal_masked_fmha(const int S, const int B) {
const float scale_bmm1 = interface->mScale;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
// fallback to original fmha_v2 when head_size <= 64 and seq_len <- 128
use_flash_attention = interface->mEnableFlashAttention;
if (params.d <= 64 && params.s <= 128) {
use_flash_attention = false;
// get max sequence length
if (params.s > 64) {
params.s = 128;
} else {
params.s = 64;
}
}
// set flags
params.force_unroll = use_flash_attention;
has_causal_mask = true;
}
// Launch the selected kernel on `stream`. `input` is the packed QKV buffer,
// `output` receives the attention result; `cu_seqlens` is forwarded to the
// kernel (presumably cumulative per-batch sequence offsets — confirm with callers).
void run(const void* input, const void* cu_seqlens, void* output, hipStream_t stream) {
params.qkv_ptr = const_cast<void*>(input);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cu_seqlens));
// Flash attention only serves the non-causal path here; causal flash goes
// through the xmma runner.
if (use_flash_attention && flash_attention_kernel != nullptr && !has_causal_mask) {
flash_attention_kernel->run(params, stream);
} else {
xmmaKernel->run(params, stream, use_flash_attention, has_causal_mask);
}
CUDA_CALL_THROW(hipPeekAtLastError());
}
// True when a kernel variant exists for (padded) sequence length `s`.
bool isValid(int s) const {
if (is_flash_attention(s)) {
return (flash_attention_kernel != nullptr) && flash_attention_kernel->isValid(s);
}
return xmmaKernel->isValid(s);
}
// Round max_seq_len up to the nearest supported kernel bucket (flash
// attention takes any length as-is). Lengths > 384 are returned unchanged
// and will fail isValid() on the non-flash path.
int getSFromMaxSeqLen(const int max_seq_len) const {
if (is_flash_attention(max_seq_len)) {
return max_seq_len;
}
int S = max_seq_len;
if (max_seq_len <= 32) {
S = (sm == 70) ? 64 : 32;
} else if (max_seq_len <= 64) {
S = 64;
} else if (max_seq_len <= 96) {
S = 96;
} else if (max_seq_len <= 128) {
S = 128;
} else if (max_seq_len <= 192) {
S = (sm == 70) ? 256 : 192;
} else if (max_seq_len <= 256) {
S = 256;
} else if (max_seq_len <= 384) {
S = 384;
}
return S;
}
protected:
// Flash-attention selection for the non-causal path only (enforced); true
// when enabled and S reaches the flash-attention length threshold.
bool is_flash_attention(const int S) const {
ORT_ENFORCE(interface->mHasCausalMask == false);
return interface->mEnableFlashAttention && S >= kMinSequenceLengthFlashAttention;
}
private:
FusedMHARunnerFP16v2* interface;  // owning runner (not owned here)
Fused_multihead_attention_params_v2 params;
int sm;  // compute capability, e.g. kSM_80
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
const FusedMultiHeadFlashAttentionKernel* flash_attention_kernel;  // null unless enabled
size_t xmmas_m;  // XMMA tile count along M, set by setup()
size_t threads_per_cta;  // set by setup()
bool use_flash_attention = false;
bool has_causal_mask = false;
};
// Construct the FP16 fused-MHA runner for the given head count/size and SM.
// The pimpl constructor enforces the supported-architecture whitelist and,
// when enabled, loads the flash-attention kernel table.
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads,
const int headSize,
const int sm,
bool causal_mask,
bool enable_flash_attention,
const float scale)
: MHARunner(numHeads, headSize, 2, causal_mask, scale),
mSm(sm),
mEnableFlashAttention(enable_flash_attention),
pimpl(new mhaImpl(this)) {
}
// Configure the kernel parameters for (padded) sequence length S and batch B.
// After the base-class bookkeeping, delegates to the causal or non-causal
// setup path of the implementation.
void FusedMHARunnerFP16v2::setup(const int S, const int B) {
  MHARunner::setup(S, B);
  if (!mHasCausalMask) {
    pimpl->setup(S, B);
    return;
  }
  pimpl->setup_causal_masked_fmha(S, B);
}
// True when `sm` is one of the compute capabilities the fused MHA kernels are
// compiled for. Previously this whitelist was duplicated verbatim in the
// causal and non-causal branches of is_supported(); it is factored out so the
// two cannot drift apart.
static bool is_supported_arch(int sm) {
  return sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89;
}

// Returns true when a fused kernel exists for this combination of compute
// capability, head size, sequence length, flash-attention setting, and
// causal-mask setting. Pure predicate; no kernels are loaded here.
bool FusedMHARunnerFP16v2::is_supported(int sm, int head_size, int sequence_length,
                                        bool enable_flash_attention, bool causal) {
  if (causal) {
    if (!is_supported_arch(sm)) {
      return false;
    }
    if (enable_flash_attention) {
      // Causal flash attention ships for a fixed set of head sizes.
      return head_size == 64 ||
             head_size == 32 ||
             head_size == 40 ||
             head_size == 80 ||
             head_size == 128 ||
             head_size == 144 ||
             head_size == 160 ||
             head_size == 256;
    }
    // Causal fmha_v2 fallback: small head sizes and short sequences only.
    return (head_size == 64 || head_size == 32 || head_size == 40) && sequence_length <= 128;
  }

  // Non-causal: prefer flash attention for long sequences when a kernel
  // exists. Note this check intentionally precedes the architecture
  // whitelist, matching the original behavior — the flash kernel table does
  // its own (sm, head_size) lookup.
  bool use_flash = enable_flash_attention && sequence_length >= kMinSequenceLengthFlashAttention;
  if (use_flash && has_flash_attention_kernel(sm, head_size)) {
    return true;
  }
  if (!is_supported_arch(sm)) {
    return false;
  }
  if (head_size != 64 && head_size != 32) {
    return false;
  }
  if (sm == kSM_70 && head_size == 32) {
    return false;
  }
  // Normal (not flash) fused kernel supports sequence length up to 384.
  constexpr int max_sequence_length = 384;
  return sequence_length <= max_sequence_length;
}
// This runner requires no auxiliary device workspace.
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const {
return 0;
}
// Execute fused attention on `stream`: `input` is the packed QKV buffer and
// `output` receives the attention result; `cu_seqlens` is forwarded to the
// kernel (presumably cumulative per-batch sequence offsets — confirm with
// callers). Kernel selection happens inside mhaImpl::run.
void FusedMHARunnerFP16v2::run(const void* input, const void* cu_seqlens, void* output, hipStream_t stream) {
pimpl->run(input, cu_seqlens, output, stream);
}
// True when a fused kernel variant exists for (padded) sequence length `s`.
bool FusedMHARunnerFP16v2::isValid(int s) const {
return pimpl->isValid(s);
}
// Map an arbitrary max sequence length to the nearest kernel-supported bucket.
int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) const {
return pimpl->getSFromMaxSeqLen(max_seq_len);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 686c5e36d646abd1334772f7f8cf2c76c1e41e54.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/fused_multihead_attention_v2.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/flash_attention/fmha_flash_attention.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Type-punning helper: view a half2 (two packed fp16 values) as the raw
// 32-bit word that the fused-attention kernel parameters store for scales.
union __half2_uint32_t_union {
half2 fp162;
uint32_t u32;
};
// Replicates `norm` into both fp16 lanes of a half2 and stores the result's
// bit pattern in `alpha` — the packed format the fused kernels expect for
// their scale parameters.
void set_alpha_fp16(uint32_t& alpha, float norm) {
__half2_uint32_t_union temp;
temp.fp162 = __float2half2_rn(norm);
alpha = temp.u32;
}
// Private implementation ("pimpl") for FusedMHARunnerFP16v2. Owns the
// kernel-parameter struct and selects, per setup()/run() call, between the
// TensorRT fused MHA kernels (xmmaKernel) and the flash-attention kernels.
class FusedMHARunnerFP16v2::mhaImpl {
 public:
  mhaImpl(FusedMHARunnerFP16v2* interface)
      : interface(interface),
        sm(interface->mSm),
        xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) {
    // Only these SM versions have pre-built fused kernels.
    ORT_ENFORCE((sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89),
                "Unsupported architecture");
    flash_attention_kernel = nullptr;
    if (interface->mEnableFlashAttention) {
      flash_attention_kernel = get_flash_attention_kernels(DATA_TYPE_FP16, sm);
    }
    params.clear();
  }

  ~mhaImpl() {}

  // Fills `params` for the non-causal kernels given (padded) sequence length
  // S and batch size B. The warp layout (warps_m/warps_n/warps_k) is chosen
  // per-architecture and per-sequence-length to match the kernel variants.
  void setup(const int S, const int B) {
    // For bert and vit, use flash attention when sequence length is larger than the threshold.
    use_flash_attention = is_flash_attention(S);

    params.force_unroll = use_flash_attention;

    size_t warps_m = 2;
    size_t warps_n = 2;
    size_t warps_k = 1;

    if (use_flash_attention) {
      warps_m = 4;
      warps_n = 1;
    } else {
      if (sm == 70) {
        if (S == 64 || S == 96) {
          warps_m = 2;
          warps_n = 2;
        } else if (S == 128) {
          warps_m = 1;
          warps_n = 4;
        } else if (S == 256 || S == 384) {
          warps_m = 1;
          warps_n = 8;
        } else {
          ORT_ENFORCE(false, "Unsupported sequence length");
        }
      } else {
        if (S == 32 || S == 64 || S == 96 || S == 128) {
          warps_m = 2;
          warps_n = 2;
        } else if (S == 192 || S == 256) {
          warps_m = 1;
          warps_n = 4;
        } else if (S == 384) {
          warps_m = 1;
          warps_n = 8;
        } else {
          ORT_ENFORCE(false, "Unsupported sequence length");
        }
      }
    }

    // The number of threads per CTA.
    threads_per_cta = warps_m * warps_n * warps_k * 32;
    // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
    xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);

    const float scale_bmm1 = interface->mScale;
    const float scale_softmax = 1.f;  // Seems to be only required for int8
    const float scale_bmm2 = 1.f;

    // Scales are stored as packed fp16x2 bit patterns.
    set_alpha_fp16(params.scale_bmm1, scale_bmm1);
    set_alpha_fp16(params.scale_softmax, scale_softmax);
    set_alpha_fp16(params.scale_bmm2, scale_bmm2);

    params.b = B;
    params.h = interface->mNumHeads;
    params.s = S;
    params.d = interface->mHeadSize;

    // Input is packed QKV (3 tensors interleaved), output is a single tensor.
    params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
    params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
    params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);

    has_causal_mask = false;
  }

  // Fills `params` for the causal-masked kernels. No warp-layout selection
  // here; instead the effective sequence length may be rounded up to a
  // supported kernel size for the fmha_v2 fallback path.
  void setup_causal_masked_fmha(const int S, const int B) {
    const float scale_bmm1 = interface->mScale;
    const float scale_softmax = 1.f;  // Seems to be only required for int8
    const float scale_bmm2 = 1.f;

    set_alpha_fp16(params.scale_bmm1, scale_bmm1);
    set_alpha_fp16(params.scale_softmax, scale_softmax);
    set_alpha_fp16(params.scale_bmm2, scale_bmm2);

    params.b = B;
    params.h = interface->mNumHeads;
    params.s = S;
    params.d = interface->mHeadSize;

    params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
    params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);

    // fallback to original fmha_v2 when head_size <= 64 and seq_len <= 128
    use_flash_attention = interface->mEnableFlashAttention;
    if (params.d <= 64 && params.s <= 128) {
      use_flash_attention = false;
      // get max sequence length (round up to the nearest supported kernel size)
      if (params.s > 64) {
        params.s = 128;
      } else {
        params.s = 64;
      }
    }

    // set flags
    params.force_unroll = use_flash_attention;
    has_causal_mask = true;
  }

  // Launches the kernel chosen during setup. `input` is the packed QKV
  // buffer, `cu_seqlens` the cumulative sequence-length array, `output` the
  // attention result. Throws on a CUDA launch error.
  void run(const void* input, const void* cu_seqlens, void* output, cudaStream_t stream) {
    params.qkv_ptr = const_cast<void*>(input);
    params.o_ptr = output;
    params.cu_seqlens = static_cast<int*>(const_cast<void*>(cu_seqlens));

    // Flash attention kernels do not implement the causal mask here; fall
    // back to the xmma kernels for causal runs.
    if (use_flash_attention && flash_attention_kernel != nullptr && !has_causal_mask) {
      flash_attention_kernel->run(params, stream);
    } else {
      xmmaKernel->run(params, stream, use_flash_attention, has_causal_mask);
    }

    CUDA_CALL_THROW(cudaPeekAtLastError());
  }

  // True when a kernel exists for (padded) sequence length `s`.
  bool isValid(int s) const {
    if (is_flash_attention(s)) {
      return (flash_attention_kernel != nullptr) && flash_attention_kernel->isValid(s);
    }

    return xmmaKernel->isValid(s);
  }

  // Rounds `max_seq_len` up to the nearest sequence length that has a fused
  // kernel variant. Flash attention takes the length as-is. Note: values
  // above 384 on the non-flash path are returned unchanged; isValid() is the
  // authoritative check for those.
  int getSFromMaxSeqLen(const int max_seq_len) const {
    if (is_flash_attention(max_seq_len)) {
      return max_seq_len;
    }

    int S = max_seq_len;
    if (max_seq_len <= 32) {
      S = (sm == 70) ? 64 : 32;  // SM70 has no S=32 kernel
    } else if (max_seq_len <= 64) {
      S = 64;
    } else if (max_seq_len <= 96) {
      S = 96;
    } else if (max_seq_len <= 128) {
      S = 128;
    } else if (max_seq_len <= 192) {
      S = (sm == 70) ? 256 : 192;  // SM70 has no S=192 kernel
    } else if (max_seq_len <= 256) {
      S = 256;
    } else if (max_seq_len <= 384) {
      S = 384;
    }

    return S;
  }

 protected:
  // Flash attention is used only without a causal mask, when enabled and the
  // sequence length reaches the configured threshold.
  bool is_flash_attention(const int S) const {
    ORT_ENFORCE(interface->mHasCausalMask == false);
    return interface->mEnableFlashAttention && S >= kMinSequenceLengthFlashAttention;
  }

 private:
  FusedMHARunnerFP16v2* interface;        // non-owning back-pointer to the runner
  Fused_multihead_attention_params_v2 params;
  int sm;                                 // target SM version (e.g. 75, 80)
  const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
  const FusedMultiHeadFlashAttentionKernel* flash_attention_kernel;
  size_t xmmas_m;
  size_t threads_per_cta;
  bool use_flash_attention = false;       // decided at setup time
  bool has_causal_mask = false;           // decided at setup time
};
// Constructs the FP16 fused-MHA runner. `sm` is the target GPU architecture
// version (compared against kSM_70..kSM_89 in the impl); enabling flash
// attention additionally loads the flash-attention kernels.
// The literal 2 passed to MHARunner is sizeof(half), the element size.
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads,
                                           const int headSize,
                                           const int sm,
                                           bool causal_mask,
                                           bool enable_flash_attention,
                                           const float scale)
    : MHARunner(numHeads, headSize, 2, causal_mask, scale),
      mSm(sm),
      mEnableFlashAttention(enable_flash_attention),
      pimpl(new mhaImpl(this)) {
}
// Configures the launch parameters for sequence length S and batch size B,
// delegating to the causal or non-causal impl depending on how the runner
// was constructed.
void FusedMHARunnerFP16v2::setup(const int S, const int B) {
  MHARunner::setup(S, B);
  if (!mHasCausalMask) {
    pimpl->setup(S, B);
  } else {
    pimpl->setup_causal_masked_fmha(S, B);
  }
}
// Reports whether a fused multi-head attention kernel exists for the given
// GPU architecture (sm), head size, sequence length, and masking mode.
bool FusedMHARunnerFP16v2::is_supported(int sm, int head_size, int sequence_length,
                                        bool enable_flash_attention, bool causal) {
  const bool sm_supported = (sm == kSM_70 || sm == kSM_75 || sm == kSM_80 ||
                             sm == kSM_86 || sm == kSM_89);

  if (causal) {
    if (!sm_supported) {
      return false;
    }

    if (enable_flash_attention) {
      // Causal flash attention ships kernels for a fixed set of head sizes.
      switch (head_size) {
        case 32:
        case 40:
        case 64:
        case 80:
        case 128:
        case 144:
        case 160:
        case 256:
          return true;
        default:
          return false;
      }
    }

    // Causal fmha_v2 fallback: limited head sizes, sequence length up to 128.
    const bool head_supported = (head_size == 32 || head_size == 40 || head_size == 64);
    return head_supported && sequence_length <= 128;
  }

  // Non-causal: prefer flash attention for long sequences when a kernel exists.
  const bool use_flash =
      enable_flash_attention && sequence_length >= kMinSequenceLengthFlashAttention;
  if (use_flash && has_flash_attention_kernel(sm, head_size)) {
    return true;
  }

  if (!sm_supported) {
    return false;
  }

  if (head_size != 32 && head_size != 64) {
    return false;
  }

  // SM70 does not provide a head_size == 32 kernel.
  if (sm == kSM_70 && head_size == 32) {
    return false;
  }

  // Normal (not flash) fused kernel supports sequence length up to 384.
  constexpr int kMaxSequenceLength = 384;
  return sequence_length <= kMaxSequenceLength;
}
// The fused kernels need no auxiliary scratch buffer, so this runner always
// reports a zero-byte workspace requirement.
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const {
  constexpr size_t kWorkspaceBytes = 0;
  return kWorkspaceBytes;
}
// Launches the fused attention kernel on `stream`. `input` is the packed QKV
// buffer, `cu_seqlens` the cumulative sequence-length array. Forwards to the impl.
void FusedMHARunnerFP16v2::run(const void* input, const void* cu_seqlens, void* output, cudaStream_t stream) {
  pimpl->run(input, cu_seqlens, output, stream);
}
// True when a fused kernel exists for (padded) sequence length `s`. Forwards to the impl.
bool FusedMHARunnerFP16v2::isValid(int s) const {
  return pimpl->isValid(s);
}
// Rounds `max_seq_len` up to the nearest kernel-supported sequence length. Forwards to the impl.
int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) const {
  return pimpl->getSFromMaxSeqLen(max_seq_len);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
7e49d9c6e17fcf88d37c65edc7872c77f5693981.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Bilinearly interpolates an (height x width) row-major plane `bottom_data`
// at the continuous coordinate (y, x). U is the storage type, T the math
// type. Returns 0 for samples more than one pixel outside the feature map.
template <typename U, typename T>
__device__ T bilinear_interpolate(const U* bottom_data,
                                  const int height, const int width,
                                  T y, T x,
                                  const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty: sample contributes nothing
    return 0;
  }

  // Clamp coordinates in [-1, 0] onto the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  int y_low = (int) y;
  int x_low = (int) x;
  int y_high;
  int x_high;

  // Clamp the 2x2 sample neighborhood at the high edge of the plane.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T) y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T) x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  return val;
}
// Forward ROIAlign kernel for NCHW input. Each output element (n, c, ph, pw)
// is the average of bilinear samples taken on a roi_bin_grid_h x
// roi_bin_grid_w grid inside its pooling bin. `bottom_rois` holds 5 values
// per ROI: [batch_index, x1, y1, x2, y2], scaled by spatial_scale below.
// Launched via a grid-stride loop (CUDA_1D_KERNEL_LOOP), so any grid size works.
// rois in math type (float). This is because ROIs come in as float.
// TODO: Change other blocks producing ROI to support half type as well
template <typename U, typename T>
__global__ void RoIAlignForward(const int nthreads, const U* bottom_data,
                                const T spatial_scale, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                const int sampling_ratio,
                                const T* bottom_rois, U* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Start of this ROI's (batch, channel) plane in NCHW layout.
    const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy ++)  // e.g., iy = 0, 1
    {
      // Sample at the center of each sub-cell of the bin.
      const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix ++)
      {
        const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);

        T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;

    top_data[index] = output_val;
  }
}
// NHWC variant of bilinear_interpolate: `bottom_data` points at one channel
// of an (height x width x channels) block, so spatial neighbors are
// `channels` elements apart. Same clamping/weighting as the NCHW version.
template <typename U, typename T>
__device__ T bilinear_interpolate_nhwc(const U* bottom_data,
                                       const int height, const int width, const int channels,
                                       T y, T x,
                                       const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty: sample contributes nothing
    return 0;
  }

  // Clamp coordinates in [-1, 0] onto the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  int y_low = (int) y;
  int x_low = (int) x;
  int y_high;
  int x_high;

  // Clamp the 2x2 sample neighborhood at the high edge of the plane.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T) y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T) x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation (stride by `channels` between spatial neighbors)
  T v1 = bottom_data[channels * (y_low * width + x_low)];
  T v2 = bottom_data[channels * (y_low * width + x_high)];
  T v3 = bottom_data[channels * (y_high * width + x_low)];
  T v4 = bottom_data[channels * (y_high * width + x_high)];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  return val;
}
// Forward ROIAlign kernel for NHWC input. Identical sampling scheme to
// RoIAlignForward, but the output index decomposes channel-innermost and the
// input is addressed through bilinear_interpolate_nhwc with channel stride.
// rois in math type (float). This is because ROIs come in as float.
// TODO: Change other blocks producing ROI to support half type as well
template <typename U, typename T>
__global__ void RoIAlignForwardNHWC(const int nthreads, const U* bottom_data,
                                    const T spatial_scale, const int channels,
                                    const int height, const int width,
                                    const int pooled_height, const int pooled_width,
                                    const int sampling_ratio,
                                    const T* bottom_rois, U* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output; c varies fastest (NHWC)
    int c = index % channels;
    int pw = (index / channels) % pooled_width;
    int ph = (index / channels / pooled_width) % pooled_height;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Start of this ROI's batch image at channel c in NHWC layout.
    const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels * height * width + c);

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy ++)  // e.g., iy = 0, 1
    {
      const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix ++)
      {
        const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);

        T val = bilinear_interpolate_nhwc(offset_bottom_data, height, width, channels, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;

    top_data[index] = output_val;
  }
}
// Computes the four bilinear weights (w1..w4) and the integer corner
// coordinates for a sample at (y, x), for use in the backward pass.
// For out-of-bound samples all weights are 0 and all corners are -1 so the
// caller can skip the scatter.
template <typename T>
__device__ void bilinear_interpolate_gradient(
    const int height, const int width,
    T y, T x,
    T & w1, T & w2, T & w3, T & w4,
    int & x_low, int & x_high, int & y_low, int & y_high,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty: signal "no contribution" to the caller
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }

  // Clamp coordinates in [-1, 0] onto the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  y_low = (int) y;
  x_low = (int) x;

  // Clamp the 2x2 neighborhood at the high edge of the plane.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T) y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T) x_low;
  } else {
    x_high = x_low + 1;
  }

  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;

  // reference in forward
  // T v1 = bottom_data[y_low * width + x_low];
  // T v2 = bottom_data[y_low * width + x_high];
  // T v3 = bottom_data[y_high * width + x_low];
  // T v4 = bottom_data[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  return;
}
// Backward ROIAlign kernel for NCHW layout. Scatters each output gradient
// back to the four corners of every bilinear sample via atomicAdd (multiple
// ROIs/bins may touch the same input pixel, hence the atomics).
// `bottom_diff` must be zero-initialized by the caller.
template <typename U, typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const U* top_diff,
                                        const int num_rois, const T spatial_scale,
                                        const int channels, const int height, const int width,
                                        const int pooled_height, const int pooled_width,
                                        const int sampling_ratio,
                                        U* bottom_diff,
                                        const T* bottom_rois) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Gradient plane for this ROI's (batch, channel) in NCHW layout.
    U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;

    // Incoming gradient for this single pooled bin.
    int top_offset = (n * channels + c) * pooled_height * pooled_width;
    const U* offset_top_diff = top_diff + top_offset;
    const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    for (int iy = 0; iy < roi_bin_grid_h; iy ++)  // e.g., iy = 0, 1
    {
      const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix ++)
      {
        const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);

        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;

        bilinear_interpolate_gradient(height, width, y, x,
                                      w1, w2, w3, w4,
                                      x_low, x_high, y_low, y_high,
                                      index);

        // Each sample contributes 1/count of the bin gradient, split by the
        // bilinear weights.
        T g1 = top_diff_this_bin * w1 / count;
        T g2 = top_diff_this_bin * w2 / count;
        T g3 = top_diff_this_bin * w3 / count;
        T g4 = top_diff_this_bin * w4 / count;

        // Corners are -1 when the sample fell outside the feature map.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
        {
          atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
          atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
          atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
          atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
        }  // if
      }  // ix
    }  // iy
  }  // CUDA_1D_KERNEL_LOOP
}  // RoIAlignBackward
// Backward ROIAlign kernel for NHWC layout. Same atomic scatter scheme as
// RoIAlignBackwardFeature, with channel-innermost indexing on both the
// incoming gradient and the scattered output.
// `bottom_diff` must be zero-initialized by the caller.
template <typename U, typename T>
__global__ void RoIAlignBackwardFeatureNHWC(const int nthreads, const U* top_diff,
                                            const int num_rois, const T spatial_scale,
                                            const int channels, const int height, const int width,
                                            const int pooled_height, const int pooled_width,
                                            const int sampling_ratio,
                                            U* bottom_diff,
                                            const T* bottom_rois) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output; c varies fastest (NHWC)
    int c = index % channels;
    int pw = (index / channels) % pooled_width;
    int ph = (index / channels / pooled_width) % pooled_height;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Gradient block for this ROI's batch image at channel c (NHWC).
    U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels * height * width + c);

    // Incoming gradient for this single pooled bin (NHWC addressing).
    int top_offset = n * channels * pooled_height * pooled_width + c;
    const U* offset_top_diff = top_diff + top_offset;
    const T top_diff_this_bin = offset_top_diff[channels * (ph * pooled_width + pw)];

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    for (int iy = 0; iy < roi_bin_grid_h; iy ++)  // e.g., iy = 0, 1
    {
      const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix ++)
      {
        const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);

        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;

        bilinear_interpolate_gradient(height, width, y, x,
                                      w1, w2, w3, w4,
                                      x_low, x_high, y_low, y_high,
                                      index);

        // Each sample contributes 1/count of the bin gradient, split by the
        // bilinear weights.
        T g1 = top_diff_this_bin * w1 / count;
        T g2 = top_diff_this_bin * w2 / count;
        T g3 = top_diff_this_bin * w3 / count;
        T g4 = top_diff_this_bin * w4 / count;

        // Corners are -1 when the sample fell outside the feature map.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
        {
          atomicAdd(offset_bottom_diff + channels * (y_low * width + x_low), static_cast<T>(g1));
          atomicAdd(offset_bottom_diff + channels * (y_low * width + x_high), static_cast<T>(g2));
          atomicAdd(offset_bottom_diff + channels * (y_high * width + x_low), static_cast<T>(g3));
          atomicAdd(offset_bottom_diff + channels * (y_high * width + x_high), static_cast<T>(g4));
        }  // if
      }  // ix
    }  // iy
  }  // CUDA_1D_KERNEL_LOOP
}  // RoIAlignBackward
// Host entry point for forward ROIAlign.
// input: feature map, NCHW or NHWC per `is_nhwc`; rois: (num_rois, 5) float
// tensor [batch_index, x1, y1, x2, y2]. Returns the pooled output in the
// same layout as the input.
// NOTE(review): output_size is a 64-bit product but the kernels take an int
// nthreads — presumably outputs never exceed INT_MAX here; confirm for very
// large batches.
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
                                 const at::Tensor& rois,
                                 const float spatial_scale,
                                 const int pooled_height,
                                 const int pooled_width,
                                 const int sampling_ratio,
                                 const bool is_nhwc) {
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");

  auto num_rois = rois.size(0);
  auto channels = is_nhwc ? input.size(3) : input.size(1);
  auto height = is_nhwc ? input.size(1) : input.size(2);
  auto width = is_nhwc ? input.size(2) : input.size(3);

  auto output = is_nhwc ? at::empty({num_rois, pooled_height, pooled_width, channels}, input.options()) :
                          at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
  auto output_size = num_rois * pooled_height * pooled_width * channels;
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // Nothing to compute for empty outputs; still surface any pending error.
  if (output.numel() == 0) {
    THCudaCheck(hipGetLastError());
    return output;
  }

  // Launch config from the occupancy API; the kernels use a grid-stride
  // loop, so a grid smaller than output_size/blockSize is still correct.
  // Occupancy is queried on the <float, float> instantiation regardless of
  // the dispatched dtype (a heuristic, not an exact match for half).
  int gridSize;
  int blockSize;
  hipOccupancyMaxPotentialBlockSize(&gridSize,
                                    &blockSize,
                                    (void*) RoIAlignForward<float, float>,
                                    0,  // dynamic memory
                                    0); // maximum utilized threads

  dim3 grid(gridSize);
  dim3 block(blockSize);

  //TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well.
  //In case of double, it should be <double, double>, not <double, float>
  //TODO: ROIs come in as float, fix other blocks so they come in as same type as input.
  if (!is_nhwc){
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
      hipLaunchKernelGGL(( RoIAlignForward<scalar_t, float>), dim3(grid), dim3(block), 0, stream,
          output_size,
          input.contiguous().data_ptr<scalar_t>(),
          spatial_scale,
          channels,
          height,
          width,
          pooled_height,
          pooled_width,
          sampling_ratio,
          rois.contiguous().data_ptr<float>(),
          output.data_ptr<scalar_t>());
    });
  }
  else{
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
      hipLaunchKernelGGL(( RoIAlignForwardNHWC<scalar_t, float>), dim3(grid), dim3(block), 0, stream,
          output_size,
          input.contiguous().data_ptr<scalar_t>(),
          spatial_scale,
          channels,
          height,
          width,
          pooled_height,
          pooled_width,
          sampling_ratio,
          rois.contiguous().data_ptr<float>(),
          output.data_ptr<scalar_t>());
    });
  }
  // Surface launch errors (kernel execution errors appear at a later sync).
  THCudaCheck(hipGetLastError());
  return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
// NHWC + layout transposes are faster than NCHW, so just keep the NHWC implementation for backward pass
// Host entry point for backward ROIAlign: scatters `grad` (one value per
// pooled output element) back onto a zero-initialized input-shaped tensor.
// Layout of `grad` and of the returned gradient follows `is_nhwc`.
at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
                                  const at::Tensor& rois,
                                  const float spatial_scale,
                                  const int pooled_height,
                                  const int pooled_width,
                                  const int batch_size,
                                  const int channels,
                                  const int height,
                                  const int width,
                                  const int sampling_ratio,
                                  const bool is_nhwc) {
  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");

  auto num_rois = rois.size(0);
  // zeros() is required: the kernels accumulate with atomicAdd.
  auto grad_input = is_nhwc ? at::zeros({batch_size, height, width, channels}, grad.options()) :
                              at::zeros({batch_size, channels, height, width}, grad.options());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // handle possibly empty gradients
  if (grad.numel() == 0) {
    THCudaCheck(hipGetLastError());
    return grad_input;
  }

  // Launch config from the occupancy API; the kernels use a grid-stride
  // loop, so any grid size is correct. Occupancy is queried on the
  // <float, float> instantiation regardless of the dispatched dtype.
  int gridSize;
  int blockSize;
  hipOccupancyMaxPotentialBlockSize(&gridSize,
                                    &blockSize,
                                    (void*) RoIAlignBackwardFeature<float, float>,
                                    0,  // dynamic memory
                                    0); // maximum utilized threads

  dim3 grid(gridSize);
  dim3 block(blockSize);

  //TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well.
  //In case of double, it should be <double, double>, not <double, float>
  //TODO: ROIs come in as float, fix other blocks so they come in as same type as input.
  if (!is_nhwc){
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
      hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t, float>), dim3(grid), dim3(block), 0, stream,
          grad.numel(),
          grad.contiguous().data_ptr<scalar_t>(),
          num_rois,
          spatial_scale,
          channels,
          height,
          width,
          pooled_height,
          pooled_width,
          sampling_ratio,
          grad_input.data_ptr<scalar_t>(),
          rois.contiguous().data_ptr<float>());
    });
  }
  else{
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
      hipLaunchKernelGGL(( RoIAlignBackwardFeatureNHWC<scalar_t, float>), dim3(grid), dim3(block), 0, stream,
          grad.numel(),
          grad.contiguous().data_ptr<scalar_t>(),
          num_rois,
          spatial_scale,
          channels,
          height,
          width,
          pooled_height,
          pooled_width,
          sampling_ratio,
          grad_input.data_ptr<scalar_t>(),
          rois.contiguous().data_ptr<float>());
    });
  }
  // Surface launch errors (kernel execution errors appear at a later sync).
  THCudaCheck(hipGetLastError());
  return grad_input;
}
| 7e49d9c6e17fcf88d37c65edc7872c77f5693981.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Bilinearly interpolates an (height x width) row-major plane `bottom_data`
// at the continuous coordinate (y, x). U is the storage type, T the math
// type. Returns 0 for samples more than one pixel outside the feature map.
template <typename U, typename T>
__device__ T bilinear_interpolate(const U* bottom_data,
                                  const int height, const int width,
                                  T y, T x,
                                  const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty: sample contributes nothing
    return 0;
  }

  // Clamp coordinates in [-1, 0] onto the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  int y_low = (int) y;
  int x_low = (int) x;
  int y_high;
  int x_high;

  // Clamp the 2x2 sample neighborhood at the high edge of the plane.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T) y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T) x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  return val;
}
// Forward ROIAlign kernel for NCHW input. Each output element (n, c, ph, pw)
// is the average of bilinear samples taken on a roi_bin_grid_h x
// roi_bin_grid_w grid inside its pooling bin. `bottom_rois` holds 5 values
// per ROI: [batch_index, x1, y1, x2, y2], scaled by spatial_scale below.
// Launched via a grid-stride loop (CUDA_1D_KERNEL_LOOP), so any grid size works.
// rois in math type (float). This is because ROIs come in as float.
// TODO: Change other blocks producing ROI to support half type as well
template <typename U, typename T>
__global__ void RoIAlignForward(const int nthreads, const U* bottom_data,
                                const T spatial_scale, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                const int sampling_ratio,
                                const T* bottom_rois, U* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Start of this ROI's (batch, channel) plane in NCHW layout.
    const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy ++)  // e.g., iy = 0, 1
    {
      // Sample at the center of each sub-cell of the bin.
      const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix ++)
      {
        const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);

        T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;

    top_data[index] = output_val;
  }
}
// NHWC variant of bilinear_interpolate: `bottom_data` points at one channel
// of an (height x width x channels) block, so spatial neighbors are
// `channels` elements apart. Same clamping/weighting as the NCHW version.
template <typename U, typename T>
__device__ T bilinear_interpolate_nhwc(const U* bottom_data,
                                       const int height, const int width, const int channels,
                                       T y, T x,
                                       const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty: sample contributes nothing
    return 0;
  }

  // Clamp coordinates in [-1, 0] onto the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  int y_low = (int) y;
  int x_low = (int) x;
  int y_high;
  int x_high;

  // Clamp the 2x2 sample neighborhood at the high edge of the plane.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T) y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T) x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation (stride by `channels` between spatial neighbors)
  T v1 = bottom_data[channels * (y_low * width + x_low)];
  T v2 = bottom_data[channels * (y_low * width + x_high)];
  T v3 = bottom_data[channels * (y_high * width + x_low)];
  T v4 = bottom_data[channels * (y_high * width + x_high)];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  return val;
}
// Forward ROIAlign kernel for NHWC input. Identical sampling scheme to
// RoIAlignForward, but the output index decomposes channel-innermost and the
// input is addressed through bilinear_interpolate_nhwc with channel stride.
// rois in math type (float). This is because ROIs come in as float.
// TODO: Change other blocks producing ROI to support half type as well
template <typename U, typename T>
__global__ void RoIAlignForwardNHWC(const int nthreads, const U* bottom_data,
                                    const T spatial_scale, const int channels,
                                    const int height, const int width,
                                    const int pooled_height, const int pooled_width,
                                    const int sampling_ratio,
                                    const T* bottom_rois, U* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output; c varies fastest (NHWC)
    int c = index % channels;
    int pw = (index / channels) % pooled_width;
    int ph = (index / channels / pooled_width) % pooled_height;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical
    T roi_start_w = offset_bottom_rois[1] * spatial_scale;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale;
    // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
    // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
    // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
    // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    T roi_width = max(roi_end_w - roi_start_w, (T)1.);
    T roi_height = max(roi_end_h - roi_start_h, (T)1.);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Start of this ROI's batch image at channel c in NHWC layout.
    const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels * height * width + c);

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy ++)  // e.g., iy = 0, 1
    {
      const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix ++)
      {
        const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);

        T val = bilinear_interpolate_nhwc(offset_bottom_data, height, width, channels, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;

    top_data[index] = output_val;
  }
}
// Computes, for one sampling point (y, x), the four bilinear interpolation
// weights (w1..w4) and the integer coordinates of the four neighbouring
// feature-map cells (x_low/x_high, y_low/y_high).
// Points more than one pixel outside the map get zero weights and corner
// indices of -1 so the caller can skip the gradient scatter entirely.
// Layout-agnostic: callers apply their own (NCHW or NHWC) strides to the
// returned corner coordinates.
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
// Clamp slightly-outside points onto the map edge.
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
// At the far border both corners collapse onto the last valid cell and the
// interpolation degenerates along that axis.
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
// Fractional offsets inside the cell; (ly, lx) toward high, (hy, hx) low.
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
// Backward pass of RoIAlign (NCHW layout) with respect to the input feature
// map. Each `index` is one element of the pooled output gradient
// (n, c, ph, pw); its value is scattered back onto the 4 bilinear corner
// cells of every sampling point in the bin. atomicAdd is required because
// overlapping ROIs (and neighbouring bins at the border) write the same
// input cells concurrently.
//   U: gradient element type; T: math type for ROI coordinates.
template <typename U, typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const U* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
U* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// ROI row: (batch_index, x1, y1, x2, y2) in input-image coordinates.
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// Plane of the input gradient this thread scatters into (NCHW).
U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const U* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
// Chain rule: each corner gets its bilinear weight's share of the
// bin-averaged upstream gradient.
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
// All corners are -1 when the sample fell outside the feature map.
// NOTE(review): static_cast<T> is a no-op here (g* are already T), so the
// atomicAdd value operand is T even when U is half -- confirm the intended
// overload is selected for the half instantiation.
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
// Backward pass of RoIAlign for NHWC-layout gradients. Same math as the NCHW
// variant, but the index decomposition puts the channel fastest and all
// spatial addressing is strided by `channels`.
//   U: gradient element type; T: math type for ROI coordinates.
template <typename U, typename T>
__global__ void RoIAlignBackwardFeatureNHWC(const int nthreads, const U* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
U* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
// NHWC decomposition: channel varies fastest.
int c = index % channels;
int pw = (index / channels) % pooled_width;
int ph = (index / channels / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// Base of this image's gradient plane at channel c; spatial steps below are
// multiplied by `channels` (NHWC).
U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels * height * width + c);
int top_offset = n * channels * pooled_height * pooled_width + c;
const U* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[channels * (ph * pooled_width + pw)];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
// Each corner gets its bilinear weight's share of the averaged gradient.
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
// All corners are -1 when the sample fell outside the feature map.
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + channels * (y_low * width + x_low), static_cast<T>(g1));
atomicAdd(offset_bottom_diff + channels * (y_low * width + x_high), static_cast<T>(g2));
atomicAdd(offset_bottom_diff + channels * (y_high * width + x_low), static_cast<T>(g3));
atomicAdd(offset_bottom_diff + channels * (y_high * width + x_high), static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
// Host launcher for RoIAlign forward.
//   input: CUDA feature map, NCHW or NHWC depending on `is_nhwc`.
//   rois:  CUDA float tensor of shape (K, 5): (batch_idx, x1, y1, x2, y2).
// Returns the pooled output in the same layout family as the input.
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const bool is_nhwc) {
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
// Which dimension holds what depends on the memory layout of `input`.
auto channels = is_nhwc ? input.size(3) : input.size(1);
auto height = is_nhwc ? input.size(1) : input.size(2);
auto width = is_nhwc ? input.size(2) : input.size(3);
auto output = is_nhwc ? at::empty({num_rois, pooled_height, pooled_width, channels}, input.options()) :
at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// No ROIs -> empty output; still surface any pending CUDA error.
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
int gridSize;
int blockSize;
// NOTE(review): occupancy is computed for RoIAlignForward<float, float> and
// reused for the half and NHWC instantiations -- an approximation, since
// register usage can differ between instantiations.
cudaOccupancyMaxPotentialBlockSize(&gridSize,
&blockSize,
(void*) RoIAlignForward<float, float>,
0, // dynamic memory
0); // maximum utilized threads
dim3 grid(gridSize);
dim3 block(blockSize);
//TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well.
//In case of double, it should be <double, double>, not <double, float>
//TODO: ROIs come in as float, fix other blocks so they come in as same type as input.
if (!is_nhwc){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
// NOTE(review): if `input` is not already contiguous, .contiguous() makes
// a temporary whose storage is released at the end of this statement while
// the kernel may still be running asynchronously -- confirm callers pass
// contiguous tensors.
RoIAlignForward<scalar_t, float><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<float>(),
output.data_ptr<scalar_t>());
});
}
else{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForwardNHWC<scalar_t, float><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<float>(),
output.data_ptr<scalar_t>());
});
}
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
// NHWC + layout transposes are faster than NCHW, so just keep the NHWC implementation for backward pass
// Host launcher for RoIAlign backward: scatters `grad` (gradient of the
// pooled output) back into a zero-initialized gradient of the input feature
// map. Layout of both tensors is selected by `is_nhwc`; the input gradient
// shape is reconstructed from the explicit size arguments.
at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
const bool is_nhwc) {
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
// zeros(): backward accumulates with atomicAdd, so the buffer must start at 0.
auto grad_input = is_nhwc ? at::zeros({batch_size, height, width, channels}, grad.options()) :
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
int gridSize;
int blockSize;
// NOTE(review): occupancy is computed for the <float, float> NCHW kernel and
// reused for the half and NHWC instantiations -- an approximation.
cudaOccupancyMaxPotentialBlockSize(&gridSize,
&blockSize,
(void*) RoIAlignBackwardFeature<float, float>,
0, // dynamic memory
0); // maximum utilized threads
dim3 grid(gridSize);
dim3 block(blockSize);
//TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well.
//In case of double, it should be <double, double>, not <double, float>
//TODO: ROIs come in as float, fix other blocks so they come in as same type as input.
if (!is_nhwc){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t, float><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<float>());
});
}
else{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeatureNHWC<scalar_t, float><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<float>());
});
}
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
884706e0ea0f7e5ff0344be7dc75839793b8fced.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zswapblk.cu, normal z -> s, Thu Oct 8 23:05:35 2020
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
// Kernel argument bundle for the blocked row-swap kernels. Passed by value,
// so the (up to BLOCK_SIZE) pivot entries travel in kernel parameter space
// and need no separate device allocation or copy.
typedef struct {
float *A;                     // first matrix, pre-offset to the current pivot block
float *B;                     // second matrix (swap target)
int n, ldda, lddb, npivots;   // row length, leading dims, pivots in this block
short ipiv[BLOCK_SIZE];       // 0-based target rows in B; -1 = row already in place
} magmagpu_sswapblk_params_t;
/******************************************************************************/
// Row-major blocked swap: thread `y` owns one element position of the row
// and, for each pivot step i, exchanges A(i, y) with B(ipiv[i], y).
// ipiv[i] == -1 marks a self-swap and is skipped.
__global__ void magmagpu_sswapblkrm( magmagpu_sswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if ( y < params.n )   // guard the grid tail
{
float *A = params.A + y - params.ldda;   // pre-offset so the first += lands on row 0
float *B = params.B + y;
for( int i = 0; i < params.npivots; i++ )
{
A += params.ldda;   // advance to row i
if ( params.ipiv[i] == -1 )
continue;   // row already in place
float tmp1 = *A;
float *tmp2 = B + params.ipiv[i]*params.lddb;
*A = *tmp2;
*tmp2 = tmp1;
}
}
}
/******************************************************************************/
// Column-major blocked swap: thread `y` owns column y and, for each pivot
// step i, exchanges element i of its column in A with element ipiv[i] of the
// same column in B. ipiv[i] == -1 marks a self-swap and is skipped.
__global__ void magmagpu_sswapblkcm( magmagpu_sswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = y*params.ldda;   // start of column y in A
unsigned int offset2 = y*params.lddb;   // start of column y in B
if ( y < params.n )
{
float *A = params.A + offset1 - 1;   // pre-decremented so A++ lands on row 0
float *B = params.B + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A++;
if ( params.ipiv[i] == -1 )
continue;   // row already in place
float tmp1 = *A;
float *tmp2 = B + params.ipiv[i];
*A = *tmp2;
*tmp2 = tmp1;
}
}
// No shared memory is used, so this barrier has no visible effect; kept as-is.
__syncthreads();
}
/***************************************************************************//**
Blocked version: swap several pairs of lines.
Used in magma_ststrf() and magma_sssssm().
@ingroup magma_swapblk
*******************************************************************************/
// Swaps pairs of rows between dA and dB according to the LAPACK-style pivot
// vector `ipiv` (1-based, stride `inci`), processing pivots [i1-1, i2) in
// chunks of BLOCK_SIZE. Self-swaps are marked -1 so the kernel skips them;
// `offset` rebases pivot targets into dB's row numbering.
extern "C" void
magmablas_sswapblk(
magma_order_t order, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
magma_queue_t queue )
{
magma_int_t blocksize = 64;
dim3 blocks( magma_ceildiv( n, blocksize ) );
magma_int_t k, im;
/* Quick return */
if ( n == 0 )
return;
if ( order == MagmaColMajor ) {
for( k=(i1-1); k < i2; k += BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
// Members after the brace list (the ipiv array) are value-initialized
// here and filled explicitly below.
magmagpu_sswapblk_params_t params = { dA+k, dB, int(n), int(ldda), int(lddb), int(sb) };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;   // 1-based pivot -> 0-based row
if ( (k+j) == im )
params.ipiv[j] = -1;   // self-swap: nothing to do
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_sswapblkcm), dim3(blocks), dim3(blocksize), 0, queue->cuda_stream() , params );
}
}
else {
// Row-major: the pivot block starts k rows down, i.e. at dA + k*ldda.
for( k=(i1-1); k < i2; k += BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_sswapblk_params_t params = { dA+k*ldda, dB, int(n), int(ldda), int(lddb), int(sb) };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_sswapblkrm), dim3(blocks), dim3(blocksize), 0, queue->cuda_stream() , params );
}
}
}
| 884706e0ea0f7e5ff0344be7dc75839793b8fced.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zswapblk.cu, normal z -> s, Thu Oct 8 23:05:35 2020
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
// Kernel argument bundle for the blocked row-swap kernels. Passed by value,
// so the (up to BLOCK_SIZE) pivot entries travel in kernel parameter space
// and need no separate device allocation or copy.
typedef struct {
float *A;                     // first matrix, pre-offset to the current pivot block
float *B;                     // second matrix (swap target)
int n, ldda, lddb, npivots;   // row length, leading dims, pivots in this block
short ipiv[BLOCK_SIZE];       // 0-based target rows in B; -1 = row already in place
} magmagpu_sswapblk_params_t;
/******************************************************************************/
// Row-major blocked swap: thread `col` owns one element position of the row
// and, for each pivot step i, exchanges A(i, col) with B(ipiv[i], col).
// ipiv[i] == -1 marks a self-swap and is skipped. Element addresses are
// computed directly from the step index (same accesses as the original
// pointer-walking formulation).
__global__ void magmagpu_sswapblkrm( magmagpu_sswapblk_params_t params )
{
    unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
    if ( col < params.n )
    {
        for( int i = 0; i < params.npivots; i++ )
        {
            short piv = params.ipiv[i];
            if ( piv == -1 )
                continue;   // row already in place
            float *a = params.A + col + i * params.ldda;
            float *b = params.B + col + piv * params.lddb;
            float held = *a;
            *a = *b;
            *b = held;
        }
    }
}
/******************************************************************************/
// Column-major blocked swap: thread `col` owns column `col` and, for each
// pivot step i, exchanges element i of its column in A with element ipiv[i]
// of the same column in B. ipiv[i] == -1 marks a self-swap and is skipped.
// The guard stays an if-block (not an early return) so every thread in the
// block reaches the trailing barrier, exactly as in the original.
__global__ void magmagpu_sswapblkcm( magmagpu_sswapblk_params_t params )
{
    unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
    unsigned int baseA = col*params.ldda;   // start of column `col` in A
    unsigned int baseB = col*params.lddb;   // start of column `col` in B
    if ( col < params.n )
    {
        for( int i = 0; i < params.npivots; i++ )
        {
            short piv = params.ipiv[i];
            if ( piv == -1 )
                continue;   // row already in place
            float *a = params.A + baseA + i;
            float *b = params.B + baseB + piv;
            float held = *a;
            *a = *b;
            *b = held;
        }
    }
    // Kept from the original; no shared memory is involved.
    __syncthreads();
}
/***************************************************************************//**
Blocked version: swap several pairs of lines.
Used in magma_ststrf() and magma_sssssm().
@ingroup magma_swapblk
*******************************************************************************/
extern "C" void
magmablas_sswapblk(
magma_order_t order, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
magma_queue_t queue )
{
magma_int_t blocksize = 64;
dim3 blocks( magma_ceildiv( n, blocksize ) );
magma_int_t k, im;
/* Quick return */
if ( n == 0 )
return;
if ( order == MagmaColMajor ) {
for( k=(i1-1); k < i2; k += BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_sswapblk_params_t params = { dA+k, dB, int(n), int(ldda), int(lddb), int(sb) };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_sswapblkcm<<< blocks, blocksize, 0, queue->cuda_stream() >>>( params );
}
}
else {
for( k=(i1-1); k < i2; k += BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_sswapblk_params_t params = { dA+k*ldda, dB, int(n), int(ldda), int(lddb), int(sb) };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_sswapblkrm<<< blocks, blocksize, 0, queue->cuda_stream() >>>( params );
}
}
}
|
2dd7e43638c10e9b1596fbf73140d937c244f63f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "canvas.h"
#include <iostream>
#include <string>
#include <fstream>
#include <cmath>
#include <cstdio>
#include <chrono>
// Scene primitive description.
// type: 1 = sphere | 2 = box | 3 = tetra (only spheres are implemented in
// the parser and the kernel). For spheres only (x1, y1, z1) and Rad are
// meaningful; the remaining points are kept for the other shape types.
// Fix vs. previous revision: the default constructor left every member
// uninitialized (reading a default-constructed figure was undefined
// behavior); all members now zero-initialize.
class figure
{
public:
    int R = 0, G = 0, B = 0;            // surface color, 0..255
    int type = 0;                       // 1 = sphere | 2 = box | 3 = tetra
    double x1 = 0, y1 = 0, z1 = 0;      // first vertex / sphere center
    double x2 = 0, y2 = 0, z2 = 0;
    double x3 = 0, y3 = 0, z3 = 0;
    double x4 = 0, y4 = 0, z4 = 0;
    double Rad = 0;                     // radius (spheres only)

    figure(int type, int R, int G, int B,
           double x1, double y1, double z1,
           double x2, double y2, double z2,
           double x3, double y3, double z3,
           double x4, double y4, double z4, double Rad)
        : R(R), G(G), B(B), type(type),
          x1(x1), y1(y1), z1(z1),
          x2(x2), y2(y2), z2(z2),
          x3(x3), y3(y3), z3(z3),
          x4(x4), y4(y4), z4(z4),
          Rad(Rad)
    {
    }

    // Zero-initialized via the in-class member initializers above.
    figure() = default;
};
// Parses the scene description file `filename`.
// Each line describes one figure; currently only
//   "sphere R G B x y z radius"
// is supported. On success returns a heap-allocated array of figures (caller
// owns it) and stores its length in *figure_count; on any error prints a
// message and terminates the process.
// NOTE(review): the figure count is the number of lines, while parsing is
// token-based -- a trailing blank line would make the last >> reads fail
// silently; confirm input files carry no extra newline-only lines.
__host__ figure* scene_objects(std::string filename, size_t* figure_count)
{
size_t fig_count = 0;
std::ifstream obj_file;
obj_file.open(filename);
if (!obj_file.is_open())
{
std::cout << "Error occured while tried to open " << filename << std::endl;
exit(-1);
}
std::string tmp_object;
// First pass: count lines to size the figure array.
while (getline(obj_file, tmp_object)) fig_count++;
if (!fig_count)
{
std::cout << "Error: no objects encountered." << std::endl;
exit(-1);
}
// Rewind for the parsing pass (EOF flag must be cleared before seekg).
obj_file.clear();
obj_file.seekg(0);
figure* res = new figure[fig_count];
for (size_t i = 0; i < fig_count; ++i)
{
obj_file >> tmp_object;   // figure type keyword
if (tmp_object == "sphere")
{
int R, G, B;
double x, y, z, Rad;
obj_file >> R; obj_file >> G; obj_file >> B;
obj_file >> x; obj_file >> y; obj_file >> z;
obj_file >> Rad;
// Only the first point and the radius are meaningful for spheres.
res[i] = figure(1, R, G, B, x, y, z,
0, 0, 0, 0, 0, 0, 0, 0, 0, Rad);
}
else
{
std::cout << "Error: unknown figure encountered." << std::endl;
exit(-1);
}
}
obj_file.close();
*figure_count = fig_count;
return res;
}
// Reads the eight named scene properties from `filename` into the output
// parameters, in any order. Expected title tokens (each followed by its
// values): light, camera, normal_..., upvector, view_depth, screen_dist...,
// screen_width, screen_height.
// NOTE(review): properties are dispatched on the LENGTH of the title token,
// not its text (e.g. 6 chars -> camera, 11 -> screen_dist). Any rename, typo,
// or two same-length titles silently misroutes values -- fragile; confirm
// the exact property names before changing the file format.
__host__ void scene_props(std::string filename,
double* light,
double* camera,
double* upvector,
double* screen_normal,
double& screen_distance,
double& view_depth,
int& screen_width,
int& screen_height)
{
std::ifstream props_file;
props_file.open(filename);
if (!props_file.is_open())
{
std::cout << "Error occured while tried to open " << filename << std::endl;
exit(-1);
}
std::string title;
// Exactly 8 properties are expected; extra entries are ignored, missing
// ones leave the corresponding outputs untouched after a read failure.
for (int i = 0; i < 8; ++i)
{
props_file >> title;
switch (title.size())
{
case 6: //camera
props_file >> camera[0];
props_file >> camera[1];
props_file >> camera[2];
break;
case 5: //light
props_file >> light[0];
props_file >> light[1];
props_file >> light[2];
break;
case 7: //normal_
props_file >> screen_normal[0];
props_file >> screen_normal[1];
props_file >> screen_normal[2];
break;
case 8: //upvector
props_file >> upvector[0];
props_file >> upvector[1];
props_file >> upvector[2];
break;
case 11: //screen_dist
props_file >> screen_distance;
break;
case 10: //view_depth
props_file >> view_depth;
break;
case 12: //screen_width
props_file >> screen_width;
break;
case 13: //screen_height
props_file >> screen_height;
break;
default:
std::cout << "Error: unknown title encountered." << std::endl;
exit(-1);
}
}
props_file.close();
}
// Rescales the 3-vector `vec` in place so its Euclidean norm equals |length|.
// (The sign of `length` is discarded, matching the original sqrt(length^2)
// formulation; a zero vector yields inf components, also as before.)
__host__ void to_length(double* vec, double length)
{
    double norm = sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);
    double scale = fabs(length) / norm;
    for (int i = 0; i < 3; ++i)
        vec[i] *= scale;
}
// Chooses a (total thread count, block count) pair for the raytracing launch.
// Finds the smallest per-thread pixel scope in {5, 10, ..., 500} that divides
// the pixel count evenly while keeping the thread count under the device
// limit (505 when none qualifies), then grows the block count from
// 3 * multiprocessors until it divides the thread count and each block stays
// under the per-block thread limit. Exits the process when the resolution is
// not a multiple of 5.
__host__ std::pair<int, int> optimal_dimension(int screen_width,
                                               int screen_height,
                                               int max_threads,
                                               int multiprocessors,
                                               int threads_per_block)
{
    const int pix_count = screen_width * screen_height;
    if (pix_count % 5 != 0)
    {
        std::cout << "Error: unsupported image resolution." << std::endl;
        std::cout << "Please add manually needed block dimension to function 'optimal dimension'" << std::endl;
        exit(-1);
    }
    // Ascending scan: the first qualifying dim is the smallest one, which is
    // what the original descending overwrite loop ultimately selected.
    int thread_scope = 505;
    for (int dim = 5; dim <= 500; dim += 5)
    {
        if (pix_count % dim == 0 && pix_count / dim < max_threads)
        {
            thread_scope = dim;
            break;
        }
    }
    const int thread_number = pix_count / thread_scope;
    int block_number = 3 * multiprocessors;
    while (thread_number % block_number != 0 || thread_number / block_number >= threads_per_block)
    {
        ++block_number;
    }
    return std::pair<int, int>(thread_number, block_number);
}
// Serializes scene figures into a flat double array for transfer to the GPU.
// Layout per figure (17 doubles): [0]=type, [1..3]=RGB, [4..6]=first point,
// [7..15]=remaining points (unused for spheres), [16]=radius.
// Exits the process on an unknown figure type (matching the file's error
// handling style). Caller owns the returned array.
// Fix vs. previous revision: the array is now value-initialized, so slots
// that a figure type does not write (indices 7..15 for spheres) are 0.0
// instead of indeterminate memory that was then copied to the device.
__host__ double* pack_objects(figure* objects, size_t fig_count)
{
    double* packed_objs = new double[fig_count * 17]();  // () -> zero-filled
    for (size_t i = 0; i < fig_count; ++i)
    {
        double* dst = packed_objs + i * 17;
        const figure& f = objects[i];
        dst[0] = f.type;
        dst[1] = f.R;
        dst[2] = f.G;
        dst[3] = f.B;
        dst[4] = f.x1;
        dst[5] = f.y1;
        dst[6] = f.z1;
        if (f.type == 1)
        {
            dst[16] = f.Rad;  // sphere radius
        }
        else
        {
            std::cout << "Error: unknown type" << std::endl;
            exit(-1);
        }
    }
    return packed_objs;
}
// Single-value record for returning debug information from the device.
// A buffer of this type is allocated in main(); raytrace_kernel currently
// ignores its `log` parameter, so this is a dormant debugging hook.
typedef struct
{
public:
double log_value;
}GPU_log;
// Primary-ray tracer: each thread shades `thread_scope + 1` horizontally
// consecutive pixels. Spheres (type 1) are the only implemented primitive.
//
// geometry_data layout (18 doubles):
//   [0..2] light pos, [3..5] screen up step, [6..8] screen right step,
//   [9..11] camera pos, [12..14] upper-left screen corner,
//   [15] screen width, [16] screen height, [17] figure count.
// objects: 17 doubles per figure (type, R, G, B, x1, y1, z1, ..., radius).
// frame: 24-bit BGR pixel buffer with rows stored bottom-up (BMP order).
// log: unused debugging hook.
//
// Fixes vs. the previous revision:
//  * closest-hit selection used `i == 0`, so when figure 0 missed it compared
//    against the distance to the ORIGIN (and could shade with uninitialized
//    R/G/B) -- replaced with a proper have-hit flag;
//  * a misplaced parenthesis summed two squared terms inside a pow()
//    exponent in that comparison -- squared distances are now computed
//    directly.
__global__ void raytrace_kernel(uint8_t* frame,
                                double* objects,
                                double* geometry_data, int thread_scope, GPU_log* log)
{
    // Stage scene constants in shared memory. Every thread stores identical
    // values, so the unsynchronized writes are benign (a single-writer +
    // __syncthreads() scheme would be cleaner).
    __shared__ size_t f_count; f_count = geometry_data[17];
    __shared__ int scr_width; scr_width = geometry_data[15];
    __shared__ int scr_height; scr_height = geometry_data[16];
    __shared__ double camera[3]; camera[0] = geometry_data[9]; camera[1] = geometry_data[10]; camera[2] = geometry_data[11];
    __shared__ double light[3]; light[0] = geometry_data[0]; light[1] = geometry_data[1]; light[2] = geometry_data[2];
    __shared__ double upvect[3]; upvect[0] = geometry_data[3]; upvect[1] = geometry_data[4]; upvect[2] = geometry_data[5];
    __shared__ double ortsup[3]; ortsup[0] = geometry_data[6]; ortsup[1] = geometry_data[7]; ortsup[2] = geometry_data[8];
    __shared__ double lu_cor[3]; lu_cor[0] = geometry_data[12]; lu_cor[1] = geometry_data[13]; lu_cor[2] = geometry_data[14];

    int reverse_num = (threadIdx.x + blockIdx.x * blockDim.x) * thread_scope;
    int geometry_num = scr_width * scr_height - reverse_num - 1;
    // Decompose the linear pixel number: geometry_num = x + width * y.
    int y_scr_0 = geometry_num / scr_width;
    int x_scr_0 = geometry_num - scr_width * y_scr_0;

    // TODO(review): x_scr_0 + p can run past the end of a row without wrapping
    // y, and the "+ 1" overlaps neighbouring threads by one pixel; both are
    // preserved from the original host/device launch contract -- confirm
    // against the grid computed in optimal_dimension().
    for (int p = 0; p <= thread_scope; ++p)
    {
        int x_scr = x_scr_0 + p;
        int y_scr = y_scr_0;
        // Pixel position on the physical screen plane.
        double x_phys = lu_cor[0] + x_scr * ortsup[0] + y_scr * upvect[0];
        double y_phys = lu_cor[1] + x_scr * ortsup[1] + y_scr * upvect[1];
        double z_phys = lu_cor[2] + x_scr * ortsup[2] + y_scr * upvect[2];
        double trace_ray[3] = { x_phys - camera[0], y_phys - camera[1], z_phys - camera[2] };

        int R = 0, G = 0, B = 0;
        double surface_normal[3] = { 0, 0, 0 };
        double intersection[3] = { 0, 0, 0 };
        bool hit = false;
        double best_dist2 = 0.;  // squared camera distance of the recorded hit

        for (size_t f = 0; f < f_count; ++f)
        {
            int base = (int)f * 17;
            if (objects[base] == 1.0) // sphere
            {
                // Solve |camera + t * ray - center|^2 = r^2 as A t^2 + B t + C = 0.
                double A_ = trace_ray[0] * trace_ray[0] + trace_ray[1] * trace_ray[1] + trace_ray[2] * trace_ray[2];
                double B_ = 2 * (trace_ray[0] * (camera[0] - objects[base + 4]) +
                                 trace_ray[1] * (camera[1] - objects[base + 5]) +
                                 trace_ray[2] * (camera[2] - objects[base + 6]));
                double C_ = (camera[0] - objects[base + 4]) * (camera[0] - objects[base + 4]) +
                            (camera[1] - objects[base + 5]) * (camera[1] - objects[base + 5]) +
                            (camera[2] - objects[base + 6]) * (camera[2] - objects[base + 6]) -
                            objects[base + 16] * objects[base + 16];
                double discr = B_ * B_ - 4 * A_ * C_;
                if (discr <= 0) continue;  // ray misses (tangency counts as a miss)
                // Nearer of the two roots. TODO(review): t can be negative
                // (sphere behind the camera); the original accepted that too,
                // so the behavior is preserved.
                double t1 = (-B_ - sqrt(discr)) / (2 * A_);
                double t2 = (-B_ + sqrt(discr)) / (2 * A_);
                double t = (t1 < t2) ? t1 : t2;
                double px = camera[0] + trace_ray[0] * t;
                double py = camera[1] + trace_ray[1] * t;
                double pz = camera[2] + trace_ray[2] * t;
                double dist2 = (px - camera[0]) * (px - camera[0]) +
                               (py - camera[1]) * (py - camera[1]) +
                               (pz - camera[2]) * (pz - camera[2]);
                // Keep only the intersection closest to the camera.
                if (!hit || dist2 < best_dist2)
                {
                    hit = true;
                    best_dist2 = dist2;
                    intersection[0] = px; intersection[1] = py; intersection[2] = pz;
                    for (int k = 0; k < 3; ++k)
                        surface_normal[k] = intersection[k] - objects[base + 4 + k];
                    R = objects[base + 1]; G = objects[base + 2]; B = objects[base + 3];
                }
            }
            else
            {
                /* Unknown shape type */
                printf("Error: Unknown object type\n");
            }
        }

        if (hit)
        {
            // Lambertian-style shading with a 0.2 ambient lift.
            double light_vect[3] = { light[0] - intersection[0], light[1] - intersection[1], light[2] - intersection[2] };
            double cos_alpha = (light_vect[0] * surface_normal[0] + light_vect[1] * surface_normal[1] +
                light_vect[2] * surface_normal[2]) /
                (sqrt(light_vect[0] * light_vect[0] + light_vect[1] * light_vect[1] + light_vect[2] * light_vect[2]) *
                 sqrt(surface_normal[0] * surface_normal[0] + surface_normal[1] * surface_normal[1] +
                      surface_normal[2] * surface_normal[2]));
            if (cos_alpha + 0.2 < 0) cos_alpha = -0.2;  // keep the pow() base non-negative
            double shade = pow(cos_alpha + 0.2, 1.5);
            R = static_cast<int>(R * shade); if (R > 255) R = 255;
            G = static_cast<int>(G * shade); if (G > 255) G = 255;
            B = static_cast<int>(B * shade); if (B > 255) B = 255;
            // BMP rows are stored bottom-up, pixels as BGR.
            int row = (scr_height - y_scr - 1) * scr_width * 3;
            frame[row + 3 * x_scr] = B;
            frame[row + 3 * x_scr + 1] = G;
            frame[row + 3 * x_scr + 2] = R;
        }
    }
    (void)log;  // currently unused
}
__host__ int main(int argc, char* argv[])
{
std::string objects = "objects.txt";
std::string props = "properties.txt";
std::string save_name = "image_2.bmp";
/* ========================= */
size_t fig_count;
double* light = new double[3];
double* camera = new double[3];
double* screen_normal = new double[3];
double* upvector = new double[3];
double* ort_sup = new double[3];
double* lu_corner = new double[3];
double screen_dist, view_depth;
int screen_width, screen_height;
// Getting scene props & objects
figure* Scene_objects = scene_objects(objects, &fig_count);
scene_props(props, light, camera, upvector, screen_normal, screen_dist, view_depth, screen_width, screen_height);
// Initializing image
BMP_Image frame(screen_width, screen_height);
for (int i = 0; i < frame.pixlen; ++i) frame.pixels[i] = 0;
// Some geometry
ort_sup[0] = upvector[1] * screen_normal[2] - upvector[2] * screen_normal[1];
ort_sup[1] = upvector[2] * screen_normal[0] - upvector[0] * screen_normal[2];
ort_sup[2] = upvector[0] * screen_normal[1] - upvector[1] * screen_normal[0];
to_length(screen_normal, screen_dist);
to_length(upvector, static_cast<double>(screen_height) / 2);
to_length(ort_sup, static_cast<double>(screen_width) / 2);
lu_corner[0] = camera[0] + screen_normal[0] + upvector[0] + ort_sup[0];
lu_corner[1] = camera[1] + screen_normal[1] + upvector[1] + ort_sup[1];
lu_corner[2] = camera[2] + screen_normal[2] + upvector[2] + ort_sup[2];
to_length(upvector, 1.0);
to_length(ort_sup, 1.0);
for (int i = 0; i < 3; ++i) upvector[i] *= -1, ort_sup[i] *= -1;
// Constructing thread-blocks grid
if (hipSetDevice(0) != hipSuccess)
{
std::cout << "Error occured while tried to initialize GPU" << std::endl;
exit(-1);
}
hipDeviceProp_t device_properties;
hipGetDeviceProperties(&device_properties, 0);
int mp_count = device_properties.multiProcessorCount;
int supported_threads = device_properties.maxThreadsPerMultiProcessor;
int sup_threads_per_block = device_properties.maxThreadsPerBlock;
int max_threads = mp_count * supported_threads;
int max_blocks = device_properties.maxBlocksPerMultiProcessor;
int max_surface = device_properties.maxSurface1D;
int shared_memory_per_block = device_properties.sharedMemPerBlock;
std::cout << "Max shared memory per block available: " << shared_memory_per_block << std::endl;
std::pair<int, int> grid_params = optimal_dimension(screen_width, screen_height, max_threads, mp_count, sup_threads_per_block);
int block_size = grid_params.first / grid_params.second;
int thread_scope = screen_width * screen_height / grid_params.first;
// Packing & transfering data to gpu
double* device_geom_data;
double* device_objs_data;
double* packed_vect_data = new double[18]; // light upvect ortsup camera lu_corner w h f_count
double* packed_objs_data = pack_objects(Scene_objects, fig_count);
uint8_t* device_canvas;
GPU_log* device_log, *host_log = new GPU_log;
packed_vect_data[0] = light[0]; packed_vect_data[1] = light[1]; packed_vect_data[2] = light[2];
packed_vect_data[3] = upvector[0]; packed_vect_data[4] = upvector[1]; packed_vect_data[5] = upvector[2];
packed_vect_data[6] = ort_sup[0]; packed_vect_data[7] = ort_sup[1]; packed_vect_data[8] = ort_sup[2];
packed_vect_data[9] = camera[0]; packed_vect_data[10] = camera[1]; packed_vect_data[11] = camera[2];
packed_vect_data[12] = lu_corner[0]; packed_vect_data[13] = lu_corner[1]; packed_vect_data[14] = lu_corner[2];
packed_vect_data[15] = screen_width; packed_vect_data[16] = screen_height; packed_vect_data[17] = fig_count;
if (hipMalloc(&device_geom_data, 18*sizeof(double)) != hipSuccess ||
hipMalloc(&device_objs_data, (17 * fig_count) * sizeof(double)) != hipSuccess ||
hipMalloc(&device_canvas, sizeof(uint8_t)*frame.pixlen) != hipSuccess ||
hipMalloc(&device_log, sizeof(GPU_log)) != hipSuccess)
{
std::cout << "Error occured while tried to allocate memory on GPU" << std::endl;
exit(-1);
}
if (hipMemcpy(device_geom_data, packed_vect_data, 18 * sizeof(double), hipMemcpyHostToDevice) != hipSuccess ||
hipMemcpy(device_objs_data, packed_objs_data, (17 * fig_count) * sizeof(double), hipMemcpyHostToDevice) != hipSuccess)
{
std::cout << "Error occured while tried to transfer data to GPU" << std::endl;
hipFree(device_geom_data);
hipFree(device_objs_data);
hipFree(device_canvas);
hipFree(device_log);
exit(-1);
}
dim3 grid_dimension; grid_dimension.x = grid_params.second;
grid_dimension.y = grid_dimension.z = 1;
dim3 block_dimension; block_dimension.x = block_size;
block_dimension.y = block_dimension.z = 1;
// ===============================
dim3 max_grid(device_properties.maxGridSize[0], device_properties.maxGridSize[1], device_properties.maxGridSize[2]);
dim3 max_block(device_properties.maxThreadsDim[0], device_properties.maxThreadsDim[1], device_properties.maxThreadsDim[2]);
std::cout << "Maximum grid dim is: (" << max_grid.x << " " << max_grid.y << " " << max_grid.z << ")" << std::endl;
std::cout << "Current grid dim is: (" << grid_dimension.x << " " << grid_dimension.y << " " << grid_dimension.z << ")" << std::endl;
std::cout << "Maximum block dim is: (" << max_block.x << " " << max_block.y << " " << max_block.z << ")" << std::endl;
std::cout << "Current block dim is: (" << block_dimension.x << " " << block_dimension.y << " " << block_dimension.z << ")" << std::endl;
// ===============================
// Invoking kernel
auto start = std::chrono::system_clock::now();
hipLaunchKernelGGL(( raytrace_kernel) , dim3(grid_dimension), dim3(block_dimension), 0, 0, device_canvas,
device_objs_data,
device_geom_data,
thread_scope,
device_log);
if (hipMemcpy(frame.pixels, device_canvas, sizeof(uint8_t) * frame.pixlen, hipMemcpyDeviceToHost) != hipSuccess ||
hipMemcpy(host_log, device_log, sizeof(GPU_log), hipMemcpyDeviceToHost))
{
std::cout << "Error occured while tried to transfer data from GPU" << std::endl;
exit(-1);
}
auto end = std::chrono::system_clock::now();
int elapsed_ms = static_cast<int>(std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count());
std::cout << "Elapsed ms: " << elapsed_ms << std::endl;
///* Revise logvalue if needed */
//std::cout << "Log value is: " << host_log->log_value << std::endl;
///* ========================= */
frame.save(save_name);
if (hipFree(device_geom_data) != hipSuccess ||
hipFree(device_objs_data) != hipSuccess ||
hipFree(device_canvas) != hipSuccess ||
hipFree(device_log) != hipSuccess)
{
std::cout << "Error occured while tried to free memory on GPU" << std::endl;
exit(-1);
}
delete[] packed_objs_data;
delete[] packed_vect_data;
delete[] light;
delete[] camera;
delete[] upvector;
delete[] ort_sup;
delete[] screen_normal;
delete[] lu_corner;
delete host_log;
return 0;
}
| 2dd7e43638c10e9b1596fbf73140d937c244f63f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "canvas.h"
#include <iostream>
#include <string>
#include <fstream>
#include <cmath>
#include <cstdio>
#include <chrono>
// A renderable scene primitive plus its RGB colour.
// type: 1 = sphere | 2 = box | 3 = tetra. For spheres only (x1, y1, z1)
// (the centre) and Rad (the radius) are meaningful; the remaining vertex
// slots exist for the other shape types.
class figure
{
public:
    int R, G, B;
    int type; // 1 = sphere | 2 = box | 3 = tetra
    double x1, y1, z1;
    double x2, y2, z2;
    double x3, y3, z3;
    double x4, y4, z4;
    double Rad; // For sphere
    figure(int type, int R, int G, int B,
           double x1, double y1, double z1,
           double x2, double y2, double z2,
           double x3, double y3, double z3,
           double x4, double y4, double z4, double Rad)
        : R(R), G(G), B(B), type(type),
          x1(x1), y1(y1), z1(z1),
          x2(x2), y2(y2), z2(z2),
          x3(x3), y3(y3), z3(z3),
          x4(x4), y4(y4), z4(z4),
          Rad(Rad)
    {
    }
    // Default-constructed figures are left uninitialised (array fill).
    figure() {}
};
// Loads the scene description from 'filename': one object per line, and the
// only format currently understood is
//     sphere R G B x y z Rad
// The number of parsed objects is returned through figure_count; the caller
// owns the returned array (delete[]). Any parse/open failure terminates the
// process after printing a diagnostic.
__host__ figure* scene_objects(std::string filename, size_t* figure_count)
{
    std::ifstream obj_file;
    obj_file.open(filename);
    if (!obj_file.is_open())
    {
        std::cout << "Error occured while tried to open " << filename << std::endl;
        exit(-1);
    }
    // First pass: count lines (= objects).
    size_t count = 0;
    std::string line;
    while (getline(obj_file, line)) ++count;
    if (!count)
    {
        std::cout << "Error: no objects encountered." << std::endl;
        exit(-1);
    }
    // Rewind for the parsing pass.
    obj_file.clear();
    obj_file.seekg(0);
    figure* figures = new figure[count];
    for (size_t idx = 0; idx < count; ++idx)
    {
        std::string kind;
        obj_file >> kind;
        if (kind != "sphere")
        {
            std::cout << "Error: unknown figure encountered." << std::endl;
            exit(-1);
        }
        int R, G, B;
        double x, y, z, Rad;
        obj_file >> R >> G >> B >> x >> y >> z >> Rad;
        figures[idx] = figure(1, R, G, B, x, y, z,
                              0, 0, 0, 0, 0, 0, 0, 0, 0, Rad);
    }
    obj_file.close();
    *figure_count = count;
    return figures;
}
// Reads the eight scene/camera properties from 'filename' into the supplied
// output parameters. Each entry is a keyword followed by its value(s).
// NOTE: entries are recognised by the LENGTH of their keyword (the expected
// keywords all have distinct lengths, e.g. "camera" = 6, "light" = 5), so
// the property file must use exactly the expected names. Unknown keyword
// lengths terminate the process with a diagnostic.
void scene_props(std::string filename,
                 double* light,
                 double* camera,
                 double* upvector,
                 double* screen_normal,
                 double& screen_distance,
                 double& view_depth,
                 int& screen_width,
                 int& screen_height)
{
    std::ifstream props_file;
    props_file.open(filename);
    if (!props_file.is_open())
    {
        std::cout << "Error occured while tried to open " << filename << std::endl;
        exit(-1);
    }
    std::string key;
    for (int entry = 0; entry < 8; ++entry)
    {
        props_file >> key;
        const size_t len = key.size();
        if (len == 5)       // light
            props_file >> light[0] >> light[1] >> light[2];
        else if (len == 6)  // camera
            props_file >> camera[0] >> camera[1] >> camera[2];
        else if (len == 7)  // screen normal keyword (7 characters)
            props_file >> screen_normal[0] >> screen_normal[1] >> screen_normal[2];
        else if (len == 8)  // upvector
            props_file >> upvector[0] >> upvector[1] >> upvector[2];
        else if (len == 10) // view_depth
            props_file >> view_depth;
        else if (len == 11) // screen_dist
            props_file >> screen_distance;
        else if (len == 12) // screen_width
            props_file >> screen_width;
        else if (len == 13) // screen_height
            props_file >> screen_height;
        else
        {
            std::cout << "Error: unknown title encountered." << std::endl;
            exit(-1);
        }
    }
    props_file.close();
}
// Rescales the 3-vector 'vec' in place so its Euclidean norm equals
// |length| (the formula squares 'length', so its sign is ignored, exactly
// as in the original pow-based implementation).
void to_length(double* vec, double length)
{
    double norm2 = vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2];
    double scale = sqrt(length * length / norm2);
    for (int i = 0; i < 3; ++i) vec[i] *= scale;
}
// Chooses a (total thread count, block count) pair for the screen.
// The per-thread pixel span ("thread scope") is selected from multiples of
// five in [5, 500]: the scan runs downwards and keeps overwriting the
// candidate, so the SMALLEST divisor of the pixel count whose resulting
// thread count stays under 'max_threads' wins (if none qualifies the 505
// sentinel is used, matching the original behaviour). The block count
// starts at 3x the multiprocessor count and grows until it divides the
// thread count evenly with fewer than 'threads_per_block' threads per block.
// Exits with a diagnostic when the pixel count is not a multiple of five.
std::pair<int, int> optimal_dimension(int screen_width,
                                      int screen_height,
                                      int max_threads,
                                      int multiprocessors,
                                      int threads_per_block)
{
    const int pixels = screen_width * screen_height;
    if (pixels % 5 != 0)
    {
        std::cout << "Error: unsupported image resolution." << std::endl;
        std::cout << "Please add manually needed block dimension to function 'optimal dimension'" << std::endl;
        exit(-1);
    }
    int scope = 505; // sentinel larger than any candidate
    for (int candidate = 500; candidate >= 5; candidate -= 5)
    {
        const bool divides = (pixels % candidate == 0);
        const bool fits = (pixels / candidate < max_threads);
        if (candidate < scope && divides && fits)
            scope = candidate;
    }
    const int threads = pixels / scope;
    int blocks = 3 * multiprocessors;
    while (threads % blocks != 0 || threads / blocks >= threads_per_block)
        ++blocks;
    return std::pair<int, int>(threads, blocks);
}
// Serialises the scene objects into a flat array of 17 doubles per figure:
//   [0] type, [1..3] RGB, [4..6] first vertex / sphere centre,
//   [7..15] reserved for further vertices (other shape types), [16] radius.
// The buffer is later memcpy'd to the GPU in one piece, so it is
// value-initialised to zero up front: the sphere path only writes slots
// 0-6 and 16, and the original plain new[] left slots 7-15 indeterminate,
// copying uninitialised bytes to the device.
// Caller owns the returned array (delete[]). Unknown figure types
// terminate the process with a diagnostic, as before.
__host__ double* pack_objects(figure* objects, size_t fig_count)
{
    double* packed_objs = new double[fig_count * 17]();  // zero-initialised
    for (size_t i = 0; i < fig_count; ++i)
    {
        packed_objs[i * 17] = objects[i].type;
        packed_objs[i * 17 + 1] = objects[i].R;
        packed_objs[i * 17 + 2] = objects[i].G;
        packed_objs[i * 17 + 3] = objects[i].B;
        packed_objs[i * 17 + 4] = objects[i].x1;
        packed_objs[i * 17 + 5] = objects[i].y1;
        packed_objs[i * 17 + 6] = objects[i].z1;
        if (objects[i].type == 1)
        {
            packed_objs[i * 17 + 16] = objects[i].Rad;
        }
        else
        {
            std::cout << "Error: unknown type" << std::endl;
            exit(-1);
        }
    }
    return packed_objs;
}
// Debug record shared between host and device: main allocates one on the
// GPU, passes it to raytrace_kernel and copies it back after the launch
// (see the commented-out "Revise logvalue" print in main). The kernel does
// not currently write to it.
typedef struct
{
public:
    double log_value;
}GPU_log;
// Raytraces a horizontal span of thread_scope + 1 pixels per thread.
//
// geometry_data layout: [0..2] light, [3..5] screen up vector,
// [6..8] screen right vector, [9..11] camera, [12..14] left-upper screen
// corner, [15] screen width, [16] screen height, [17] figure count.
// objects: 17 doubles per figure as produced by pack_objects().
// 'log' is a device-side debug slot; currently unused.
//
// Fixes vs. the original: the closest-hit distance comparison had a
// misplaced parenthesis (pow(dx, 2 + ...)), the "first figure" (i == 0)
// test could discard real hits when figure 0 missed, the shadowed loop
// variables are renamed, and the shared-memory unpack is done by a single
// thread behind a barrier instead of racing every thread over it.
__global__ void raytrace_kernel(uint8_t* frame,
                                double* objects,
                                double* geometry_data, int thread_scope, GPU_log* log)
{
    // Unpack launch constants into shared memory once per block. One thread
    // writes, then the whole block synchronises: concurrent stores from all
    // threads (even of identical values) are a data race.
    __shared__ size_t f_count;
    __shared__ int scr_width;
    __shared__ int scr_height;
    __shared__ double camera[3];
    __shared__ double light[3];
    __shared__ double upvect[3];
    __shared__ double ortsup[3];
    __shared__ double lu_cor[3];
    if (threadIdx.x == 0)
    {
        f_count = geometry_data[17];
        scr_width = geometry_data[15];
        scr_height = geometry_data[16];
        for (int d = 0; d < 3; ++d)
        {
            light[d] = geometry_data[0 + d];
            upvect[d] = geometry_data[3 + d];
            ortsup[d] = geometry_data[6 + d];
            camera[d] = geometry_data[9 + d];
            lu_cor[d] = geometry_data[12 + d];
        }
    }
    __syncthreads();
    // Pixels are numbered back-to-front; recover this thread's starting
    // screen coordinates from its flat pixel number: geom_num = y*width + x.
    int reverse_num = (threadIdx.x + blockIdx.x * blockDim.x) * thread_scope;
    int geometry_num = scr_width * scr_height - reverse_num - 1;
    int y_scr_0 = 0, x_scr_0 = geometry_num - scr_width * y_scr_0;
    while (x_scr_0 >= scr_width)
    {
        y_scr_0++;
        x_scr_0 = geometry_num - scr_width * y_scr_0;
    }
    volatile bool intersected;
    // NOTE(review): the span covers thread_scope + 1 pixels, so adjacent
    // threads overlap by one pixel, and x_scr can run past the row end
    // without advancing y_scr -- kept as-is (presumably deliberate seam
    // covering; verify against the launch geometry).
    for (int p = 0; p < thread_scope + 1; ++p)
    {
        int x_scr = x_scr_0 + p;
        int y_scr = y_scr_0;
        // Physical position of this pixel on the screen plane.
        double x_phys = lu_cor[0] + x_scr * ortsup[0] + y_scr * upvect[0];
        double y_phys = lu_cor[1] + x_scr * ortsup[1] + y_scr * upvect[1];
        double z_phys = lu_cor[2] + x_scr * ortsup[2] + y_scr * upvect[2];
        double trace_ray[3] = { x_phys - camera[0], y_phys - camera[1], z_phys - camera[2] };
        int R = 0, G = 0, B = 0;
        double surface_normal[3] = { 0, 0, 0 };
        double intersection[3] = { 0, 0, 0 };
        double best_dist2 = 1e300; // squared camera distance of the closest hit so far
        intersected = false;
        for (size_t fig = 0; fig < f_count; ++fig)
        {
            int base = fig * 17;
            if (objects[base] == 1.0) // sphere
            {
                // Solve |camera + t*ray - centre|^2 = Rad^2 for t.
                double A_ = trace_ray[0] * trace_ray[0] + trace_ray[1] * trace_ray[1] + trace_ray[2] * trace_ray[2];
                double B_ = 2 * (trace_ray[0] * camera[0] + trace_ray[1] * camera[1] + trace_ray[2] * camera[2] -
                                 trace_ray[0] * objects[base + 4] - trace_ray[1] * objects[base + 5] - trace_ray[2] * objects[base + 6]);
                double C_ = (camera[0] - objects[base + 4]) * (camera[0] - objects[base + 4]) +
                            (camera[1] - objects[base + 5]) * (camera[1] - objects[base + 5]) +
                            (camera[2] - objects[base + 6]) * (camera[2] - objects[base + 6]) - objects[base + 16] * objects[base + 16];
                double discr = B_ * B_ - 4 * A_ * C_;
                if (discr <= 0) continue; // miss (tangential contact counts as a miss)
                // NOTE(review): also set when both roots are negative
                // (sphere entirely behind the camera) -- pre-existing
                // behaviour, kept unchanged.
                intersected = true;
                double param_1 = (-B_ - sqrt(discr)) / (2 * A_), param_2 = (-B_ + sqrt(discr)) / (2 * A_);
                (param_1 > param_2) ? param_1 = param_2 : param_2 = param_1; // keep the nearer root
                double tmp_intersection[3] = { camera[0] + trace_ray[0] * param_1, camera[1] + trace_ray[1] * param_1,
                                               camera[2] + trace_ray[2] * param_1 };
                // Keep the hit closest to the camera.
                double tmp_dist2 = (tmp_intersection[0] - camera[0]) * (tmp_intersection[0] - camera[0]) +
                                   (tmp_intersection[1] - camera[1]) * (tmp_intersection[1] - camera[1]) +
                                   (tmp_intersection[2] - camera[2]) * (tmp_intersection[2] - camera[2]);
                if (tmp_dist2 < best_dist2)
                {
                    best_dist2 = tmp_dist2;
                    for (int d = 0; d < 3; ++d) intersection[d] = tmp_intersection[d];
                    for (int d = 0; d < 3; ++d) surface_normal[d] = intersection[d] - objects[base + 4 + d];
                    R = objects[base + 1]; G = objects[base + 2]; B = objects[base + 3];
                }
            }
            else
            {
                /* Unknown shape type */
                printf("Error: Unknown object type\n");
            }
        }
        if (intersected == true)
        {
            // Simple Lambertian-style shading with a 0.2 ambient floor.
            double light_vect[3] = { light[0] - intersection[0], light[1] - intersection[1], light[2] - intersection[2] };
            double cos_alpha = (light_vect[0] * surface_normal[0] + light_vect[1] * surface_normal[1] +
                light_vect[2] * surface_normal[2]) / (sqrt(light_vect[0] * light_vect[0] + light_vect[1] * light_vect[1] +
                light_vect[2] * light_vect[2]) * sqrt(surface_normal[0] * surface_normal[0] + surface_normal[1] * surface_normal[1] +
                surface_normal[2] * surface_normal[2]));
            if (cos_alpha + 0.2 < 0) cos_alpha = -0.2;
            R = static_cast<int>(R * pow(cos_alpha + 0.2, 1.5)); if (R > 255) R = 255;
            G = static_cast<int>(G * pow(cos_alpha + 0.2, 1.5)); if (G > 255) G = 255;
            B = static_cast<int>(B * pow(cos_alpha + 0.2, 1.5)); if (B > 255) B = 255;
            // Rows are written flipped vertically; bytes go out in B, G, R
            // order (presumably matching BMP_Image's bottom-up BGR layout --
            // confirm against canvas.h).
            frame[(scr_height - y_scr - 1) * scr_width * 3 + 3 * x_scr] = B;
            frame[(scr_height - y_scr - 1) * scr_width * 3 + 3 * x_scr + 1] = G;
            frame[(scr_height - y_scr - 1) * scr_width * 3 + 3 * x_scr + 2] = R;
        }
    }
}
// Entry point: loads the scene and camera description, renders the image on
// the GPU with raytrace_kernel and saves it as a BMP file.
// Fixes vs. the original: kernel launch errors are now surfaced via
// cudaGetLastError(), the log-copy result is compared against cudaSuccess
// explicitly, the Scene_objects array is no longer leaked, and the unused
// device-property locals were dropped.
__host__ int main(int argc, char* argv[])
{
    std::string objects = "objects.txt";
    std::string props = "properties.txt";
    std::string save_name = "image_2.bmp";
    /* ========================= */
    size_t fig_count;
    double* light = new double[3];
    double* camera = new double[3];
    double* screen_normal = new double[3];
    double* upvector = new double[3];
    double* ort_sup = new double[3];
    double* lu_corner = new double[3];
    double screen_dist, view_depth;
    int screen_width, screen_height;
    // Getting scene props & objects
    figure* Scene_objects = scene_objects(objects, &fig_count);
    scene_props(props, light, camera, upvector, screen_normal, screen_dist, view_depth, screen_width, screen_height);
    // Initializing image
    BMP_Image frame(screen_width, screen_height);
    for (int i = 0; i < frame.pixlen; ++i) frame.pixels[i] = 0;
    // Some geometry: ort_sup = upvector x screen_normal spans the screen's
    // horizontal axis; the three vectors are scaled so that
    // camera + normal + up + right lands on the screen's left-upper corner.
    ort_sup[0] = upvector[1] * screen_normal[2] - upvector[2] * screen_normal[1];
    ort_sup[1] = upvector[2] * screen_normal[0] - upvector[0] * screen_normal[2];
    ort_sup[2] = upvector[0] * screen_normal[1] - upvector[1] * screen_normal[0];
    to_length(screen_normal, screen_dist);
    to_length(upvector, static_cast<double>(screen_height) / 2);
    to_length(ort_sup, static_cast<double>(screen_width) / 2);
    lu_corner[0] = camera[0] + screen_normal[0] + upvector[0] + ort_sup[0];
    lu_corner[1] = camera[1] + screen_normal[1] + upvector[1] + ort_sup[1];
    lu_corner[2] = camera[2] + screen_normal[2] + upvector[2] + ort_sup[2];
    // Reduce to unit step vectors pointing across/down the screen.
    to_length(upvector, 1.0);
    to_length(ort_sup, 1.0);
    for (int i = 0; i < 3; ++i) upvector[i] *= -1, ort_sup[i] *= -1;
    // Constructing thread-blocks grid
    if (cudaSetDevice(0) != cudaSuccess)
    {
        std::cout << "Error occured while tried to initialize GPU" << std::endl;
        exit(-1);
    }
    cudaDeviceProp device_properties;
    if (cudaGetDeviceProperties(&device_properties, 0) != cudaSuccess)
    {
        std::cout << "Error occured while tried to query GPU properties" << std::endl;
        exit(-1);
    }
    int mp_count = device_properties.multiProcessorCount;
    int supported_threads = device_properties.maxThreadsPerMultiProcessor;
    int sup_threads_per_block = device_properties.maxThreadsPerBlock;
    int max_threads = mp_count * supported_threads;
    int shared_memory_per_block = device_properties.sharedMemPerBlock;
    std::cout << "Max shared memory per block available: " << shared_memory_per_block << std::endl;
    std::pair<int, int> grid_params = optimal_dimension(screen_width, screen_height, max_threads, mp_count, sup_threads_per_block);
    int block_size = grid_params.first / grid_params.second;
    int thread_scope = screen_width * screen_height / grid_params.first;
    // Packing & transfering data to gpu
    double* device_geom_data;
    double* device_objs_data;
    double* packed_vect_data = new double[18]; // light upvect ortsup camera lu_corner w h f_count
    double* packed_objs_data = pack_objects(Scene_objects, fig_count);
    uint8_t* device_canvas;
    GPU_log* device_log, *host_log = new GPU_log;
    packed_vect_data[0] = light[0]; packed_vect_data[1] = light[1]; packed_vect_data[2] = light[2];
    packed_vect_data[3] = upvector[0]; packed_vect_data[4] = upvector[1]; packed_vect_data[5] = upvector[2];
    packed_vect_data[6] = ort_sup[0]; packed_vect_data[7] = ort_sup[1]; packed_vect_data[8] = ort_sup[2];
    packed_vect_data[9] = camera[0]; packed_vect_data[10] = camera[1]; packed_vect_data[11] = camera[2];
    packed_vect_data[12] = lu_corner[0]; packed_vect_data[13] = lu_corner[1]; packed_vect_data[14] = lu_corner[2];
    packed_vect_data[15] = screen_width; packed_vect_data[16] = screen_height; packed_vect_data[17] = fig_count;
    if (cudaMalloc(&device_geom_data, 18 * sizeof(double)) != cudaSuccess ||
        cudaMalloc(&device_objs_data, (17 * fig_count) * sizeof(double)) != cudaSuccess ||
        cudaMalloc(&device_canvas, sizeof(uint8_t) * frame.pixlen) != cudaSuccess ||
        cudaMalloc(&device_log, sizeof(GPU_log)) != cudaSuccess)
    {
        std::cout << "Error occured while tried to allocate memory on GPU" << std::endl;
        exit(-1);
    }
    if (cudaMemcpy(device_geom_data, packed_vect_data, 18 * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(device_objs_data, packed_objs_data, (17 * fig_count) * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess)
    {
        std::cout << "Error occured while tried to transfer data to GPU" << std::endl;
        cudaFree(device_geom_data);
        cudaFree(device_objs_data);
        cudaFree(device_canvas);
        cudaFree(device_log);
        exit(-1);
    }
    dim3 grid_dimension; grid_dimension.x = grid_params.second;
    grid_dimension.y = grid_dimension.z = 1;
    dim3 block_dimension; block_dimension.x = block_size;
    block_dimension.y = block_dimension.z = 1;
    // ===============================
    dim3 max_grid(device_properties.maxGridSize[0], device_properties.maxGridSize[1], device_properties.maxGridSize[2]);
    dim3 max_block(device_properties.maxThreadsDim[0], device_properties.maxThreadsDim[1], device_properties.maxThreadsDim[2]);
    std::cout << "Maximum grid dim is: (" << max_grid.x << " " << max_grid.y << " " << max_grid.z << ")" << std::endl;
    std::cout << "Current grid dim is: (" << grid_dimension.x << " " << grid_dimension.y << " " << grid_dimension.z << ")" << std::endl;
    std::cout << "Maximum block dim is: (" << max_block.x << " " << max_block.y << " " << max_block.z << ")" << std::endl;
    std::cout << "Current block dim is: (" << block_dimension.x << " " << block_dimension.y << " " << block_dimension.z << ")" << std::endl;
    // ===============================
    // Invoking kernel
    auto start = std::chrono::system_clock::now();
    raytrace_kernel<<<grid_dimension, block_dimension>>>(device_canvas,
                                                         device_objs_data,
                                                         device_geom_data,
                                                         thread_scope,
                                                         device_log);
    // Launch-configuration errors are only reported through
    // cudaGetLastError(); the blocking memcpys below then surface any
    // asynchronous execution error.
    if (cudaGetLastError() != cudaSuccess)
    {
        std::cout << "Error occured while tried to launch kernel" << std::endl;
        exit(-1);
    }
    if (cudaMemcpy(frame.pixels, device_canvas, sizeof(uint8_t) * frame.pixlen, cudaMemcpyDeviceToHost) != cudaSuccess ||
        cudaMemcpy(host_log, device_log, sizeof(GPU_log), cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        std::cout << "Error occured while tried to transfer data from GPU" << std::endl;
        exit(-1);
    }
    auto end = std::chrono::system_clock::now();
    int elapsed_ms = static_cast<int>(std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count());
    std::cout << "Elapsed ms: " << elapsed_ms << std::endl;
    ///* Revise logvalue if needed */
    //std::cout << "Log value is: " << host_log->log_value << std::endl;
    ///* ========================= */
    frame.save(save_name);
    if (cudaFree(device_geom_data) != cudaSuccess ||
        cudaFree(device_objs_data) != cudaSuccess ||
        cudaFree(device_canvas) != cudaSuccess ||
        cudaFree(device_log) != cudaSuccess)
    {
        std::cout << "Error occured while tried to free memory on GPU" << std::endl;
        exit(-1);
    }
    delete[] packed_objs_data;
    delete[] packed_vect_data;
    delete[] Scene_objects; // was leaked in the original
    delete[] light;
    delete[] camera;
    delete[] upvector;
    delete[] ort_sup;
    delete[] screen_normal;
    delete[] lu_corner;
    delete host_log;
    return 0;
}
|
791db4ec4586ab081e72c1cb7c56a3b4862d19a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
// GPU kernel: computes c = a * b for square N x N row-major matrices, one
// output element per thread. Expects a 2D launch grid; threads that fall
// outside the matrix (the launch rounds the grid up) do nothing.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
  int val = 0;
  // Row/column this thread is responsible for.
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < N && col < N)
  {
    for ( int k = 0; k < N; ++k )
      val += a[row * N + k] * b[k * N + col];
    c[row * N + col] = val;
  }
}
// CPU reference implementation of c = a * b for square N x N row-major
// matrices; used by main to validate the GPU result.
void matrixMulCPU( int * a, int * b, int * c )
{
  int val = 0;
  for( int row = 0; row < N; ++row )
    for( int col = 0; col < N; ++col )
    {
      // Reset the accumulator for each output element.
      val = 0;
      for ( int k = 0; k < N; ++k )
        val += a[row * N + k] * b[k * N + col];
      c[row * N + col] = val;
    }
}
int main()
{
int *a, *b, *c_cpu, *c_gpu;
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
hipMallocManaged (&a, size);
hipMallocManaged (&b, size);
hipMallocManaged (&c_cpu, size);
hipMallocManaged (&c_gpu, size);
// Initialize memory
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
dim3 threads_per_block (16, 16, 1); // A 16 x 16 block threads
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu );
hipDeviceSynchronize(); // Wait for the GPU to finish before proceeding
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
hipFree(a); hipFree(b);
hipFree( c_cpu ); hipFree( c_gpu );
} | 791db4ec4586ab081e72c1cb7c56a3b4862d19a1.cu | #include <stdio.h>
#define N 64
// GPU kernel: computes c = a * b for square N x N row-major matrices, one
// output element per thread. Expects a 2D launch grid; threads landing
// outside the matrix (from the rounded-up grid) exit without writing.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= N || col >= N)
    return;
  int acc = 0;
  for ( int k = 0; k < N; ++k )
    acc += a[row * N + k] * b[k * N + col];
  c[row * N + col] = acc;
}
// CPU reference implementation of c = a * b for square N x N row-major
// matrices; main compares its output against the GPU result.
void matrixMulCPU( int * a, int * b, int * c )
{
  for ( int row = 0; row < N; ++row )
    for ( int col = 0; col < N; ++col )
    {
      int acc = 0; // fresh accumulator per output element
      for ( int k = 0; k < N; ++k )
        acc += a[row * N + k] * b[k * N + col];
      c[row * N + col] = acc;
    }
}
int main()
{
int *a, *b, *c_cpu, *c_gpu;
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
dim3 threads_per_block (16, 16, 1); // A 16 x 16 block threads
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
cudaDeviceSynchronize(); // Wait for the GPU to finish before proceeding
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
cudaFree(a); cudaFree(b);
cudaFree( c_cpu ); cudaFree( c_gpu );
} |
e81b257182d1611830f9556aaa4cabc2d194ef67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../Common/ImageWriter.cuh"
#include "../Common/Vector3.cuh"
const int ImageWidth = 1024;
const int ImageHeight = 512;
const int BlockSize = 1;
// Kernel: fills 'pixels' with a gradient test image -- the first component
// grows with the column, the second with the (vertically flipped) row, and
// the third is a constant 0.2. (Components presumably map to R/G/B in the
// PPM writer -- confirm against ImageWriter.) Uses a grid-stride loop, so
// any launch configuration covers all width*height pixels.
__global__
void CalculatePixels(int width, int height, Vector3* pixels)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int numPixels = width * height;
    for (int k = index; k < numPixels; k += stride)
    {
        // j counts rows from the bottom; i is the column within the row.
        int j = height - 1 - (k / width);
        int i = k % width;
        pixels[k] = Vector3(
            static_cast<float>(i) / static_cast<float>(width),
            static_cast<float>(j) / static_cast<float>(height),
            0.2);
    }
}
int main(int argc, char** argv)
{
std::string fileName = ImageWriter::GetFileName(argc, argv);
// Allocate Unified Memory accessible from CPU or GPU
int numPixels = ImageWidth*ImageHeight;
Vector3 *pixels;
hipMallocManaged(&pixels, numPixels*sizeof(Vector3));
// Run kernel on the GPU
int numBlocks = (numPixels + BlockSize - 1) / BlockSize;
hipLaunchKernelGGL(( CalculatePixels), dim3(numBlocks), dim3(BlockSize), 0, 0, ImageWidth, ImageHeight, pixels);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
ImageWriter::WritePPM(fileName, ImageWidth, ImageHeight, pixels);
// Free memory
hipFree(pixels);
return 0;
} | e81b257182d1611830f9556aaa4cabc2d194ef67.cu | #include "../Common/ImageWriter.cuh"
#include "../Common/Vector3.cuh"
const int ImageWidth = 1024;
const int ImageHeight = 512;
const int BlockSize = 1;
// Kernel: fills 'pixels' with a gradient test image -- the first component
// grows with the column, the second with the vertically flipped row, the
// third is a constant 0.2. Grid-stride loop: any launch configuration
// covers all width*height pixels.
__global__
void CalculatePixels(int width, int height, Vector3* pixels)
{
    const int total = width * height;
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += stride)
    {
        const int col = idx % width;
        const int row = height - 1 - (idx / width); // rows counted from the bottom
        pixels[idx] = Vector3(
            static_cast<float>(col) / static_cast<float>(width),
            static_cast<float>(row) / static_cast<float>(height),
            0.2);
    }
}
int main(int argc, char** argv)
{
std::string fileName = ImageWriter::GetFileName(argc, argv);
// Allocate Unified Memory – accessible from CPU or GPU
int numPixels = ImageWidth*ImageHeight;
Vector3 *pixels;
cudaMallocManaged(&pixels, numPixels*sizeof(Vector3));
// Run kernel on the GPU
int numBlocks = (numPixels + BlockSize - 1) / BlockSize;
CalculatePixels<<<numBlocks, BlockSize>>>(ImageWidth, ImageHeight, pixels);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
ImageWriter::WritePPM(fileName, ImageWidth, ImageHeight, pixels);
// Free memory
cudaFree(pixels);
return 0;
} |
9e2a89bb7d48f5e4ae8c5c3f48488c55b7330941.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_l2;
int xdim0_update_halo_kernel1_l2_h = -1;
__constant__ int xdim1_update_halo_kernel1_l2;
int xdim1_update_halo_kernel1_l2_h = -1;
__constant__ int xdim2_update_halo_kernel1_l2;
int xdim2_update_halo_kernel1_l2_h = -1;
__constant__ int xdim3_update_halo_kernel1_l2;
int xdim3_update_halo_kernel1_l2_h = -1;
__constant__ int xdim4_update_halo_kernel1_l2;
int xdim4_update_halo_kernel1_l2_h = -1;
__constant__ int xdim5_update_halo_kernel1_l2;
int xdim5_update_halo_kernel1_l2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y) (x + xdim0_update_halo_kernel1_l2 * (y))
#define OPS_ACC1(x, y) (x + xdim1_update_halo_kernel1_l2 * (y))
#define OPS_ACC2(x, y) (x + xdim2_update_halo_kernel1_l2 * (y))
#define OPS_ACC3(x, y) (x + xdim3_update_halo_kernel1_l2 * (y))
#define OPS_ACC4(x, y) (x + xdim4_update_halo_kernel1_l2 * (y))
#define OPS_ACC5(x, y) (x + xdim5_update_halo_kernel1_l2 * (y))
// user function: for every field whose flag is set in 'fields', overwrite
// the current cell with the value three cells away in +x. (The kernel name
// suggests this refreshes the second layer of the low-x halo -- confirm
// against the OPS-generated host stub.)
__device__

    inline void
    update_halo_kernel1_l2_gpu(double *density0, double *energy0,
                               double *energy1, double *u, double *p,
                               double *sd, const int *fields) {

  if (fields[FIELD_DENSITY] == 1)
    density0[OPS_ACC0(0, 0)] = density0[OPS_ACC0(3, 0)];
  if (fields[FIELD_ENERGY0] == 1)
    energy0[OPS_ACC1(0, 0)] = energy0[OPS_ACC1(3, 0)];
  if (fields[FIELD_ENERGY1] == 1)
    energy1[OPS_ACC2(0, 0)] = energy1[OPS_ACC2(3, 0)];
  if (fields[FIELD_U] == 1)
    u[OPS_ACC3(0, 0)] = u[OPS_ACC3(3, 0)];
  if (fields[FIELD_P] == 1)
    p[OPS_ACC4(0, 0)] = p[OPS_ACC4(3, 0)];
  if (fields[FIELD_SD] == 1)
    sd[OPS_ACC5(0, 0)] = sd[OPS_ACC5(3, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
// CUDA wrapper generated by OPS: one thread per (x, y) point of the
// iteration range. Each dataset pointer is advanced to this thread's
// element using the device-constant row pitches
// (xdimN_update_halo_kernel1_l2); threads outside (size0, size1) -- grid
// round-up padding -- do nothing.
__global__ void
ops_update_halo_kernel1_l2(double *__restrict arg0, double *__restrict arg1,
                           double *__restrict arg2, double *__restrict arg3,
                           double *__restrict arg4, double *__restrict arg5,
                           const int *__restrict arg6, int size0, int size1) {

  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  // Stride is 1 element in x and one padded row (xdimN_*) in y.
  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_l2;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_l2;
  arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_l2;
  arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_l2;
  arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_l2;
  arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_l2;
  if (idx_x < size0 && idx_y < size1) {
    update_halo_kernel1_l2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
  }
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_l2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 7, range, 53))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(53, "update_halo_kernel1_l2");
OPS_kernels[53].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_l2_h ||
xdim1 != xdim1_update_halo_kernel1_l2_h ||
xdim2 != xdim2_update_halo_kernel1_l2_h ||
xdim3 != xdim3_update_halo_kernel1_l2_h ||
xdim4 != xdim4_update_halo_kernel1_l2_h ||
xdim5 != xdim5_update_halo_kernel1_l2_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel1_l2, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_l2_h = xdim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel1_l2, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_l2_h = xdim1;
hipMemcpyToSymbol(xdim2_update_halo_kernel1_l2, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_l2_h = xdim2;
hipMemcpyToSymbol(xdim3_update_halo_kernel1_l2, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_l2_h = xdim3;
hipMemcpyToSymbol(xdim4_update_halo_kernel1_l2, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_l2_h = xdim4;
hipMemcpyToSymbol(xdim5_update_halo_kernel1_l2, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_l2_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_l2), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d, x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[53].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block,
                                         int dim, int *range, ops_arg arg0,
                                         ops_arg arg1, ops_arg arg2,
                                         ops_arg arg3, ops_arg arg4,
                                         ops_arg arg5, ops_arg arg6) {
  // Lazy-execution entry point: rather than running the loop now, record a
  // kernel descriptor that the OPS runtime executes later through
  // ops_par_loop_update_halo_kernel1_l2_execute().
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 53; // this kernel's slot in the OPS bookkeeping tables

  // djb2-style hash over the kernel id, iteration range and dat indices;
  // the runtime uses it to recognise repeated invocations of this loop.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 53;
  for (int i = 0; i < 4; i++) {
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }

  desc->nargs = 7;
  desc->args = (ops_arg *)malloc(7 * sizeof(ops_arg));
  ops_arg all_args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
  for (int i = 0; i < 7; i++)
    desc->args[i] = all_args[i];
  for (int i = 0; i < 6; i++) // arg6 is a gbl argument and has no dat
    desc->hash = ((desc->hash << 5) + desc->hash) + all_args[i].dat->index;

  // arg6 carries host data (the fields flags); snapshot it so the caller's
  // buffer may change or be freed before the deferred kernel actually runs.
  char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
  memcpy(tmp, arg6.data, NUM_FIELDS * sizeof(int));
  desc->args[6].data = tmp;

  desc->function = ops_par_loop_update_halo_kernel1_l2_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(53, "update_halo_kernel1_l2");
  }
  ops_enqueue_kernel(desc);
}
#endif
| 9e2a89bb7d48f5e4ae8c5c3f48488c55b7330941.cu | //
// auto-generated by ops.py
//
// Leading (x) dimension of each dat, cached in device constant memory so the
// OPS_ACCn macros below can compute flat offsets inside the kernel.
__constant__ int xdim0_update_halo_kernel1_l2;
// Host-side shadow of the constant above; -1 means "not yet uploaded".  The
// host stub compares against these to skip redundant cudaMemcpyToSymbol calls.
int xdim0_update_halo_kernel1_l2_h = -1;
__constant__ int xdim1_update_halo_kernel1_l2;
int xdim1_update_halo_kernel1_l2_h = -1;
__constant__ int xdim2_update_halo_kernel1_l2;
int xdim2_update_halo_kernel1_l2_h = -1;
__constant__ int xdim3_update_halo_kernel1_l2;
int xdim3_update_halo_kernel1_l2_h = -1;
__constant__ int xdim4_update_halo_kernel1_l2;
int xdim4_update_halo_kernel1_l2_h = -1;
__constant__ int xdim5_update_halo_kernel1_l2;
int xdim5_update_halo_kernel1_l2_h = -1;

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5

// 2D accessor macros: translate a relative (x, y) offset into a flat array
// offset, using the dat's x-dimension as the row stride.
#define OPS_ACC0(x, y) (x + xdim0_update_halo_kernel1_l2 * (y))
#define OPS_ACC1(x, y) (x + xdim1_update_halo_kernel1_l2 * (y))
#define OPS_ACC2(x, y) (x + xdim2_update_halo_kernel1_l2 * (y))
#define OPS_ACC3(x, y) (x + xdim3_update_halo_kernel1_l2 * (y))
#define OPS_ACC4(x, y) (x + xdim4_update_halo_kernel1_l2 * (y))
#define OPS_ACC5(x, y) (x + xdim5_update_halo_kernel1_l2 * (y))
// User function for the "l2" halo update in 2D.  For every field whose flag
// in `fields` equals 1, the value at relative offset (3, 0) is copied into
// the point at (0, 0).  Pointers arrive already offset to the current grid
// point by the CUDA wrapper; OPS_ACCn turns relative offsets into flat ones.
// (Judging by the name, (0,0) is a left-boundary halo cell — confirm against
// the OPS halo-update scheme.)
__device__ inline void
update_halo_kernel1_l2_gpu(double *density0, double *energy0,
                           double *energy1, double *u, double *p,
                           double *sd, const int *fields) {
  if (fields[FIELD_DENSITY] == 1)
    density0[OPS_ACC0(0, 0)] = density0[OPS_ACC0(3, 0)];
  if (fields[FIELD_ENERGY0] == 1)
    energy0[OPS_ACC1(0, 0)] = energy0[OPS_ACC1(3, 0)];
  if (fields[FIELD_ENERGY1] == 1)
    energy1[OPS_ACC2(0, 0)] = energy1[OPS_ACC2(3, 0)];
  if (fields[FIELD_U] == 1)
    u[OPS_ACC3(0, 0)] = u[OPS_ACC3(3, 0)];
  if (fields[FIELD_P] == 1)
    p[OPS_ACC4(0, 0)] = p[OPS_ACC4(3, 0)];
  if (fields[FIELD_SD] == 1)
    sd[OPS_ACC5(0, 0)] = sd[OPS_ACC5(3, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
// CUDA wrapper: one thread per point of the (size0 x size1) iteration patch.
// Each thread advances the six dat pointers to its own (x, y) element using
// the per-dat x-dimensions held in constant memory, then runs the user
// function.  Threads mapped outside the patch return without touching memory.
__global__ void
ops_update_halo_kernel1_l2(double *__restrict arg0, double *__restrict arg1,
                           double *__restrict arg2, double *__restrict arg3,
                           double *__restrict arg4, double *__restrict arg5,
                           const int *__restrict arg6, int size0, int size1) {
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= size0 || y >= size1)
    return;
  update_halo_kernel1_l2_gpu(arg0 + x + y * xdim0_update_halo_kernel1_l2,
                             arg1 + x + y * xdim1_update_halo_kernel1_l2,
                             arg2 + x + y * xdim2_update_halo_kernel1_l2,
                             arg3 + x + y * xdim3_update_halo_kernel1_l2,
                             arg4 + x + y * xdim4_update_halo_kernel1_l2,
                             arg5 + x + y * xdim5_update_halo_kernel1_l2, arg6);
}
// Host stub function: clips the iteration range to this rank, refreshes the
// constant-memory x-dimensions if needed, stages the global `fields` array,
// computes per-dat base pointers, and launches the CUDA kernel.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block,
                                         int dim, int *range, ops_arg arg0,
                                         ops_arg arg1, ops_arg arg2,
                                         ops_arg arg3, ops_arg arg4,
                                         ops_arg arg5, ops_arg arg6) {
#else
// Lazy path: arguments were captured into a descriptor by the wrapper below.
void ops_par_loop_update_halo_kernel1_l2_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
#endif

  // Timing
  double t1, t2, c1, c2;
  ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};

#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 7, range, 53))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(53, "update_halo_kernel1_l2");
    OPS_kernels[53].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[2];
  int end[2];
#if OPS_MPI && !OPS_LAZY
  // Under MPI, clip the global range to the portion owned by this rank,
  // extending past the owned block only at physical (no-neighbour) edges.
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 2; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    // No neighbour on the low side: keep a negative range start as-is.
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    // No neighbour on the high side: extend if the range overshoots.
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 2; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);

  // Upload the per-dat x-dimensions to constant memory only when any of them
  // changed since the previous launch (the *_h shadows cache them).
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  int xdim5 = args[5].dat->size[0];
  if (xdim0 != xdim0_update_halo_kernel1_l2_h ||
      xdim1 != xdim1_update_halo_kernel1_l2_h ||
      xdim2 != xdim2_update_halo_kernel1_l2_h ||
      xdim3 != xdim3_update_halo_kernel1_l2_h ||
      xdim4 != xdim4_update_halo_kernel1_l2_h ||
      xdim5 != xdim5_update_halo_kernel1_l2_h) {
    cudaMemcpyToSymbol(xdim0_update_halo_kernel1_l2, &xdim0, sizeof(int));
    xdim0_update_halo_kernel1_l2_h = xdim0;
    cudaMemcpyToSymbol(xdim1_update_halo_kernel1_l2, &xdim1, sizeof(int));
    xdim1_update_halo_kernel1_l2_h = xdim1;
    cudaMemcpyToSymbol(xdim2_update_halo_kernel1_l2, &xdim2, sizeof(int));
    xdim2_update_halo_kernel1_l2_h = xdim2;
    cudaMemcpyToSymbol(xdim3_update_halo_kernel1_l2, &xdim3, sizeof(int));
    xdim3_update_halo_kernel1_l2_h = xdim3;
    cudaMemcpyToSymbol(xdim4_update_halo_kernel1_l2, &xdim4, sizeof(int));
    xdim4_update_halo_kernel1_l2_h = xdim4;
    cudaMemcpyToSymbol(xdim5_update_halo_kernel1_l2, &xdim5, sizeof(int));
    xdim5_update_halo_kernel1_l2_h = xdim5;
  }

  int *arg6h = (int *)arg6.data;
  // Ceiling division: enough blocks to cover the whole local patch.
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  // Stage the global `fields` array in the shared consts buffers and move it
  // to the device; arg6.data / arg6.data_d are repointed at the staged copies.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg6.data = OPS_consts_h + consts_bytes;
  arg6.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg6.data)[d] = arg6h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  // Bytes per element in the x direction (per-component when laid out SoA).
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);

  char *p_a[7];
  // set up initial pointers: byte offset of (start[0], start[1]) within each
  // dat, honouring the stencil strides, added to the device base pointer.
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
  p_a[4] = (char *)args[4].data_d + base4;
  int base5 = args[5].dat->base_offset +
              dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5 +
          dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
  p_a[5] = (char *)args[5].data_d + base5;

#ifndef OPS_LAZY
  // Ensure device copies are current and halos are exchanged before running.
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args, 7, range);
#endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[53].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  ops_update_halo_kernel1_l2<<<grid, tblock>>>(
      (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
      (double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d, x_size, y_size);

  cutilSafeCall(cudaGetLastError());

  if (OPS_diags > 1) {
    // Synchronize only when timing, so the measured time covers the kernel.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[53].time += t1 - t2;
  }

#ifndef OPS_LAZY
  // Mark device copies as modified so later host reads trigger a download.
  ops_set_dirtybit_device(args, 7);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  ops_set_halo_dirtybit3(&args[2], range);
  ops_set_halo_dirtybit3(&args[3], range);
  ops_set_halo_dirtybit3(&args[4], range);
  ops_set_halo_dirtybit3(&args[5], range);
#endif

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[53].mpi_time += t2 - t1;
    OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg5);
  }
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block,
                                         int dim, int *range, ops_arg arg0,
                                         ops_arg arg1, ops_arg arg2,
                                         ops_arg arg3, ops_arg arg4,
                                         ops_arg arg5, ops_arg arg6) {
  // Lazy-execution entry point: rather than running the loop now, record a
  // kernel descriptor that the OPS runtime executes later through
  // ops_par_loop_update_halo_kernel1_l2_execute().
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 53; // this kernel's slot in the OPS bookkeeping tables

  // djb2-style hash over the kernel id, iteration range and dat indices;
  // the runtime uses it to recognise repeated invocations of this loop.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 53;
  for (int i = 0; i < 4; i++) {
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }

  desc->nargs = 7;
  desc->args = (ops_arg *)malloc(7 * sizeof(ops_arg));
  ops_arg all_args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
  for (int i = 0; i < 7; i++)
    desc->args[i] = all_args[i];
  for (int i = 0; i < 6; i++) // arg6 is a gbl argument and has no dat
    desc->hash = ((desc->hash << 5) + desc->hash) + all_args[i].dat->index;

  // arg6 carries host data (the fields flags); snapshot it so the caller's
  // buffer may change or be freed before the deferred kernel actually runs.
  char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
  memcpy(tmp, arg6.data, NUM_FIELDS * sizeof(int));
  desc->args[6].data = tmp;

  desc->function = ops_par_loop_update_halo_kernel1_l2_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(53, "update_halo_kernel1_l2");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
ebcc99bb3a17d3f666b06172dca31793d7152647.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
// Convolve one color channel with a filterWidth x filterWidth weight array.
// One thread produces one output pixel.  Neighbour coordinates falling
// outside the image are clamped to the nearest edge pixel (replicate-border
// semantics), matching the sequential reference solution.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;

  // Threads mapped beyond the image edge must not touch memory at all.
  if (col >= numCols || row >= numRows)
  {
    return;
  }

  const int halo = filterWidth / 2;
  float acc = 0.f;
  for (int dr = -halo; dr <= halo; ++dr)
  {
    // Clamp the neighbour row once per filter row.
    const int r = min(max(row + dr, 0), numRows - 1);
    for (int dc = -halo; dc <= halo; ++dc)
    {
      const int c = min(max(col + dc, 0), numCols - 1);
      const float w = filter[(dr + halo) * filterWidth + (dc + halo)];
      acc += w * static_cast<float>(inputChannel[r * numCols + c]);
    }
  }

  // NOTE(review): float -> unsigned char truncates; assumes the filter
  // weights sum to 1 so the accumulator stays within [0, 255].
  outputChannel[row * numCols + col] = acc;
}
// Split an interleaved RGBA image (AoS) into three planar channel arrays
// (SoA).  The alpha component is discarded.  One thread handles one pixel.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;

  // Guard against threads mapped outside the image.
  if (col >= numCols || row >= numRows)
  {
    return;
  }

  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx] = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx] = pixel.z;
}
// Merge three planar color channels (SoA) back into one interleaved RGBA
// image (AoS).  Alpha is fixed at 255, i.e. fully opaque.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;

  // Guard against threads mapped outside the image.
  if (col >= numCols || row >= numRows)
    return;

  const int idx = row * numCols + col;
  outputImageRGBA[idx] = make_uchar4(redChannel[idx], greenChannel[idx],
                                     blueChannel[idx], 255);
}
// Device buffers shared between allocateMemoryAndCopyToGPU(), the blur
// pipeline in your_gaussian_blur(), and cleanup(): one plane per color
// channel plus the filter weights.
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocate the per-channel device buffers and upload the blur filter.
// Writes the file-scope globals d_red / d_green / d_blue / d_filter, which
// are released later by cleanup().
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;

  // One device buffer per separated color channel.
  checkCudaErrors(hipMalloc(&d_red, channelBytes));
  checkCudaErrors(hipMalloc(&d_green, channelBytes));
  checkCudaErrors(hipMalloc(&d_blue, channelBytes));

  // The (square) filter lives on the device for the convolution kernel.
  checkCudaErrors(hipMalloc(&d_filter, filterBytes));
  checkCudaErrors(hipMemcpy(d_filter, h_filter, filterBytes, hipMemcpyHostToDevice));
}
// Orchestrate the blur: split the RGBA image into planes, blur each plane
// with the uploaded filter, then recombine into the output image.  Every
// launch uses the same 2D 32x32 thread block tiling over the whole image,
// with a sync + error check after each kernel.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  const size_t block_size = 32; // 32x32 = 1024 threads per block
  const dim3 blockSize(block_size, block_size, 1);
  // Integer ceiling division so partial edge tiles are still covered.
  const dim3 gridSize((numCols + block_size - 1) / block_size,
                      (numRows + block_size - 1) / block_size, 1);

  // 1) AoS -> SoA: pull the three color planes out of the interleaved image.
  hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // 2) Blur each plane independently with the shared filter.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // 3) SoA -> AoS: merge the blurred planes back into an RGBA image.
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_redBlurred, d_greenBlurred, d_blueBlurred,
                     d_outputImageRGBA, numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
// Release every device allocation made in allocateMemoryAndCopyToGPU().
void cleanup() {
  // Freed in reverse order of allocation; the order is not significant.
  checkCudaErrors(hipFree(d_filter));
  checkCudaErrors(hipFree(d_blue));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_red));
}
| ebcc99bb3a17d3f666b06172dca31793d7152647.cu | #include <stdio.h>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // One thread per output pixel; threads mapped past the image edge exit early.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;

  // filterWidth is odd (square filter centered on the pixel).
  const int halfWidth = filterWidth / 2;
  float weightedSum = 0.f;

  // Slide the filter window over the pixel, clamping out-of-bounds reads to
  // the nearest edge pixel (same semantics as the sequential reference).
  for (int dy = -halfWidth; dy <= halfWidth; ++dy) {
    for (int dx = -halfWidth; dx <= halfWidth; ++dx) {
      int sampleRow = min(max(row + dy, 0), numRows - 1);
      int sampleCol = min(max(col + dx, 0), numCols - 1);

      const float pixel  = static_cast<float>(inputChannel[sampleRow * numCols + sampleCol]);
      const float weight = filter[(dy + halfWidth) * filterWidth + (dx + halfWidth)];
      weightedSum += pixel * weight;
    }
  }

  // Accumulation happens in float; the store narrows to unsigned char.
  outputChannel[row * numCols + col] = weightedSum;
}
// Splits an interleaved RGBA image (AoS) into three single-channel arrays
// (SoA). The alpha component is deliberately dropped.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  // One thread per pixel; out-of-image threads do nothing.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;

  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];

  redChannel[idx]   = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx]  = pixel.z;
}
// Recombines three single-channel arrays into one interleaved RGBA image.
// The alpha channel is fixed at 255 (fully opaque).
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;

  // Threads mapped outside the image return early so they never touch memory.
  if (col >= numCols || row >= numRows)
    return;

  const int idx = row * numCols + col;
  outputImageRGBA[idx] =
      make_uchar4(redChannel[idx], greenChannel[idx], blueChannel[idx], 255);
}
// Device-side scratch buffers shared by the functions below:
// one buffer per color channel, plus the blur filter weights.
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the per-channel device buffers and copies the (square) blur
// filter from the host into device memory. Must be called before
// your_gaussian_blur(); cleanup() releases everything allocated here.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  const size_t filterBytes  = sizeof(float) * filterWidth * filterWidth;

  // One device buffer per color channel.
  checkCudaErrors(cudaMalloc(&d_red,   channelBytes));
  checkCudaErrors(cudaMalloc(&d_green, channelBytes));
  checkCudaErrors(cudaMalloc(&d_blue,  channelBytes));

  // The filter lives on the device too; upload the host weights.
  checkCudaErrors(cudaMalloc(&d_filter, filterBytes));
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}
// Orchestrates the full blur pipeline on the GPU:
// split RGBA -> blur each channel -> recombine into d_outputImageRGBA.
// Assumes allocateMemoryAndCopyToGPU() has populated d_red/d_green/d_blue
// and d_filter. Each launch is followed by a sync + error check.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // 32x32 = 1024 threads per block.
  const size_t block_size = 32;
  const dim3 blockSize(block_size, block_size, 1);

  // Integer ceiling division (instead of floating-point ceil) so partial
  // tiles at the right/bottom edges are still covered, with no FP rounding.
  const dim3 gridSize((numCols + block_size - 1) / block_size,
                      (numRows + block_size - 1) / block_size, 1);

  // Split the interleaved RGBA image into three contiguous channel arrays.
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
                                            numRows,
                                            numCols,
                                            d_red,
                                            d_green,
                                            d_blue);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Blur each channel independently with the same filter.
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // (The original code had a second, redundant synchronize/check here.)

  // Recombine the blurred channels into the RGBA output (alpha = 255).
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Frees the device buffers allocated in allocateMemoryAndCopyToGPU()
// (three channel buffers plus the filter).
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
717bdee869017db0d26ed99f1f2e7737d90618b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This CUDA program implements vector addition on both the CPU & GPU
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Function declarations
float *CPU_add_vectors(float *A, float *B, int N);
float *GPU_add_vectors(float *A, float *B, int N);
float *get_random_vector(int N);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void check_error(hipError_t e);
// The number of blocks and threads per blocks in the GPU kernel. If we define them as constant as being
// done here, then we can use its value in the kernel, for example to statically declare an array in
// shared memory. Note that to determine the best block count and threads per block for a particular GPU
// you should check its hardware specification. You can lose performance substantially due to a wrong
// choice for these parameters.
const int BLOCK_COUNT = 14;
const int THREADS_PER_BLOCK = 256;
// Driver: generates two random vectors, sums them on both CPU and GPU,
// reports timing/speedup, and validates the GPU result against the CPU one.
// Fix over the original: the four result/input buffers are now freed
// (they previously leaked), and main returns 0 explicitly.
int main(int argc, char **argv) {
  // Seed the random generator (use a constant here for repeatable results)
  srand(5);

  // Determine the vector length: default, or user-specified on the command line.
  int N = 100000;
  if (argc > 1) N = atoi(argv[1]);

  // Generate two random vectors
  long long vector_start_time = start_timer();
  float *A = get_random_vector(N);
  float *B = get_random_vector(N);
  stop_timer(vector_start_time, "Vector generation");

  // Compute their sum on the CPU
  long long CPU_start_time = start_timer();
  float *C_CPU = CPU_add_vectors(A, B, N);
  long long CPU_time = stop_timer(CPU_start_time, "\nCPU");

  // Compute their sum on the GPU
  long long GPU_start_time = start_timer();
  float *C_GPU = GPU_add_vectors(A, B, N);
  long long GPU_time = stop_timer(GPU_start_time, "\tTotal");

  // Compute the speedup or slowdown
  if (GPU_time > CPU_time) {
    printf("\nCPU outperformed GPU by %.2fx\n", (float) GPU_time / (float) CPU_time);
  } else {
    printf("\nGPU outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
  }

  // Check the correctness of the GPU results (absolute tolerance; bit-exact
  // equality is not expected between CPU and GPU float arithmetic).
  int num_wrong = 0;
  for (int i = 0; i < N; i++) {
    if (fabs(C_CPU[i] - C_GPU[i]) > 0.0001) {
      printf("Values differs at index %d CPU:%f\tGPU:%f\n", i, C_CPU[i], C_GPU[i]);
      num_wrong++;
    }
  }

  // Report the correctness results
  if (num_wrong) {
    printf("\n%d / %d values incorrect\n", num_wrong, N);
  } else {
    printf("\nAll values correct\n");
  }

  // Release the host buffers (previously leaked).
  free(A);
  free(B);
  free(C_CPU);
  free(C_GPU);
  return 0;
}
// A GPU kernel that computes the element-wise vector sum C = A + B.
// Uses a grid-stride loop, so any grid/block configuration covers all N
// elements regardless of how many threads are launched.
__global__ void add_vectors_kernel(float *A, float *B, float *C, int N) {
  const int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride) {
    C[i] = A[i] + B[i];
  }
}
// Returns the vector sum A + B (computed on the GPU).
// Allocates device buffers, copies the inputs up, launches the kernel, and
// copies the result into freshly malloc'd host memory that the caller owns
// and must free(). Timing for each phase is printed via stop_timer().
float *GPU_add_vectors(float *A_CPU, float *B_CPU, int N) {
long long memory_start_time = start_timer();
// Allocate GPU memory for the inputs and the result
int vector_size = N * sizeof(float);
float *A_GPU, *B_GPU, *C_GPU;
check_error(hipMalloc((void **) &A_GPU, vector_size));
check_error(hipMalloc((void **) &B_GPU, vector_size));
check_error(hipMalloc((void **) &C_GPU, vector_size));
// Transfer the input vectors to GPU memory
check_error(hipMemcpy(A_GPU, A_CPU, vector_size, hipMemcpyHostToDevice));
check_error(hipMemcpy(B_GPU, B_CPU, vector_size, hipMemcpyHostToDevice));
stop_timer(memory_start_time, "\nGPU:\tTransfer to GPU");
// Execute the kernel to compute the vector sum on the GPU
long long kernel_start_time = start_timer();
// Note that we are using a one dimensional grid in this calculation as that is ideal for this
// particular problem. For some other problem, a 2D or even a 3D grid may be appropriate. The
// dimensionality of the grid is supposed to help you decompose the algorithmic logic inside the
// GPU kernel. In particular, how you decide what thread should do what instruction. It does not
// affect the performance of the kernel.
hipLaunchKernelGGL(( add_vectors_kernel) , dim3(BLOCK_COUNT), dim3(THREADS_PER_BLOCK), 0, 0, A_GPU, B_GPU, C_GPU, N);
// make the CPU main thread wait for the GPU kernel call to complete
hipDeviceSynchronize(); // This is only needed for timing and error-checking purposes
stop_timer(kernel_start_time, "\tKernel execution");
// Check for kernel errors (launch failures surface via hipGetLastError)
check_error(hipGetLastError());
// Allocate CPU memory for the result
float *C_CPU = (float *) malloc(vector_size);
if (C_CPU == NULL) die("Error allocating CPU memory");
// Transfer the result from the GPU to the CPU
memory_start_time = start_timer();
check_error(hipMemcpy(C_CPU, C_GPU, vector_size, hipMemcpyDeviceToHost));
stop_timer(memory_start_time, "\tTransfer from GPU");
// Free the GPU memory
check_error(hipFree(A_GPU));
check_error(hipFree(B_GPU));
check_error(hipFree(C_GPU));
return C_CPU;
}
// Returns the vector sum A + B
float *CPU_add_vectors(float *A, float *B, int N) {
// Allocate memory for the result
float *C = (float *) malloc(N * sizeof(float));
if (C == NULL) die("Error allocating CPU memory");
// Compute the sum;
for (int i = 0; i < N; i++) C[i] = A[i] + B[i];
// Return the result
return C;
}
// Returns a randomized vector containing N elements
float *get_random_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V = (float *) malloc(N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = (float) rand() / (float) rand();
// Return the randomized vector
return V;
}
// Returns the current wall-clock time in microseconds.
long long start_timer() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed (in seconds) since start_time, prefixed by label,
// and returns the elapsed microseconds.
long long stop_timer(long long start_time, const char *label) {
  struct timeval now;
  gettimeofday(&now, NULL);
  long long end_time = now.tv_sec * 1000000 + now.tv_usec;
  long long elapsed = end_time - start_time;
  printf("%s: %.5f sec\n", label, ((float) elapsed) / (1000 * 1000));
  return elapsed;
}
// Prints the specified message and terminates the process with exit status 1.
void die(const char *message) {
printf("%s\n", message);
exit(1);
}
// If the specified error code refers to a real error, report it and quit the
// program with exit status 1. Used to wrap every HIP runtime call above.
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
| 717bdee869017db0d26ed99f1f2e7737d90618b0.cu | // This CUDA program implements vector addition on both the CPU & GPU
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Function declarations
float *CPU_add_vectors(float *A, float *B, int N);
float *GPU_add_vectors(float *A, float *B, int N);
float *get_random_vector(int N);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void check_error(cudaError e);
// The number of blocks and threads per blocks in the GPU kernel. If we define them as constant as being
// done here, then we can use its value in the kernel, for example to statically declare an array in
// shared memory. Note that to determine the best block count and threads per block for a particular GPU
// you should check its hardware specification. You can lose performance substantially due to a wrong
// choice for these parameters.
const int BLOCK_COUNT = 14;
const int THREADS_PER_BLOCK = 256;
// Driver: generates two random vectors, sums them on both CPU and GPU,
// reports timing/speedup, and validates the GPU result against the CPU one.
// Fix over the original: the four result/input buffers are now freed
// (they previously leaked), and main returns 0 explicitly.
int main(int argc, char **argv) {
  // Seed the random generator (use a constant here for repeatable results)
  srand(5);

  // Determine the vector length: default, or user-specified on the command line.
  int N = 100000;
  if (argc > 1) N = atoi(argv[1]);

  // Generate two random vectors
  long long vector_start_time = start_timer();
  float *A = get_random_vector(N);
  float *B = get_random_vector(N);
  stop_timer(vector_start_time, "Vector generation");

  // Compute their sum on the CPU
  long long CPU_start_time = start_timer();
  float *C_CPU = CPU_add_vectors(A, B, N);
  long long CPU_time = stop_timer(CPU_start_time, "\nCPU");

  // Compute their sum on the GPU
  long long GPU_start_time = start_timer();
  float *C_GPU = GPU_add_vectors(A, B, N);
  long long GPU_time = stop_timer(GPU_start_time, "\tTotal");

  // Compute the speedup or slowdown
  if (GPU_time > CPU_time) {
    printf("\nCPU outperformed GPU by %.2fx\n", (float) GPU_time / (float) CPU_time);
  } else {
    printf("\nGPU outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
  }

  // Check the correctness of the GPU results (absolute tolerance; bit-exact
  // equality is not expected between CPU and GPU float arithmetic).
  int num_wrong = 0;
  for (int i = 0; i < N; i++) {
    if (fabs(C_CPU[i] - C_GPU[i]) > 0.0001) {
      printf("Values differs at index %d CPU:%f\tGPU:%f\n", i, C_CPU[i], C_GPU[i]);
      num_wrong++;
    }
  }

  // Report the correctness results
  if (num_wrong) {
    printf("\n%d / %d values incorrect\n", num_wrong, N);
  } else {
    printf("\nAll values correct\n");
  }

  // Release the host buffers (previously leaked).
  free(A);
  free(B);
  free(C_CPU);
  free(C_GPU);
  return 0;
}
// A GPU kernel that computes the element-wise vector sum C = A + B.
// Uses a grid-stride loop, so any grid/block configuration covers all N
// elements regardless of how many threads are launched.
__global__ void add_vectors_kernel(float *A, float *B, float *C, int N) {
  const int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride) {
    C[i] = A[i] + B[i];
  }
}
// Returns the vector sum A + B (computed on the GPU).
// Allocates device buffers, copies the inputs up, launches the kernel, and
// copies the result into freshly malloc'd host memory that the caller owns
// and must free(). Timing for each phase is printed via stop_timer().
float *GPU_add_vectors(float *A_CPU, float *B_CPU, int N) {
  long long memory_start_time = start_timer();

  // Allocate GPU memory for the inputs and the result.
  int vector_size = N * sizeof(float);
  float *A_GPU, *B_GPU, *C_GPU;
  check_error(cudaMalloc((void **) &A_GPU, vector_size));
  check_error(cudaMalloc((void **) &B_GPU, vector_size));
  check_error(cudaMalloc((void **) &C_GPU, vector_size));

  // Transfer the input vectors to GPU memory.
  check_error(cudaMemcpy(A_GPU, A_CPU, vector_size, cudaMemcpyHostToDevice));
  check_error(cudaMemcpy(B_GPU, B_CPU, vector_size, cudaMemcpyHostToDevice));
  stop_timer(memory_start_time, "\nGPU:\tTransfer to GPU");

  // Execute the kernel to compute the vector sum on the GPU.
  long long kernel_start_time = start_timer();
  // A 1D grid suits this 1D problem; the grid-stride loop inside the kernel
  // makes any BLOCK_COUNT / THREADS_PER_BLOCK combination correct.
  add_vectors_kernel <<<BLOCK_COUNT, THREADS_PER_BLOCK>>> (A_GPU, B_GPU, C_GPU, N);

  // Make the CPU main thread wait for the GPU kernel call to complete.
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is its
  // direct replacement. Only needed here for timing and error-checking.
  cudaDeviceSynchronize();
  stop_timer(kernel_start_time, "\tKernel execution");

  // Check for kernel errors (launch failures surface via cudaGetLastError).
  check_error(cudaGetLastError());

  // Allocate CPU memory for the result.
  float *C_CPU = (float *) malloc(vector_size);
  if (C_CPU == NULL) die("Error allocating CPU memory");

  // Transfer the result from the GPU to the CPU.
  memory_start_time = start_timer();
  check_error(cudaMemcpy(C_CPU, C_GPU, vector_size, cudaMemcpyDeviceToHost));
  stop_timer(memory_start_time, "\tTransfer from GPU");

  // Free the GPU memory.
  check_error(cudaFree(A_GPU));
  check_error(cudaFree(B_GPU));
  check_error(cudaFree(C_GPU));

  return C_CPU;
}
// Returns the vector sum A + B
float *CPU_add_vectors(float *A, float *B, int N) {
// Allocate memory for the result
float *C = (float *) malloc(N * sizeof(float));
if (C == NULL) die("Error allocating CPU memory");
// Compute the sum;
for (int i = 0; i < N; i++) C[i] = A[i] + B[i];
// Return the result
return C;
}
// Returns a randomized vector containing N elements
float *get_random_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V = (float *) malloc(N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = (float) rand() / (float) rand();
// Return the randomized vector
return V;
}
// Returns the current wall-clock time in microseconds.
long long start_timer() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed (in seconds) since start_time, prefixed by label,
// and returns the elapsed microseconds.
long long stop_timer(long long start_time, const char *label) {
  struct timeval now;
  gettimeofday(&now, NULL);
  long long end_time = now.tv_sec * 1000000 + now.tv_usec;
  long long elapsed = end_time - start_time;
  printf("%s: %.5f sec\n", label, ((float) elapsed) / (1000 * 1000));
  return elapsed;
}
// Prints the specified message and terminates the process with exit status 1.
void die(const char *message) {
printf("%s\n", message);
exit(1);
}
// If the specified error code refers to a real error, report it and quit the
// program with exit status 1. Used to wrap every CUDA runtime call above.
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
|
9c17913fbc9bb61f26d49996c3229735525bdae9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/sequence_ops.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace {
// One thread block per sequence: copies sequence `element_idx` from `in` to
// `out`, writing `start_padding_width_blocks` blocks of padding before it and
// `end_padding_width_blocks` after. Padding values come from
// padding_start_ptr / padding_end_ptr (one block of values, cycled via
// i % block_size), or zero when those pointers are null. If `lengths_out` is
// non-null, the padded per-sequence length (in blocks) is recorded there.
template <typename T>
__global__ void AddPaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
const T* padding_start_ptr,
int start_padding_width_blocks,
const T* padding_end_ptr,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
// Total padding inserted before this element by all preceding elements.
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
// NOTE(review): lengths_prefix_sum is dereferenced here before the null
// check below; this is safe only because a null prefix sum implies
// lengths_size == 1 and hence element_idx == 0, which short-circuits the
// ternary — verify this invariant holds at every call site.
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] + prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
// Convert block counts into scalar element counts.
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
int end_padding_width = end_padding_width_blocks * block_size;
// start pad
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < start_padding_width; i += blockDim.x) {
T fill = padding_start_ptr ? padding_start_ptr[i % block_size] : T(0);
out_ptr[i] = fill;
}
// payload
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[i + start_padding_width] = in[in_start_idx + i];
}
// end pad
for (int i = threadIdx.x; i < end_padding_width; i += blockDim.x) {
T fill = padding_end_ptr ? padding_end_ptr[i % block_size] : T(0);
out_ptr[i + start_padding_width + len] = fill;
}
// update the lengths (single writer per block)
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks + start_padding_width_blocks + end_padding_width_blocks;
}
}
// Inverse of AddPaddingKernel: one thread block per sequence, stripping
// start/end padding while copying the payload. `lengths_prefix_sum` is the
// inclusive prefix sum of the *padded* input lengths. If `lengths_out` is
// non-null, the unpadded per-sequence length (in blocks) is recorded there.
template <typename T>
__global__ void RemovePaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
int start_padding_width_blocks,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
// Total padding removed before this element by all preceding elements.
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
// NOTE(review): as in AddPaddingKernel, the deref here is safe only because
// a null prefix sum implies lengths_size == 1 and element_idx == 0.
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] - prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
// payload
// NOTE(review): this indexing looks suspect for element_idx > 0 —
// `out_ptr` already includes out_start_idx, yet in_start_idx is added to
// the *output* index while the read from `in` ignores in_start_idx
// entirely; the expected form would be
//   out_ptr[i] = in[in_start_idx + i + start_padding_width]
// with the loop bound excluding the padding widths. Verify against the CPU
// reference implementation before changing.
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[in_start_idx + i] = in[i + start_padding_width];
}
// update the lengths (single writer per block)
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks - (start_padding_width_blocks + end_padding_width_blocks);
}
}
// Computes the inclusive (Inclusive=true) or exclusive prefix sum of
// `lengths` into `prefix_sum` on the GPU using CUB's DeviceScan, following
// CUB's standard two-phase protocol: a first call with a null temp-storage
// pointer only reports the required scratch size, then the real scan runs
// with scratch carved out of `prefix_buffer`. Both calls are enqueued on the
// context's stream; no host synchronization is performed here.
template <bool Inclusive = true>
void lengths_prefix_sum(
const int32_t* lengths,
int32_t num_items,
Tensor* prefix_buffer,
Tensor* prefix_sum,
CUDAContext* context) {
// Phase 1: retrieve the temp-storage size (null storage pointer).
size_t temp_storage_bytes = 0;
prefix_sum->Resize(num_items);
if (Inclusive) {
hipcub::DeviceScan::InclusiveSum(
NULL,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
} else {
hipcub::DeviceScan::ExclusiveSum(
NULL,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
}
// Allocate temporary storage, rounded up to whole int32_t elements since
// the scratch tensor is typed.
auto buffer_size = (temp_storage_bytes + sizeof(int32_t)) / sizeof(int32_t);
prefix_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(prefix_buffer->template mutable_data<int32_t>());
// Phase 2: run the actual scan with real scratch space.
if (Inclusive) {
hipcub::DeviceScan::InclusiveSum(
d_temp_storage,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
} else {
hipcub::DeviceScan::ExclusiveSum(
d_temp_storage,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
}
}
} // namespace
// GPU implementation of AddPadding: pads each sequence in `in_ptr` with
// startPaddingWidth_ / endPaddingWidth_ blocks of padding and writes the
// result to `out_ptr`. One CUDA block is launched per sequence.
template <>
template <typename T>
bool AddPaddingOp<CUDAContext>::MakePadding(
const T* in_ptr,
T* out_ptr,
const int32_t* lengths_ptr,
int32_t lengths_size,
int32_t outer_size,
const T* padding_start_ptr,
const T* padding_end_ptr,
int64_t block_size) {
// Step 1: compute prefix sum over the lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
// Optional second output receives the per-sequence padded lengths.
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>());
lengths_out_ptr = lengths_out->template mutable_data<int32_t>();
}
// Nothing to pad; avoid launching a zero-block kernel.
if (lengths_size == 0) {
return true;
}
// Step 2: add the padding using the accumulated lengths
// (grid = one block per sequence).
hipLaunchKernelGGL(( AddPaddingKernel<T>)
, dim3(lengths_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
padding_start_ptr,
startPaddingWidth_,
padding_end_ptr,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(AddPadding, AddPaddingOp<CUDAContext>);
// GPU implementation of RemovePadding: strips startPaddingWidth_ /
// endPaddingWidth_ blocks of padding from each sequence of Input(0).
// Input(1), if present, holds the per-sequence *padded* lengths; otherwise
// the whole tensor is treated as a single sequence.
template <>
template <typename T>
bool RemovePaddingOp<CUDAContext>::DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.dim(), 1);
const int32_t outer_size = in.sizes()[0];
// Elements per "block" (row): product of all trailing dimensions.
const auto block_size = std::accumulate(
in.sizes().begin() + 1, in.sizes().end(), 1, std::multiplies<int64_t>());
// if no lengths is provided, assume it is a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.data<int32_t>();
lengths_size = lengths.numel();
}
// Output shrinks by the padding removed from every sequence.
auto out_dims = in.sizes().vec();
out_dims[0] -= (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
auto* out = Output(0, out_dims, at::dtype<T>());
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
// Step 1: compute prefix sum over the (padded) lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
// Optional second output receives the per-sequence unpadded lengths.
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>());
lengths_out_ptr = lengths_out->template mutable_data<int32_t>();
}
// Nothing to strip; avoid launching a zero-block kernel.
if (lengths_size == 0) {
return true;
}
// Step 2: remove the padding using the accumulated lengths
// (grid = one block per sequence).
hipLaunchKernelGGL(( RemovePaddingKernel<T>)
, dim3(lengths_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
startPaddingWidth_,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
// Sums the padding regions of all K sequences, one feature column at a time.
// K: number of sequences; N: block (feature) size; X: padded input laid out
// as rows of N scalars; I: per-sequence lengths (including padding);
// L: exclusive prefix sum of I (row offset of each sequence's first row).
// Each CUDA block owns a strided subset of the N columns; within a block,
// threads accumulate partial sums over all K start-padding rows (width
// Y0Width, written to Y0) and end-padding rows (width Y1Width, written to
// Y1), then combine them with block-wide reductions. When Y0 == Y1, both
// sums are folded into the single output.
template <typename T>
__global__ void gather_padding_kernel(
const int K,
const int N,
const int Y0Width,
const int Y1Width,
const T* X,
const int* I,
const int* L,
T* Y0,
T* Y1) {
// NOTE(review): the reduction is hard-coded to float even though the kernel
// is templated on T — for T = double this narrows the accumulator; confirm
// this is intentional.
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage y0_tmp;
__shared__ typename BlockReduce::TempStorage y1_tmp;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T sum_1 = T(0);
T sum_2 = T(0);
// Partial sum over every sequence's start-padding rows for column i.
for (int j = threadIdx.x; j < K * Y0Width; j += blockDim.x) {
const int j1 = j / Y0Width;
const int j2 = j % Y0Width;
const int idx1 = N * (L[j1] + j2);
sum_1 += X[idx1 + i];
}
// Partial sum over every sequence's end-padding rows for column i.
for (int j = threadIdx.x; j < K * Y1Width; j += blockDim.x) {
const int j1 = j / Y1Width;
const int j2 = j % Y1Width;
const int idx1 = N * L[j1];
const int idx2 = idx1 + N * (I[j1] - Y1Width + j2);
sum_2 += X[idx2 + i];
}
sum_1 = BlockReduce(y0_tmp).Reduce(sum_1, hipcub::Sum());
sum_2 = BlockReduce(y1_tmp).Reduce(sum_2, hipcub::Sum());
// Thread 0 holds the block-wide result after the reduction.
if (threadIdx.x == 0) {
Y0[i] = sum_1;
Y0 != Y1 ? Y1[i] = sum_2 : Y0[i] = sum_1 + sum_2;
}
// Barrier before the shared TempStorage is reused by the next iteration.
__syncthreads();
}
}
// Gathers (sums) the start/end padding regions of `in_ptr` into
// padding_start_ptr / padding_end_ptr. Computes the exclusive prefix sum of
// the lengths first so the kernel can locate each sequence's first row.
template <>
template <typename T>
void GatherPaddingOp<CUDAContext>::GatherPadding(
const int outer_size,
const int lengths_size,
const int block_size,
const int pad_width,
const T* in_ptr,
const int* lengths_ptr,
T* padding_start_ptr,
T* padding_end_ptr) {
if (lengths_size > 0) {
// Exclusive scan: lengths_prefix_sum_[i] = row offset of sequence i.
lengths_prefix_sum<false>(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
// Grid covers the feature columns (capped at the max block count); the
// kernel grid-strides over the rest.
hipLaunchKernelGGL(( gather_padding_kernel<T>)
, dim3(min(block_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
lengths_size,
block_size,
startPaddingWidth_,
endPaddingWidth_,
in_ptr,
lengths_ptr,
lengths_prefix_sum_.template data<int>(),
padding_start_ptr,
padding_end_ptr);
}
}
REGISTER_CUDA_OPERATOR(RemovePadding, RemovePaddingOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(GatherPadding, GatherPaddingOp<CUDAContext>);
} // namespace caffe2
| 9c17913fbc9bb61f26d49996c3229735525bdae9.cu | #include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/sequence_ops.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void AddPaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
const T* padding_start_ptr,
int start_padding_width_blocks,
const T* padding_end_ptr,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] + prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
int end_padding_width = end_padding_width_blocks * block_size;
// start pad
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < start_padding_width; i += blockDim.x) {
T fill = padding_start_ptr ? padding_start_ptr[i % block_size] : T(0);
out_ptr[i] = fill;
}
// payload
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[i + start_padding_width] = in[in_start_idx + i];
}
// end pad
for (int i = threadIdx.x; i < end_padding_width; i += blockDim.x) {
T fill = padding_end_ptr ? padding_end_ptr[i % block_size] : T(0);
out_ptr[i + start_padding_width + len] = fill;
}
// update the lengths
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks + start_padding_width_blocks + end_padding_width_blocks;
}
}
template <typename T>
__global__ void RemovePaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
int start_padding_width_blocks,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] - prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
// payload
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[in_start_idx + i] = in[i + start_padding_width];
}
// update the lengths
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks - (start_padding_width_blocks + end_padding_width_blocks);
}
}
template <bool Inclusive = true>
void lengths_prefix_sum(
const int32_t* lengths,
int32_t num_items,
Tensor* prefix_buffer,
Tensor* prefix_sum,
CUDAContext* context) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
prefix_sum->Resize(num_items);
if (Inclusive) {
cub::DeviceScan::InclusiveSum(
NULL,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
} else {
cub::DeviceScan::ExclusiveSum(
NULL,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
}
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int32_t)) / sizeof(int32_t);
prefix_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(prefix_buffer->template mutable_data<int32_t>());
if (Inclusive) {
cub::DeviceScan::InclusiveSum(
d_temp_storage,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
} else {
cub::DeviceScan::ExclusiveSum(
d_temp_storage,
temp_storage_bytes,
lengths,
prefix_sum->template mutable_data<int32_t>(),
num_items,
context->cuda_stream());
}
}
} // namespace
template <>
template <typename T>
bool AddPaddingOp<CUDAContext>::MakePadding(
const T* in_ptr,
T* out_ptr,
const int32_t* lengths_ptr,
int32_t lengths_size,
int32_t outer_size,
const T* padding_start_ptr,
const T* padding_end_ptr,
int64_t block_size) {
// Step 1: compute prefix sum over the lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>());
lengths_out_ptr = lengths_out->template mutable_data<int32_t>();
}
if (lengths_size == 0) {
return true;
}
// Compute the padding using the accumulated lengths
AddPaddingKernel<T>
<<<lengths_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
padding_start_ptr,
startPaddingWidth_,
padding_end_ptr,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(AddPadding, AddPaddingOp<CUDAContext>);
template <>
template <typename T>
bool RemovePaddingOp<CUDAContext>::DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.dim(), 1);
const int32_t outer_size = in.sizes()[0];
const auto block_size = std::accumulate(
in.sizes().begin() + 1, in.sizes().end(), 1, std::multiplies<int64_t>());
// if no lengths is provided, assume it is a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.data<int32_t>();
lengths_size = lengths.numel();
}
auto out_dims = in.sizes().vec();
out_dims[0] -= (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
auto* out = Output(0, out_dims, at::dtype<T>());
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
// Step 1: compute prefix sum over the (padded) lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>());
lengths_out_ptr = lengths_out->template mutable_data<int32_t>();
}
if (lengths_size == 0) {
return true;
}
// Compute the padding using the accumulated lengths
RemovePaddingKernel<T>
<<<lengths_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
startPaddingWidth_,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
template <typename T>
__global__ void gather_padding_kernel(
const int K,
const int N,
const int Y0Width,
const int Y1Width,
const T* X,
const int* I,
const int* L,
T* Y0,
T* Y1) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage y0_tmp;
__shared__ typename BlockReduce::TempStorage y1_tmp;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T sum_1 = T(0);
T sum_2 = T(0);
for (int j = threadIdx.x; j < K * Y0Width; j += blockDim.x) {
const int j1 = j / Y0Width;
const int j2 = j % Y0Width;
const int idx1 = N * (L[j1] + j2);
sum_1 += X[idx1 + i];
}
for (int j = threadIdx.x; j < K * Y1Width; j += blockDim.x) {
const int j1 = j / Y1Width;
const int j2 = j % Y1Width;
const int idx1 = N * L[j1];
const int idx2 = idx1 + N * (I[j1] - Y1Width + j2);
sum_2 += X[idx2 + i];
}
sum_1 = BlockReduce(y0_tmp).Reduce(sum_1, cub::Sum());
sum_2 = BlockReduce(y1_tmp).Reduce(sum_2, cub::Sum());
if (threadIdx.x == 0) {
Y0[i] = sum_1;
Y0 != Y1 ? Y1[i] = sum_2 : Y0[i] = sum_1 + sum_2;
}
__syncthreads();
}
}
template <>
template <typename T>
void GatherPaddingOp<CUDAContext>::GatherPadding(
const int outer_size,
const int lengths_size,
const int block_size,
const int pad_width,
const T* in_ptr,
const int* lengths_ptr,
T* padding_start_ptr,
T* padding_end_ptr) {
if (lengths_size > 0) {
lengths_prefix_sum<false>(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
gather_padding_kernel<T>
<<<min(block_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
lengths_size,
block_size,
startPaddingWidth_,
endPaddingWidth_,
in_ptr,
lengths_ptr,
lengths_prefix_sum_.template data<int>(),
padding_start_ptr,
padding_end_ptr);
}
}
REGISTER_CUDA_OPERATOR(RemovePadding, RemovePaddingOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(GatherPadding, GatherPaddingOp<CUDAContext>);
} // namespace caffe2
|
f3c31d9e4efb99326c8218d6fcd0e4e2c364a70d.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include <stdexcept>
using namespace std;
void check(hipError_t ret) {
if (ret != hipSuccess) {
throw runtime_error(hipGetErrorString(ret));
}
}
void check() {
check(hipGetLastError());
}
| f3c31d9e4efb99326c8218d6fcd0e4e2c364a70d.cu | #include "utils.h"
#include <stdexcept>
using namespace std;
void check(cudaError_t ret) {
if (ret != cudaSuccess) {
throw runtime_error(cudaGetErrorString(ret));
}
}
void check() {
check(cudaGetLastError());
}
|
a18baaaf0dcbce087616d0be8024a6b00a2923b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace wilson {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_WILSON_DIRAC
#define DD_CLOVER 0
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#undef DD_CLOVER
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace wilson
// declare the dslash events
#include <dslash_events.cuh>
using namespace wilson;
#ifdef GPU_WILSON_DIRAC
template <typename sFloat, typename gFloat>
class WilsonDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200) // Fermi uses shared memory for common input
if (dslashParam.kernel_type == INTERIOR_KERNEL) { // Interior kernels use shared memory for common iunput
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else { // Exterior kernels use no shared memory
return 0;
}
#else // Pre-Fermi uses shared memory only for pseudo-registers
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
WilsonDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~WilsonDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(dslash, tp.grid, tp.block, tp.shared_bytes, stream,
dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1368ll : 1320ll) * in->VolumeCB(); } // FIXME for multi-GPU
};
#endif // GPU_WILSON_DIRAC
#include <dslash_policy.cuh>
// Wilson wrappers
void wilsonDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x, const double &k,
const int *commOverride, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_WILSON_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge %d and spinor %d precision not supported",
gauge.Precision(), in->Precision());
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new WilsonDslashCuda<double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new WilsonDslashCuda<float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new WilsonDslashCuda<short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Wilson dslash has not been built");
#endif // GPU_WILSON_DIRAC
}
}
| a18baaaf0dcbce087616d0be8024a6b00a2923b4.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace wilson {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_WILSON_DIRAC
#define DD_CLOVER 0
#include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover)
#undef DD_CLOVER
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace wilson
// declare the dslash events
#include <dslash_events.cuh>
using namespace wilson;
#ifdef GPU_WILSON_DIRAC
template <typename sFloat, typename gFloat>
class WilsonDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const double a;
protected:
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200) // Fermi uses shared memory for common input
if (dslashParam.kernel_type == INTERIOR_KERNEL) { // Interior kernels use shared memory for common iunput
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else { // Exterior kernels use no shared memory
return 0;
}
#else // Pre-Fermi uses shared memory only for pseudo-registers
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
WilsonDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double a, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), a(a)
{
bindSpinorTex<sFloat>(in, out, x);
}
virtual ~WilsonDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DSLASH(dslash, tp.grid, tp.block, tp.shared_bytes, stream,
dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a);
}
long long flops() const { return (x ? 1368ll : 1320ll) * in->VolumeCB(); } // FIXME for multi-GPU
};
#endif // GPU_WILSON_DIRAC
#include <dslash_policy.cuh>
// Wilson wrappers
void wilsonDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in,
const int parity, const int dagger, const cudaColorSpinorField *x, const double &k,
const int *commOverride, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#ifdef GPU_WILSON_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge %d and spinor %d precision not supported",
gauge.Precision(), in->Precision());
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new WilsonDslashCuda<double2, double2>(out, (double2*)gauge0, (double2*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new WilsonDslashCuda<float4, float4>(out, (float4*)gauge0, (float4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new WilsonDslashCuda<short4, short4>(out, (short4*)gauge0, (short4*)gauge1,
gauge.Reconstruct(), in, x, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume(), in->GhostFace(), profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Wilson dslash has not been built");
#endif // GPU_WILSON_DIRAC
}
}
|
dab5f828464c4221d1e9a3ddb8a8104f9039d8a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
constexpr int THREADS = 1024;
__global__ void CustomAddMulDivBpropKernel(float *input1, float *input2, float *input3, float *input4, float *input5,
float *output1, float *output2, size_t size) {
auto idx = blockIdx.x * THREADS + threadIdx.x;
if (idx < size) {
output1[idx] = input3[idx] + input4[idx] * input2[idx] + input5[idx] / input2[idx];
output2[idx] = input3[idx] + input4[idx] * input1[idx] - input5[idx] * input1[idx] / input2[idx] / input2[idx];
}
}
extern "C" int CustomAddMulDivBprop(int nparam, void **params, int *ndims, int64_t **shapes, const char **dtypes,
void *stream, void *extra) {
hipStream_t custream = static_cast<hipStream_t>(stream);
constexpr int OUTPUT_INDEX = 6;
constexpr int TOTAL_PARAM_NUM = 7;
// Users can add any check on their need. If check fails, user can return any value larger than 0 to safely exit.
// Return value not equal to 0 will cause MindSpore to stop computing and safely exit.
// This is to check if the num of parameters the same as what the user wants.
// There are five inputs and two outputs, so the nparam should be 7.
if (nparam != TOTAL_PARAM_NUM) {
return 1;
}
// This is to check if the type of parameters the same as what the user wants.
for (int i = 0; i < nparam; i++) {
if (strcmp(dtypes[i], "float32") != 0) {
return 2;
}
}
// input1's index is 0, input2's index is 1, input3's index is 2, input4's index is 3, input5's index is 4
// output1's index is 5 and output2's index is 6
void *input1 = params[0];
void *input2 = params[1];
void *input3 = params[2];
void *input4 = params[3];
void *input5 = params[4];
void *output1 = params[5];
void *output2 = params[6];
size_t size = 1;
// Cumprod of output's shape to compute elements' num
for (int i = 0; i < ndims[OUTPUT_INDEX]; i++) {
size *= shapes[OUTPUT_INDEX][i];
}
int n = size / THREADS;
// Do the computation
hipLaunchKernelGGL(( CustomAddMulDivBpropKernel), dim3(n + 1), dim3(THREADS), 0, custream,
static_cast<float *>(input1), static_cast<float *>(input2), static_cast<float *>(input3),
static_cast<float *>(input4), static_cast<float *>(input5), static_cast<float *>(output1),
static_cast<float *>(output2), size);
// When return 0, MindSpore will continue to run if this kernel could launch successfully.
return 0;
}
| dab5f828464c4221d1e9a3ddb8a8104f9039d8a9.cu | /**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
constexpr int THREADS = 1024;
__global__ void CustomAddMulDivBpropKernel(float *input1, float *input2, float *input3, float *input4, float *input5,
float *output1, float *output2, size_t size) {
auto idx = blockIdx.x * THREADS + threadIdx.x;
if (idx < size) {
output1[idx] = input3[idx] + input4[idx] * input2[idx] + input5[idx] / input2[idx];
output2[idx] = input3[idx] + input4[idx] * input1[idx] - input5[idx] * input1[idx] / input2[idx] / input2[idx];
}
}
extern "C" int CustomAddMulDivBprop(int nparam, void **params, int *ndims, int64_t **shapes, const char **dtypes,
void *stream, void *extra) {
cudaStream_t custream = static_cast<cudaStream_t>(stream);
constexpr int OUTPUT_INDEX = 6;
constexpr int TOTAL_PARAM_NUM = 7;
// Users can add any check on their need. If check fails, user can return any value larger than 0 to safely exit.
// Return value not equal to 0 will cause MindSpore to stop computing and safely exit.
// This is to check if the num of parameters the same as what the user wants.
// There are five inputs and two outputs, so the nparam should be 7.
if (nparam != TOTAL_PARAM_NUM) {
return 1;
}
// This is to check if the type of parameters the same as what the user wants.
for (int i = 0; i < nparam; i++) {
if (strcmp(dtypes[i], "float32") != 0) {
return 2;
}
}
// input1's index is 0, input2's index is 1, input3's index is 2, input4's index is 3, input5's index is 4
// output1's index is 5 and output2's index is 6
void *input1 = params[0];
void *input2 = params[1];
void *input3 = params[2];
void *input4 = params[3];
void *input5 = params[4];
void *output1 = params[5];
void *output2 = params[6];
size_t size = 1;
// Cumprod of output's shape to compute elements' num
for (int i = 0; i < ndims[OUTPUT_INDEX]; i++) {
size *= shapes[OUTPUT_INDEX][i];
}
int n = size / THREADS;
// Do the computation
CustomAddMulDivBpropKernel<<<n + 1, THREADS, 0, custream>>>(
static_cast<float *>(input1), static_cast<float *>(input2), static_cast<float *>(input3),
static_cast<float *>(input4), static_cast<float *>(input5), static_cast<float *>(output1),
static_cast<float *>(output2), size);
// When return 0, MindSpore will continue to run if this kernel could launch successfully.
return 0;
}
|
f559d01d66c0e7e14aa09e56accd31f38b57e928.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <cmath>
//#include <cutil_inline.h>
#include <helper_cuda.h>
#define cutilSafeCall(x) checkCudaErrors(x)
#include "vegas.h"
#include "vegasconst.h"
#include "kernels.h"
#include "gvegas.h"
#include "getrusage_sec.h"
void gVegas(double& avgi, double& sd, double& chi2a)
{
for (int j=0;j<ndim;j++) {
xi[j][0] = 1.;
}
// entry vegas1
it = 0;
// entry vegas2
nd = nd_max;
ng = 1;
npg = 0;
if (mds!=0) {
ng = (int)pow((0.5*(double)ncall),1./(double)ndim);
mds = 1;
if (2*ng>=nd_max) {
mds = -1;
npg = ng/nd_max+1;
nd = ng/npg;
ng = npg*nd;
}
}
cutilSafeCall(hipMemcpyToSymbol(g_ndim, &ndim, sizeof(int)));
cutilSafeCall(hipMemcpyToSymbol(g_ng, &ng, sizeof(int)));
cutilSafeCall(hipMemcpyToSymbol(g_nd, &nd, sizeof(int)));
hipDeviceSynchronize(); // wait for synchronize
nCubes = (unsigned)(pow(ng,ndim));
cutilSafeCall(hipMemcpyToSymbol(g_nCubes, &nCubes, sizeof(nCubes)));
hipDeviceSynchronize(); // wait for synchronize
npg = ncall/nCubes;
if (npg<2) npg = 2;
calls = (double)(npg*nCubes);
unsigned nCubeNpg = nCubes*npg;
if (nprn!=0) {
std::cout<<std::endl;
std::cout<<" << vegas internal parameters >>"<<std::endl;
std::cout<<" ng: "<<std::setw(5)<<ng<<std::endl;
std::cout<<" nd: "<<std::setw(5)<<nd<<std::endl;
std::cout<<" npg: "<<std::setw(5)<<npg<<std::endl;
std::cout<<" nCubes: "<<std::setw(12)<<nCubes<<std::endl;
std::cout<<" nCubes*npg: "<<std::setw(12)<<nCubeNpg<<std::endl;
}
dxg = 1./(double)ng;
double dnpg = (double)npg;
double dv2g = calls*calls*pow(dxg,ndim)*pow(dxg,ndim)/(dnpg*dnpg*(dnpg-1.));
xnd = (double)nd;
dxg *= xnd;
xjac = 1./(double)calls;
for (int j=0;j<ndim;j++) {
dx[j] = xu[j]-xl[j];
xjac *= dx[j];
}
cutilSafeCall(hipMemcpyToSymbol(g_npg, &npg, sizeof(int)));
cutilSafeCall(hipMemcpyToSymbol(g_xjac, &xjac, sizeof(double)));
cutilSafeCall(hipMemcpyToSymbol(g_dxg, &dxg, sizeof(double)));
hipDeviceSynchronize(); // wait for synchronize
ndo = 1;
if (nd!=ndo) {
double rc = (double)ndo/xnd;
for (int j=0;j<ndim;j++) {
int k = -1;
double xn = 0.;
double dr = 0.;
int i = k;
k++;
dr += 1.;
double xo = xn;
xn = xi[j][k];
while (i<nd-1) {
while (dr<=rc) {
k++;
dr += 1.;
xo = xn;
xn = xi[j][k];
}
i++;
dr -= rc;
xin[i] = xn - (xn-xo)*dr;
}
for (int i=0;i<nd-1;i++) {
xi[j][i] = (double)xin[i];
}
xi[j][nd-1] = 1.;
}
ndo = nd;
}
cutilSafeCall(hipMemcpyToSymbol(g_xl, xl, sizeof(xl)));
cutilSafeCall(hipMemcpyToSymbol(g_dx, dx, sizeof(dx)));
cutilSafeCall(hipMemcpyToSymbol(g_xi, xi, sizeof(xi)));
hipDeviceSynchronize(); // wait for synchronize
if (nprn!=0) {
std::cout<<std::endl;
std::cout<<" << input parameters for vegas >>"<<std::endl;
std::cout<<" ndim ="<<std::setw(3)<<ndim
<<" ncall ="<<std::setw(10)<<(int)calls<<std::endl;
std::cout<<" it = 0"
<<" itmx ="<<std::setw(5)<<itmx<<std::endl;
std::cout<<" acc = "<<std::fixed
<<std::setw(9)<<std::setprecision(3)<<acc<<std::endl;
std::cout<<" mds ="<<std::setw(3)<<mds
<<" nd = "<<std::setw(4)<<nd<<std::endl;
for (int j=0;j<ndim;j++) {
std::cout<<" (xl,xu)= ( "<<std::setw(6)<<std::fixed
<<xl[j]<<" , "<<xu[j]<<" )"<<std::endl;
}
}
// entry vegas3
it = 0;
si = 0.;
si2 = 0.;
swgt = 0.;
schi = 0.;
//--------------------------
// Set up kernel vaiables
//--------------------------
const int nGridSizeMax = 65535;
dim3 ThBk(nBlockSize);
int nGridSizeX, nGridSizeY;
int nBlockTot = (nCubeNpg-1)/nBlockSize+1;
nGridSizeY = (nBlockTot-1)/nGridSizeMax+1;
nGridSizeX = (nBlockTot-1)/nGridSizeY+1;
dim3 BkGd(nGridSizeX, nGridSizeY);
if (nprn!=0) {
std::cout<<std::endl;
std::cout<<" << kernel parameters for CUDA >>"<<std::endl;
std::cout<<" Block size ="<<std::setw(7)<<ThBk.x<<std::endl;
std::cout<<" Grid size ="<<std::setw(7)<<BkGd.x
<<" x "<<BkGd.y<<std::endl;
int nThreadsTot = ThBk.x*BkGd.x*BkGd.y;
std::cout<<" Actual Number of calls ="<<std::setw(12)
<<nThreadsTot<<std::endl;
std::cout<<" Required Number of calls ="<<std::setw(12)
<<nCubeNpg<<" ( "<<std::setw(6)<<std::setprecision(2)
<<100.*(double)nCubeNpg/(double)nThreadsTot<<"%)"<<std::endl;
std::cout<<std::endl;
}
// allocate Fval
int sizeFval = nCubeNpg*sizeof(double);
// CPU
double* hFval;
cutilSafeCall(hipHostMalloc((void**)&hFval, sizeFval));
memset(hFval, '\0', sizeFval);
// GPU
double* gFval;
cutilSafeCall(hipMalloc((void**)&gFval, sizeFval));
// allocate IAval
int sizeIAval = nCubeNpg*ndim*sizeof(int);
// CPU
int* hIAval;
cutilSafeCall(hipHostMalloc((void**)&hIAval, sizeIAval));
memset(hIAval, '\0', sizeIAval);
// GPU
int* gIAval;
cutilSafeCall(hipMalloc((void**)&gIAval, sizeIAval));
double startVegasCall, endVegasCall;
double startVegasMove, endVegasMove;
double startVegasFill, endVegasFill;
double startVegasRefine, endVegasRefine;
do {
it++;
// startVegasCall = getrusage_usec();
hipLaunchKernelGGL(( gVegasCallFunc), dim3(BkGd), dim3(ThBk), 0, 0, gFval, gIAval);
hipDeviceSynchronize(); // wait for synchronize
// endVegasCall = getrusage_usec();
timeVegasCall += endVegasCall-startVegasCall;
// startVegasMove = getrusage_usec();
cutilSafeCall(hipMemcpy(hFval, gFval, sizeFval,
hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(hIAval, gIAval, sizeIAval,
hipMemcpyDeviceToHost));
// endVegasMove = getrusage_usec();
timeVegasMove += endVegasMove-startVegasMove;
// *****************
// startVegasFill = getrusage_usec();
ti = 0.;
tsi = 0.;
double d[ndim_max][nd_max];
for (int j=0;j<ndim;++j) {
for (int i=0;i<nd;++i) {
d[j][i] = 0.;
}
}
for (unsigned ig=0;ig<nCubes;ig++) {
double fb = 0.;
double f2b = 0.;
for (int ipg=0;ipg<npg;ipg++) {
int idx = npg*ig+ipg;
double f = hFval[idx];
double f2 = f*f;
fb += f;
f2b += f2;
}
f2b = sqrt(f2b*npg);
f2b = (f2b-fb)*(f2b+fb);
ti += fb;
tsi += f2b;
if (mds<0) {
int idx = npg*ig;
for (int idim=0;idim<ndim;idim++) {
int iaj = hIAval[idim*nCubeNpg+idx];
d[idim][iaj] += f2b;
}
}
}
if (mds>0) {
for (int idim=0;idim<ndim;idim++) {
int idimCube = idim*nCubeNpg;
for (int idx=0;idx<nCubeNpg;idx++) {
double f = hFval[idx];
int iaj = hIAval[idimCube+idx];
d[idim][iaj] += f*f;
}
}
}
// endVegasFill = getrusage_usec();
timeVegasFill += endVegasFill-startVegasFill;
tsi *= dv2g;
double ti2 = ti*ti;
double wgt = ti2/tsi;
si += ti*wgt;
si2 += ti2;
swgt += wgt;
schi += ti2*wgt;
avgi = si/swgt;
sd = swgt*it/si2;
chi2a = 0.;
if (it>1) chi2a = sd*(schi/swgt-avgi*avgi)/((double)it-1.);
sd = sqrt(1./sd);
if (nprn!=0) {
tsi = sqrt(tsi);
std::cout<<std::endl;
std::cout<<" << integration by vegas >>"<<std::endl;
std::cout<<" iteration no. "<<std::setw(4)<<it
<<std::setw(10)<<std::setprecision(6)
<<" integral= "<<ti<<std::endl;
std::cout<<" std dev = "<<tsi<<std::endl;
std::cout<<" accumulated results: integral = "<<avgi<<std::endl;
std::cout<<" std dev = "<<sd<<std::endl;
if (it > 1) {
std::cout<<" chi**2 per it'n = "
<<std::setw(10)<<std::setprecision(4)<<chi2a<<std::endl;
}
if (nprn<0) {
for (int j=0;j<ndim;j++) {
std::cout<<" == data for axis "
<<std::setw(2)<<j<<" --"<<std::endl;
std::cout<<" x delt i convce";
std::cout<<" x delt i convce";
std::cout<<" x delt i convce"<<std::endl;
}
}
}
// refine grid
// startVegasRefine = getrusage_usec();
double r[nd_max];
double dt[ndim_max];
for (int j=0;j<ndim;j++) {
double xo = d[j][0];
double xn = d[j][1];
d[j][0] = 0.5*(xo+xn);
dt[j] = d[j][0];
for (int i=1;i<nd-1;i++) {
d[j][i] = xo+xn;
xo = xn;
xn = d[j][i+1];
d[j][i] = (d[j][i]+xn)/3.;
dt[j] += d[j][i];
}
d[j][nd-1] = 0.5*(xn+xo);
dt[j] += d[j][nd-1];
}
for (int j=0;j<ndim;j++) {
double rc = 0.;
for (int i=0;i<nd;i++) {
r[i] = 0.;
if (d[j][i]>0.) {
double xo = dt[j]/d[j][i];
if (!isinf(xo))
r[i] = pow(((xo-1.)/xo/log(xo)),alph);
}
rc += r[i];
}
rc /= xnd;
int k = -1;
double xn = 0.;
double dr = xn;
int i = k;
k++;
dr += r[k];
double xo = xn;
xn = xi[j][k];
do {
while (dr<=rc) {
k++;
dr += r[k];
xo = xn;
xn = xi[j][k];
}
i++;
dr -= rc;
xin[i] = xn-(xn-xo)*dr/r[k];
} while (i<nd-2);
for (int i=0;i<nd-1;i++) {
xi[j][i] = (double)xin[i];
}
xi[j][nd-1] = 1.;
}
cutilSafeCall(hipMemcpyToSymbol(g_xi, xi, sizeof(xi)));
hipDeviceSynchronize(); // wait for synchronize
// endVegasRefine = getrusage_usec();
timeVegasRefine += endVegasRefine-startVegasRefine;
} while (it<itmx && acc*fabs(avgi)<sd);
cutilSafeCall(hipHostFree(hFval));
cutilSafeCall(hipFree(gFval));
cutilSafeCall(hipHostFree(hIAval));
cutilSafeCall(hipFree(gIAval));
}
| f559d01d66c0e7e14aa09e56accd31f38b57e928.cu | #include <iostream>
#include <iomanip>
#include <cmath>
//#include <cutil_inline.h>
#include <helper_cuda.h>
#define cutilSafeCall(x) checkCudaErrors(x)
#include "vegas.h"
#include "vegasconst.h"
#include "kernels.h"
#include "gvegas.h"
#include "getrusage_sec.h"
/**
 * gVegas -- run the VEGAS adaptive Monte Carlo integration with the
 * integrand evaluated on the GPU (kernel gVegasCallFunc).
 *
 * Outputs (by reference):
 *   avgi  - accumulated integral estimate over all iterations
 *   sd    - accumulated standard deviation
 *   chi2a - chi**2 per iteration (consistency of the iterations)
 *
 * All other state (ndim, ncall, itmx, acc, mds, xi, xin, xl, xu, dx,
 * timing accumulators, ...) is file/global scope, declared in gvegas.h;
 * g_* names are device symbols filled via cudaMemcpyToSymbol.
 */
void gVegas(double& avgi, double& sd, double& chi2a)
{
  // Start each axis with a single bin covering the whole unit interval;
  // xi[j][] holds the cumulative bin boundaries of the adaptive grid.
  for (int j=0;j<ndim;j++) {
    xi[j][0] = 1.;
  }
  // entry vegas1
  it = 0;
  // entry vegas2
  nd = nd_max;
  ng = 1;
  npg = 0;
  if (mds!=0) {
    // Choose the number of stratification cells per axis from the call budget.
    ng = (int)pow((0.5*(double)ncall),1./(double)ndim);
    mds = 1;
    if (2*ng>=nd_max) {
      // Grid too fine for stratification: fall back to importance-sampling
      // mode (mds = -1) and snap ng to a multiple of nd.
      mds = -1;
      npg = ng/nd_max+1;
      nd = ng/npg;
      ng = npg*nd;
    }
  }
  // Publish the run geometry to device-constant symbols.
  cutilSafeCall(cudaMemcpyToSymbol(g_ndim, &ndim, sizeof(int)));
  cutilSafeCall(cudaMemcpyToSymbol(g_ng, &ng, sizeof(int)));
  cutilSafeCall(cudaMemcpyToSymbol(g_nd, &nd, sizeof(int)));
  cudaThreadSynchronize(); // wait for synchronize
  nCubes = (unsigned)(pow(ng,ndim));
  cutilSafeCall(cudaMemcpyToSymbol(g_nCubes, &nCubes, sizeof(nCubes)));
  cudaThreadSynchronize(); // wait for synchronize
  // At least two samples per hypercube (variance estimate needs n-1 > 0).
  npg = ncall/nCubes;
  if (npg<2) npg = 2;
  calls = (double)(npg*nCubes);
  unsigned nCubeNpg = nCubes*npg;  // total integrand evaluations per iteration
  if (nprn!=0) {
    std::cout<<std::endl;
    std::cout<<" << vegas internal parameters >>"<<std::endl;
    std::cout<<" ng: "<<std::setw(5)<<ng<<std::endl;
    std::cout<<" nd: "<<std::setw(5)<<nd<<std::endl;
    std::cout<<" npg: "<<std::setw(5)<<npg<<std::endl;
    std::cout<<" nCubes: "<<std::setw(12)<<nCubes<<std::endl;
    std::cout<<" nCubes*npg: "<<std::setw(12)<<nCubeNpg<<std::endl;
  }
  dxg = 1./(double)ng;
  double dnpg = (double)npg;
  // Variance normalization factor used each iteration (tsi *= dv2g below).
  double dv2g = calls*calls*pow(dxg,ndim)*pow(dxg,ndim)/(dnpg*dnpg*(dnpg-1.));
  xnd = (double)nd;
  dxg *= xnd;
  // Jacobian of the map from the unit hypercube to [xl,xu]^ndim,
  // divided by the number of calls.
  xjac = 1./(double)calls;
  for (int j=0;j<ndim;j++) {
    dx[j] = xu[j]-xl[j];
    xjac *= dx[j];
  }
  cutilSafeCall(cudaMemcpyToSymbol(g_npg, &npg, sizeof(int)));
  cutilSafeCall(cudaMemcpyToSymbol(g_xjac, &xjac, sizeof(double)));
  cutilSafeCall(cudaMemcpyToSymbol(g_dxg, &dxg, sizeof(double)));
  cudaThreadSynchronize(); // wait for synchronize
  ndo = 1;
  if (nd!=ndo) {
    // Rebin the (initially 1-bin) grid uniformly into nd bins per axis.
    double rc = (double)ndo/xnd;
    for (int j=0;j<ndim;j++) {
      int k = -1;
      double xn = 0.;
      double dr = 0.;
      int i = k;
      k++;
      dr += 1.;
      double xo = xn;
      xn = xi[j][k];
      while (i<nd-1) {
        while (dr<=rc) {
          k++;
          dr += 1.;
          xo = xn;
          xn = xi[j][k];
        }
        i++;
        dr -= rc;
        xin[i] = xn - (xn-xo)*dr;
      }
      for (int i=0;i<nd-1;i++) {
        xi[j][i] = (double)xin[i];
      }
      xi[j][nd-1] = 1.;
    }
    ndo = nd;
  }
  cutilSafeCall(cudaMemcpyToSymbol(g_xl, xl, sizeof(xl)));
  cutilSafeCall(cudaMemcpyToSymbol(g_dx, dx, sizeof(dx)));
  cutilSafeCall(cudaMemcpyToSymbol(g_xi, xi, sizeof(xi)));
  cudaThreadSynchronize(); // wait for synchronize
  if (nprn!=0) {
    std::cout<<std::endl;
    std::cout<<" << input parameters for vegas >>"<<std::endl;
    std::cout<<" ndim ="<<std::setw(3)<<ndim
             <<" ncall ="<<std::setw(10)<<(int)calls<<std::endl;
    std::cout<<" it = 0"
             <<" itmx ="<<std::setw(5)<<itmx<<std::endl;
    std::cout<<" acc = "<<std::fixed
             <<std::setw(9)<<std::setprecision(3)<<acc<<std::endl;
    std::cout<<" mds ="<<std::setw(3)<<mds
             <<" nd = "<<std::setw(4)<<nd<<std::endl;
    for (int j=0;j<ndim;j++) {
      std::cout<<" (xl,xu)= ( "<<std::setw(6)<<std::fixed
               <<xl[j]<<" , "<<xu[j]<<" )"<<std::endl;
    }
  }
  // entry vegas3 -- reset the cross-iteration accumulators.
  it = 0;
  si = 0.;
  si2 = 0.;
  swgt = 0.;
  schi = 0.;
  //--------------------------
  // Set up kernel variables
  //--------------------------
  // Grids are limited to 65535 blocks per dimension; split the total block
  // count over a 2-D grid (X x Y) so any nCubeNpg fits.
  const int nGridSizeMax = 65535;
  dim3 ThBk(nBlockSize);
  int nGridSizeX, nGridSizeY;
  int nBlockTot = (nCubeNpg-1)/nBlockSize+1;
  nGridSizeY = (nBlockTot-1)/nGridSizeMax+1;
  nGridSizeX = (nBlockTot-1)/nGridSizeY+1;
  dim3 BkGd(nGridSizeX, nGridSizeY);
  if (nprn!=0) {
    std::cout<<std::endl;
    std::cout<<" << kernel parameters for CUDA >>"<<std::endl;
    std::cout<<" Block size ="<<std::setw(7)<<ThBk.x<<std::endl;
    std::cout<<" Grid size ="<<std::setw(7)<<BkGd.x
             <<" x "<<BkGd.y<<std::endl;
    int nThreadsTot = ThBk.x*BkGd.x*BkGd.y;
    std::cout<<" Actual Number of calls ="<<std::setw(12)
             <<nThreadsTot<<std::endl;
    std::cout<<" Required Number of calls ="<<std::setw(12)
             <<nCubeNpg<<" ( "<<std::setw(6)<<std::setprecision(2)
             <<100.*(double)nCubeNpg/(double)nThreadsTot<<"%)"<<std::endl;
    std::cout<<std::endl;
  }
  // allocate Fval: one integrand value per (cube, sample); pinned host
  // memory mirrors the device buffer for fast D2H copies.
  int sizeFval = nCubeNpg*sizeof(double);
  // CPU
  double* hFval;
  cutilSafeCall(cudaMallocHost((void**)&hFval, sizeFval));
  memset(hFval, '\0', sizeFval);
  // GPU
  double* gFval;
  cutilSafeCall(cudaMalloc((void**)&gFval, sizeFval));
  // allocate IAval: per-sample bin index for each dimension
  // (laid out dimension-major: [idim*nCubeNpg + idx]).
  int sizeIAval = nCubeNpg*ndim*sizeof(int);
  // CPU
  int* hIAval;
  cutilSafeCall(cudaMallocHost((void**)&hIAval, sizeIAval));
  memset(hIAval, '\0', sizeIAval);
  // GPU
  int* gIAval;
  cutilSafeCall(cudaMalloc((void**)&gIAval, sizeIAval));
  // NOTE(review): the getrusage_usec() calls that set these are commented
  // out below, so every start*/end* variable is read uninitialized and the
  // timeVegas* accumulators receive indeterminate values -- either restore
  // the timing calls or drop the += lines.
  double startVegasCall, endVegasCall;
  double startVegasMove, endVegasMove;
  double startVegasFill, endVegasFill;
  double startVegasRefine, endVegasRefine;
  do {
    it++;
    // startVegasCall = getrusage_usec();
    // Evaluate the integrand for all (cube, sample) pairs on the device.
    gVegasCallFunc<<<BkGd, ThBk>>>(gFval, gIAval);
    cudaThreadSynchronize(); // wait for synchronize
    // endVegasCall = getrusage_usec();
    timeVegasCall += endVegasCall-startVegasCall;
    // startVegasMove = getrusage_usec();
    cutilSafeCall(cudaMemcpy(hFval, gFval, sizeFval,
                             cudaMemcpyDeviceToHost));
    cutilSafeCall(cudaMemcpy(hIAval, gIAval, sizeIAval,
                             cudaMemcpyDeviceToHost));
    // endVegasMove = getrusage_usec();
    timeVegasMove += endVegasMove-startVegasMove;
    // *****************
    // startVegasFill = getrusage_usec();
    // Host-side accumulation: per-iteration integral (ti), variance (tsi),
    // and the per-bin squared contributions d[dim][bin] used for refinement.
    ti = 0.;
    tsi = 0.;
    double d[ndim_max][nd_max];
    for (int j=0;j<ndim;++j) {
      for (int i=0;i<nd;++i) {
        d[j][i] = 0.;
      }
    }
    for (unsigned ig=0;ig<nCubes;ig++) {
      // Sum and sum-of-squares of the npg samples in this hypercube.
      double fb = 0.;
      double f2b = 0.;
      for (int ipg=0;ipg<npg;ipg++) {
        int idx = npg*ig+ipg;
        double f = hFval[idx];
        double f2 = f*f;
        fb += f;
        f2b += f2;
      }
      // f2b becomes npg*sum(f^2) - (sum f)^2, the unnormalized variance.
      f2b = sqrt(f2b*npg);
      f2b = (f2b-fb)*(f2b+fb);
      ti += fb;
      tsi += f2b;
      if (mds<0) {
        // Importance-sampling mode: charge the cube variance to the bin
        // of the cube's first sample.
        int idx = npg*ig;
        for (int idim=0;idim<ndim;idim++) {
          int iaj = hIAval[idim*nCubeNpg+idx];
          d[idim][iaj] += f2b;
        }
      }
    }
    if (mds>0) {
      // Stratified mode: charge f^2 of every sample to its bin.
      for (int idim=0;idim<ndim;idim++) {
        int idimCube = idim*nCubeNpg;
        for (int idx=0;idx<nCubeNpg;idx++) {
          double f = hFval[idx];
          int iaj = hIAval[idimCube+idx];
          d[idim][iaj] += f*f;
        }
      }
    }
    // endVegasFill = getrusage_usec();
    timeVegasFill += endVegasFill-startVegasFill;
    // Combine this iteration with previous ones, weighting by 1/variance.
    tsi *= dv2g;
    double ti2 = ti*ti;
    double wgt = ti2/tsi;
    si += ti*wgt;
    si2 += ti2;
    swgt += wgt;
    schi += ti2*wgt;
    avgi = si/swgt;
    sd = swgt*it/si2;
    chi2a = 0.;
    if (it>1) chi2a = sd*(schi/swgt-avgi*avgi)/((double)it-1.);
    sd = sqrt(1./sd);
    if (nprn!=0) {
      tsi = sqrt(tsi);
      std::cout<<std::endl;
      std::cout<<" << integration by vegas >>"<<std::endl;
      std::cout<<" iteration no. "<<std::setw(4)<<it
               <<std::setw(10)<<std::setprecision(6)
               <<" integral= "<<ti<<std::endl;
      std::cout<<" std dev = "<<tsi<<std::endl;
      std::cout<<" accumulated results: integral = "<<avgi<<std::endl;
      std::cout<<" std dev = "<<sd<<std::endl;
      if (it > 1) {
        std::cout<<" chi**2 per it'n = "
                 <<std::setw(10)<<std::setprecision(4)<<chi2a<<std::endl;
      }
      if (nprn<0) {
        for (int j=0;j<ndim;j++) {
          std::cout<<" == data for axis "
                   <<std::setw(2)<<j<<" --"<<std::endl;
          std::cout<<" x delt i convce";
          std::cout<<" x delt i convce";
          std::cout<<" x delt i convce"<<std::endl;
        }
      }
    }
    // refine grid: first smooth d[j][] with a 3-point average, then
    // redistribute bin boundaries so each new bin carries equal weight.
    // startVegasRefine = getrusage_usec();
    double r[nd_max];
    double dt[ndim_max];
    for (int j=0;j<ndim;j++) {
      double xo = d[j][0];
      double xn = d[j][1];
      d[j][0] = 0.5*(xo+xn);
      dt[j] = d[j][0];
      for (int i=1;i<nd-1;i++) {
        d[j][i] = xo+xn;
        xo = xn;
        xn = d[j][i+1];
        d[j][i] = (d[j][i]+xn)/3.;
        dt[j] += d[j][i];
      }
      d[j][nd-1] = 0.5*(xn+xo);
      dt[j] += d[j][nd-1];
    }
    for (int j=0;j<ndim;j++) {
      // r[i]: damped subdivision weight per bin (alph is the damping power).
      double rc = 0.;
      for (int i=0;i<nd;i++) {
        r[i] = 0.;
        if (d[j][i]>0.) {
          double xo = dt[j]/d[j][i];
          if (!isinf(xo))
            r[i] = pow(((xo-1.)/xo/log(xo)),alph);
        }
        rc += r[i];
      }
      rc /= xnd;
      // Walk the old boundaries, emitting a new boundary each time the
      // accumulated weight dr passes the per-bin target rc.
      int k = -1;
      double xn = 0.;
      double dr = xn;
      int i = k;
      k++;
      dr += r[k];
      double xo = xn;
      xn = xi[j][k];
      do {
        while (dr<=rc) {
          k++;
          dr += r[k];
          xo = xn;
          xn = xi[j][k];
        }
        i++;
        dr -= rc;
        xin[i] = xn-(xn-xo)*dr/r[k];
      } while (i<nd-2);
      for (int i=0;i<nd-1;i++) {
        xi[j][i] = (double)xin[i];
      }
      xi[j][nd-1] = 1.;
    }
    // Push the refined grid back to the device for the next iteration.
    cutilSafeCall(cudaMemcpyToSymbol(g_xi, xi, sizeof(xi)));
    cudaThreadSynchronize(); // wait for synchronize
    // endVegasRefine = getrusage_usec();
    timeVegasRefine += endVegasRefine-startVegasRefine;
  } while (it<itmx && acc*fabs(avgi)<sd);  // stop at itmx or target accuracy
  cutilSafeCall(cudaFreeHost(hFval));
  cutilSafeCall(cudaFree(gFval));
  cutilSafeCall(cudaFreeHost(hIAval));
  cutilSafeCall(cudaFree(gIAval));
}
|
982f2088b3b1726dcf06686570707e715b85608b.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file gtc_app.cu
*
* @brief single-source shortest path (SSSP) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/gtc/gtc_enactor.cuh>
#include <gunrock/app/gtc/gtc_test.cuh>
namespace gunrock {
namespace app {
namespace sssp {
// Registers the parameters this app accepts on top of the shared app,
// problem and enactor parameter sets. GUARD_CU returns early on the first
// failing registration, so statement order is the registration order.
hipError_t UseParameters(util::Parameters &parameters) {
  hipError_t retval = hipSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));

  // "src": one or more source vertices, or the keywords random /
  // largestdegree (resolved later to concrete vertex ids).
  GUARD_CU(parameters.Use<std::string>(
      "src",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "0",
      "<Vertex-ID|random|largestdegree> The source vertices\n"
      "\tIf random, randomly select non-zero degree vertices;\n"
      "\tIf largestdegree, select vertices with largest degrees",
      __FILE__, __LINE__));

  // "src-seed": RNG seed used when sources are selected randomly.
  GUARD_CU(parameters.Use<int>(
      "src-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "seed to generate random sources", __FILE__, __LINE__));
  return retval;
}
/**
* @brief Run SSSP tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Whether to perform the SSSP
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
hipError_t RunTests(util::Parameters &parameters, GraphT &graph,
                    ValueT **ref_distances = NULL,
                    util::Location target = util::DEVICE) {
  hipError_t retval = hipSuccess;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();

  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  bool mark_pred = parameters.Get<bool>("mark-pred");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  // NOTE(review): reads "srcs" while UseParameters registers "src" --
  // presumably a preprocessing step converts it; verify against the driver.
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();
  util::Info info("SSSP", parameters, graph);  // initialize Info structure

  // Allocate host-side array (for both reference and GPU-computed results)
  ValueT *h_distances = new ValueT[graph.nodes];
  VertexT *h_preds = (mark_pred) ? new VertexT[graph.nodes] : NULL;

  // Allocate problem and enactor on GPU, and initialize them
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());

  // perform SSSP; sources cycle through srcs when num_runs > num_srcs
  VertexT src;
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    src = srcs[run_num % num_srcs];
    GUARD_CU(problem.Reset(src, target));
    GUARD_CU(enactor.Reset(src, target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact(src));
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
    util::PrintMsg(
        "--------------------------\nRun " + std::to_string(run_num) +
            " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) + ", #iterations = " +
            std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
        !quiet_mode);
    // Optionally validate after every run instead of only the last one.
    if (validation == "each") {
      GUARD_CU(problem.Extract(h_distances, h_preds, target));
      SizeT num_errors = app::sssp::Validate_Results(
          parameters, graph, src, h_distances, h_preds,
          ref_distances == NULL ? NULL : ref_distances[run_num % num_srcs],
          (VertexT *)NULL, false);
    }
  }

  cpu_timer.Start();
  // Copy out results
  GUARD_CU(problem.Extract(h_distances, h_preds, target));
  if (validation == "last") {
    SizeT num_errors = app::sssp::Validate_Results(
        parameters, graph, src, h_distances, h_preds,
        ref_distances == NULL ? NULL
                              : ref_distances[(num_runs - 1) % num_srcs]);
  }

  // compute running statistics
  info.ComputeTraversalStats(enactor, h_distances);
  // Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
  // Display_Performance_Profiling(&enactor);
#endif

  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_distances;
  h_distances = NULL;
  delete[] h_preds;
  h_preds = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace sssp
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_sssp function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_sssp(gunrock::util::Parameters &parameters, GraphT &graph,
                    ValueT **distances,
                    typename GraphT::VertexT **preds = NULL) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::sssp::Problem<GraphT> ProblemT;
  typedef gunrock::app::sssp::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  // NOTE(review): unlike RunTests, the return codes of Init/Reset/Enact/
  // Extract are ignored here, so failures are silent to the caller.
  problem.Init(graph, target);
  enactor.Init(problem, target);

  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_runs = parameters.Get<int>("num-runs");
  int num_srcs = srcs.size();
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    // Sources cycle when num_runs exceeds the number of provided sources.
    int src_num = run_num % num_srcs;
    VertexT src = srcs[src_num];
    problem.Reset(src, target);
    enactor.Reset(src, target);
    cpu_timer.Start();
    enactor.Enact(src);
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();  // only Enact time is counted
    problem.Extract(distances[src_num], preds == NULL ? NULL : preds[src_num]);
  }

  enactor.Release(target);
  problem.Release(target);
  srcs.clear();
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform SSSP
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
          typename GValueT = unsigned int, typename SSSPValueT = GValueT>
double sssp(const SizeT num_nodes, const SizeT num_edges,
            const SizeT *row_offsets, const VertexT *col_indices,
            const GValueT *edge_values, const int num_runs, VertexT *sources,
            const bool mark_pred, SSSPValueT **distances,
            VertexT **preds = NULL) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
                                           gunrock::graph::HAS_EDGE_VALUES |
                                               gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters (no command line: only programmatic settings below)
  gunrock::util::Parameters parameters("sssp");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::sssp::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  parameters.Set("mark-pred", mark_pred);
  parameters.Set("num-runs", num_runs);
  std::vector<VertexT> srcs;
  for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
  parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");
  GraphT graph;
  // Assign pointers into gunrock graph format (caller keeps ownership of
  // the CSR arrays; they must outlive the call)
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer(row_offsets, gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer(col_indices, gunrock::util::HOST);
  graph.CsrT::edge_values.SetPointer(edge_values, gunrock::util::HOST);
  graph.FromCsr(graph.csr(), true, quiet);
  gunrock::graphio::LoadGraph(parameters, graph);
  // Run the SSSP
  double elapsed_time = gunrock_sssp(parameters, graph, distances, preds);
  // Cleanup
  graph.Release();
  srcs.clear();
  return elapsed_time;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 982f2088b3b1726dcf06686570707e715b85608b.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file gtc_app.cu
*
* @brief single-source shortest path (SSSP) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/gtc/gtc_enactor.cuh>
#include <gunrock/app/gtc/gtc_test.cuh>
namespace gunrock {
namespace app {
namespace sssp {
// Registers this app's parameters on top of the shared app/problem/enactor
// sets; GUARD_CU returns early on the first failing registration.
cudaError_t UseParameters(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));

  // "src": one or more source vertices, or random / largestdegree keywords.
  GUARD_CU(parameters.Use<std::string>(
      "src",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "0",
      "<Vertex-ID|random|largestdegree> The source vertices\n"
      "\tIf random, randomly select non-zero degree vertices;\n"
      "\tIf largestdegree, select vertices with largest degrees",
      __FILE__, __LINE__));

  // "src-seed": RNG seed for random source selection.
  GUARD_CU(parameters.Use<int>(
      "src-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "seed to generate random sources", __FILE__, __LINE__));
  return retval;
}
/**
* @brief Run SSSP tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Whether to perform the SSSP
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
cudaError_t RunTests(util::Parameters &parameters, GraphT &graph,
                     ValueT **ref_distances = NULL,
                     util::Location target = util::DEVICE) {
  cudaError_t retval = cudaSuccess;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();

  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  bool mark_pred = parameters.Get<bool>("mark-pred");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  // NOTE(review): reads "srcs" while UseParameters registers "src" --
  // presumably a preprocessing step converts it; verify against the driver.
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();
  util::Info info("SSSP", parameters, graph);  // initialize Info structure

  // Allocate host-side array (for both reference and GPU-computed results)
  ValueT *h_distances = new ValueT[graph.nodes];
  VertexT *h_preds = (mark_pred) ? new VertexT[graph.nodes] : NULL;

  // Allocate problem and enactor on GPU, and initialize them
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());

  // perform SSSP; sources cycle through srcs when num_runs > num_srcs
  VertexT src;
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    src = srcs[run_num % num_srcs];
    GUARD_CU(problem.Reset(src, target));
    GUARD_CU(enactor.Reset(src, target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact(src));
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
    util::PrintMsg(
        "--------------------------\nRun " + std::to_string(run_num) +
            " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) + ", #iterations = " +
            std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
        !quiet_mode);
    // Optionally validate after every run instead of only the last one.
    if (validation == "each") {
      GUARD_CU(problem.Extract(h_distances, h_preds, target));
      SizeT num_errors = app::sssp::Validate_Results(
          parameters, graph, src, h_distances, h_preds,
          ref_distances == NULL ? NULL : ref_distances[run_num % num_srcs],
          (VertexT *)NULL, false);
    }
  }

  cpu_timer.Start();
  // Copy out results
  GUARD_CU(problem.Extract(h_distances, h_preds, target));
  if (validation == "last") {
    SizeT num_errors = app::sssp::Validate_Results(
        parameters, graph, src, h_distances, h_preds,
        ref_distances == NULL ? NULL
                              : ref_distances[(num_runs - 1) % num_srcs]);
  }

  // compute running statistics
  info.ComputeTraversalStats(enactor, h_distances);
  // Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
  // Display_Performance_Profiling(&enactor);
#endif

  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_distances;
  h_distances = NULL;
  delete[] h_preds;
  h_preds = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace sssp
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_sssp function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_sssp(gunrock::util::Parameters &parameters, GraphT &graph,
                    ValueT **distances,
                    typename GraphT::VertexT **preds = NULL) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::sssp::Problem<GraphT> ProblemT;
  typedef gunrock::app::sssp::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  // NOTE(review): unlike RunTests, the return codes of Init/Reset/Enact/
  // Extract are ignored here, so failures are silent to the caller.
  problem.Init(graph, target);
  enactor.Init(problem, target);

  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_runs = parameters.Get<int>("num-runs");
  int num_srcs = srcs.size();
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    // Sources cycle when num_runs exceeds the number of provided sources.
    int src_num = run_num % num_srcs;
    VertexT src = srcs[src_num];
    problem.Reset(src, target);
    enactor.Reset(src, target);
    cpu_timer.Start();
    enactor.Enact(src);
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();  // only Enact time is counted
    problem.Extract(distances[src_num], preds == NULL ? NULL : preds[src_num]);
  }

  enactor.Release(target);
  problem.Release(target);
  srcs.clear();
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform SSSP
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
          typename GValueT = unsigned int, typename SSSPValueT = GValueT>
double sssp(const SizeT num_nodes, const SizeT num_edges,
            const SizeT *row_offsets, const VertexT *col_indices,
            const GValueT *edge_values, const int num_runs, VertexT *sources,
            const bool mark_pred, SSSPValueT **distances,
            VertexT **preds = NULL) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
                                           gunrock::graph::HAS_EDGE_VALUES |
                                               gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters (no command line: only programmatic settings below)
  gunrock::util::Parameters parameters("sssp");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::sssp::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  parameters.Set("mark-pred", mark_pred);
  parameters.Set("num-runs", num_runs);
  std::vector<VertexT> srcs;
  for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
  parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");
  GraphT graph;
  // Assign pointers into gunrock graph format (caller keeps ownership of
  // the CSR arrays; they must outlive the call)
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer(row_offsets, gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer(col_indices, gunrock::util::HOST);
  graph.CsrT::edge_values.SetPointer(edge_values, gunrock::util::HOST);
  graph.FromCsr(graph.csr(), true, quiet);
  gunrock::graphio::LoadGraph(parameters, graph);
  // Run the SSSP
  double elapsed_time = gunrock_sssp(parameters, graph, distances, preds);
  // Cleanup
  graph.Release();
  srcs.clear();
  return elapsed_time;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
ff9617d11ac7acca10decd4f6006b16ae469774e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_plugin_helper.h"
#include "metutil.h"
#include "tpot.cuh"
// One thread per grid point: computes the requested potential-temperature
// variants (theta / theta-w / theta-e) for point idx and writes them to the
// corresponding output arrays. Expects a 1-D launch covering at least
// opts.N threads; excess threads exit at the bounds check.
__global__ void himan::plugin::tpot_cuda::Calculate(const double* __restrict__ d_t, const double* __restrict__ d_p,
                                                    const double* __restrict__ d_td, double* __restrict__ d_tp,
                                                    double* __restrict__ d_tpw, double* __restrict__ d_tpe,
                                                    options opts)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;

	if (idx < opts.N)
	{
		// Pressure is either a constant level value or read per grid point.
		const double P = (opts.is_constant_pressure) ? opts.p_const : d_p[idx];

		// The helpers return kFloatMissing themselves on missing input, so
		// the previous "pre-fill with kFloatMissing then overwrite" dead
		// stores are removed: each output is written exactly once.
		if (opts.theta)
		{
			d_tp[idx] = Theta(opts.t_base + d_t[idx], P * opts.p_scale, opts);
		}

		if (opts.thetaw)
		{
			d_tpw[idx] = ThetaW(opts.t_base + d_t[idx], opts.p_scale * P, opts.td_base + d_td[idx], opts);
		}

		if (opts.thetae)
		{
			d_tpe[idx] = ThetaE(opts.t_base + d_t[idx], opts.p_scale * P, opts.td_base + d_td[idx], opts);
		}
	}
}
// Potential temperature: delegates to metutil::Theta_ when both inputs are
// present, otherwise propagates kFloatMissing.
__device__ double himan::plugin::tpot_cuda::Theta(double T, double P, options opts)
{
	if (T == kFloatMissing || P == kFloatMissing)
	{
		return kFloatMissing;
	}

	return metutil::Theta_(T, P);
}
// Wet-bulb potential temperature: derived from theta-e; a missing theta-e
// short-circuits to kFloatMissing.
__device__ double himan::plugin::tpot_cuda::ThetaW(double T, double P, double TD, options opts)
{
	const double thetaE = ThetaE(T, P, TD, opts);

	return (thetaE == kFloatMissing) ? kFloatMissing : metutil::ThetaW_(thetaE, P);
}
// Equivalent potential temperature via metutil::ThetaE_; returns
// kFloatMissing unless all three inputs are present.
__device__ double himan::plugin::tpot_cuda::ThetaE(double T, double P, double TD, double opts_unused = 0);
__device__ double himan::plugin::tpot_cuda::ThetaE(double T, double P, double TD, options opts)
{
	double value = kFloatMissing;

	// Fixed: the original used a bitwise '&' between the last two
	// comparisons ('... && P != kFloatMissing & TD != kFloatMissing');
	// the truth value happened to be the same, but '&&' is the intended
	// logical operator.
	if (T != kFloatMissing && P != kFloatMissing && TD != kFloatMissing)
	{
		value = metutil::ThetaE_(T, TD, P);
	}

	return value;
}
// Host-side driver: allocates device buffers, stages the inputs (T, P, TD as
// needed), launches the Calculate kernel on a private stream, and copies the
// requested outputs back before releasing everything.
void himan::plugin::tpot_cuda::Process(options& opts)
{
	hipStream_t stream;

	CUDA_CHECK(hipStreamCreate(&stream));

	double* d_t = 0;
	double* d_p = 0;
	double* d_td = 0;
	double* d_tp = 0;
	double* d_tpw = 0;
	double* d_tpe = 0;

	size_t memsize = opts.N * sizeof(double);

	// dims: 1-D launch, ceil(N / blockSize) blocks
	const int blockSize = 512;
	const int gridSize = opts.N / blockSize + (opts.N % blockSize == 0 ? 0 : 1);

	// Allocate memory on device -- one output buffer per requested variant
	if (opts.theta)
	{
		CUDA_CHECK(hipMalloc((void**)&d_tp, memsize));
		PrepareInfo(opts.tp);
	}

	if (opts.thetaw)
	{
		CUDA_CHECK(hipMalloc((void**)&d_tpw, memsize));
		PrepareInfo(opts.tpw);
	}

	if (opts.thetae)
	{
		CUDA_CHECK(hipMalloc((void**)&d_tpe, memsize));
		PrepareInfo(opts.tpe);
	}

	// Temperature is always required; pressure only when not constant.
	CUDA_CHECK(hipMalloc((void**)&d_t, memsize));

	PrepareInfo(opts.t, d_t, stream);

	if (!opts.is_constant_pressure)
	{
		CUDA_CHECK(hipMalloc((void**)&d_p, memsize));

		PrepareInfo(opts.p, d_p, stream);
	}

	// td: dewpoint is needed only by the theta-w / theta-e variants
	if (opts.thetaw || opts.thetae)
	{
		CUDA_CHECK(hipMalloc((void**)&d_td, memsize));
		PrepareInfo(opts.td, d_td, stream);
	}

	// Make sure all input staging has finished before launching.
	CUDA_CHECK(hipStreamSynchronize(stream));

	hipLaunchKernelGGL(( Calculate), dim3(gridSize), dim3(blockSize), 0, stream, d_t, d_p, d_td, d_tp, d_tpw, d_tpe, opts);

	// block until the device has completed
	CUDA_CHECK(hipStreamSynchronize(stream));

	// check if kernel execution generated an error
	CUDA_CHECK_ERROR_MSG("Kernel invocation");

	// Retrieve result from device, then free each output buffer
	if (opts.theta)
	{
		ReleaseInfo(opts.tp, d_tp, stream);
		CUDA_CHECK(hipFree(d_tp));
	}

	if (opts.thetaw)
	{
		ReleaseInfo(opts.tpw, d_tpw, stream);
		CUDA_CHECK(hipFree(d_tpw));
	}

	if (opts.thetae)
	{
		ReleaseInfo(opts.tpe, d_tpe, stream);
		CUDA_CHECK(hipFree(d_tpe));
	}

	CUDA_CHECK(hipFree(d_t));
	ReleaseInfo(opts.t);

	if (d_p)
	{
		CUDA_CHECK(hipFree(d_p));
		ReleaseInfo(opts.p);
	}

	if (d_td)
	{
		CUDA_CHECK(hipFree(d_td));
		ReleaseInfo(opts.td);
	}

	CUDA_CHECK(hipStreamDestroy(stream));
}
| ff9617d11ac7acca10decd4f6006b16ae469774e.cu | #include "cuda_plugin_helper.h"
#include "metutil.h"
#include "tpot.cuh"
__global__ void himan::plugin::tpot_cuda::Calculate(const double* __restrict__ d_t, const double* __restrict__ d_p,
const double* __restrict__ d_td, double* __restrict__ d_tp,
double* __restrict__ d_tpw, double* __restrict__ d_tpe,
options opts)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < opts.N)
{
double P = (opts.is_constant_pressure) ? opts.p_const : d_p[idx];
if (opts.theta)
{
d_tp[idx] = kFloatMissing;
d_tp[idx] = Theta(opts.t_base + d_t[idx], P * opts.p_scale, opts);
}
if (opts.thetaw)
{
d_tpw[idx] = kFloatMissing;
d_tpw[idx] = ThetaW(opts.t_base + d_t[idx], opts.p_scale * P, opts.td_base + d_td[idx], opts);
}
if (opts.thetae)
{
d_tpe[idx] = kFloatMissing;
d_tpe[idx] = ThetaE(opts.t_base + d_t[idx], opts.p_scale * P, opts.td_base + d_td[idx], opts);
}
}
}
__device__ double himan::plugin::tpot_cuda::Theta(double T, double P, options opts)
{
double theta = kFloatMissing;
if (T != kFloatMissing && P != kFloatMissing)
{
theta = metutil::Theta_(T, P);
}
return theta;
}
__device__ double himan::plugin::tpot_cuda::ThetaW(double T, double P, double TD, options opts)
{
double thetaE = ThetaE(T, P, TD, opts);
if (thetaE == kFloatMissing)
{
return kFloatMissing;
}
return metutil::ThetaW_(thetaE, P);
}
__device__ double himan::plugin::tpot_cuda::ThetaE(double T, double P, double TD, options opts)
{
double value = kFloatMissing;
if (T != kFloatMissing && P != kFloatMissing & TD != kFloatMissing)
{
value = metutil::ThetaE_(T, TD, P);
}
return value;
}
void himan::plugin::tpot_cuda::Process(options& opts)
{
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
double* d_t = 0;
double* d_p = 0;
double* d_td = 0;
double* d_tp = 0;
double* d_tpw = 0;
double* d_tpe = 0;
size_t memsize = opts.N * sizeof(double);
// dims
const int blockSize = 512;
const int gridSize = opts.N / blockSize + (opts.N % blockSize == 0 ? 0 : 1);
// Allocate memory on device
if (opts.theta)
{
CUDA_CHECK(cudaMalloc((void**)&d_tp, memsize));
PrepareInfo(opts.tp);
}
if (opts.thetaw)
{
CUDA_CHECK(cudaMalloc((void**)&d_tpw, memsize));
PrepareInfo(opts.tpw);
}
if (opts.thetae)
{
CUDA_CHECK(cudaMalloc((void**)&d_tpe, memsize));
PrepareInfo(opts.tpe);
}
CUDA_CHECK(cudaMalloc((void**)&d_t, memsize));
PrepareInfo(opts.t, d_t, stream);
if (!opts.is_constant_pressure)
{
CUDA_CHECK(cudaMalloc((void**)&d_p, memsize));
PrepareInfo(opts.p, d_p, stream);
}
// td
if (opts.thetaw || opts.thetae)
{
CUDA_CHECK(cudaMalloc((void**)&d_td, memsize));
PrepareInfo(opts.td, d_td, stream);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
Calculate<<<gridSize, blockSize, 0, stream>>>(d_t, d_p, d_td, d_tp, d_tpw, d_tpe, opts);
// block until the device has completed
CUDA_CHECK(cudaStreamSynchronize(stream));
// check if kernel execution generated an error
CUDA_CHECK_ERROR_MSG("Kernel invocation");
// Retrieve result from device
if (opts.theta)
{
ReleaseInfo(opts.tp, d_tp, stream);
CUDA_CHECK(cudaFree(d_tp));
}
if (opts.thetaw)
{
ReleaseInfo(opts.tpw, d_tpw, stream);
CUDA_CHECK(cudaFree(d_tpw));
}
if (opts.thetae)
{
ReleaseInfo(opts.tpe, d_tpe, stream);
CUDA_CHECK(cudaFree(d_tpe));
}
CUDA_CHECK(cudaFree(d_t));
ReleaseInfo(opts.t);
if (d_p)
{
CUDA_CHECK(cudaFree(d_p));
ReleaseInfo(opts.p);
}
if (d_td)
{
CUDA_CHECK(cudaFree(d_td));
ReleaseInfo(opts.td);
}
CUDA_CHECK(cudaStreamDestroy(stream));
}
|
11125850173c7d990bea7a7b59fc02ecf5dfe9cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <source3.cuh>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h>
#include "c:/Users/mtf_d/Desktop/src/IPoint.h"
#include "c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h"
#include "c:/Users/mtf_d/Desktop/src/StackedPoints.h"
#include <c:/Users/mtf_d/Desktop/src/UnclassifiedPoints.h>
#include <windows.h>
#include <cstdio>
#include <ctime>
#include <hiprand/hiprand.h>
//#include "c:/Users/mtf_d/Desktop/src/PointVector.h"
namespace mcc
{
inline void GPUassert(hipError_t code, char *file, int line, bool Abort = true)
{
if (code != 0)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (Abort)
exit(code);
}
}
#define GPUerrchk(ans) \
{ \
GPUassert((ans), __FILE__, __LINE__); \
}
__global__ void basarii(double ***A, int Asize)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
//printf("thread id : %d", threadID);
//printf(" %f\n", A[threadID][threadID][threadID]);
for (int i = threadID; i < Asize; i++)
{
int tSize = A[i][0][3];
for (int j = 0; j < tSize; j++)
{
//std::cout << A[i][j][0] << " ";
printf("X:%f %d", A[i][j][0], threadID);
//std::cout << A[i][j][1] << " ";
printf("Y:%f %d", A[i][j][1], threadID);
//std::cout << A[i][j][3] << " ";
printf("Z:%f %d", A[i][j][2], threadID);
//std::cout << A[i][j][4] << " ";
printf("S:%f %d", A[i][j][3], threadID);
printf("\n");
}
printf("\n");
}
}
/*__global__ void cel(double ***A)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID == 0)
{
printf("in global");
}
int t = A[threadID][0][3];
/*for (int j = 0; j < t; j++) {
//std::cout << A[i][j][0] << " ";
printf("X:%f %d", A[threadID][j][0], threadID);
//std::cout << A[i][j][1] << " ";
printf("Y:%f %d", A[threadID][j][1], threadID);
//std::cout << A[i][j][3] << " ";
printf("Z:%f %d", A[threadID][j][2], threadID);
//std::cout << A[i][j][4] << " ";
printf("S:%f %d", A[threadID][j][3], threadID);
printf("\n");
}*/
//printf("global ici double pointer dec\n");
/*double **res = new double *[t];
for (int i = 0; i < t; i++)
{
res[i] = new double[4];
}
//printf("global ici double pointer id\n");
for (int i = 0; i < t; i++)
{
for (int j = 0; j < 4; j++)
{
res[i][j] = A[threadID][i][j];
}
}
for (int i = 0; i < t; i++)
{
printf("global ici res x : %f", res[i][0]);
printf("global ici res y : %f", res[i][1]);
printf("global ici res z : %f", res[i][2]);
printf("global ici res s : %f", res[i][3]);
printf("threadID : %d", threadID);
printf("\n");
}
printf("global ");
printf("%d\n", threadID);
printf("cells x: %f cells y: %f", cells[threadID][0][1], cells[threadID][0][2]);*/
/*int twos = cells[threadID][0][0];
printf("global ici double pointer dec\n");
double **res = new double*[twos];
for (int i = 0; i < 8; i++) {
res[i] = new double[3];
}
printf("global ici double pointer id\n");
for (int i = 0; i < twos; i++) {
for (int j = 0; j < 3; j++) {
res[i][j] = cells[threadID][i][j];
}
}
for (int i = 0; i < twos; i++) {
for (int j = 0; j < 3; j++) {
printf("global ici res : %f\n", res[i][j]);
}
}
}*/
/*if (threadID == 630) {
for (int i = 630; i < 631; i++) {
int twoS = cells[i][0][0];
for (int j = 0; j < twoS; j++) {
printf("\nres %d : %f ", i, cells[i][j][1]);
printf("res %d : %f\n", i, cells[i][j][2]);
}
printf("\n");
}
printf("global ici 631.deger\n");
for (int j = 0; j < points[631][0][3]; j++) {
//std::cout << A[i][j][0] << " ";
printf("X:%f ", points[631][j][0]);
//std::cout << A[i][j][1] << " ";
printf("Y:%f ", points[631][j][1]);
//std::cout << A[i][j][3] << " ";
printf("Z:%f ", points[631][j][2]);
//std::cout << A[i][j][4] << " ";
printf("S:%f ", points[631][j][3]);
printf("\n");
}
}*/
__global__ void spline(double ***points, double ***cells, double ***res, double ***mtx_l, double **mtx_v, int size)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
//if (threadID > size) return;
if (threadID < size)
{
int p = points[threadID][0][3];
//double *mtx_v = new double[p + 3];
/*if (threadID == 512) {
printf("end dec\n");
}*/
//
//double a = 0.0;
for (int i = 0; i < p; ++i)
{
for (int j = i + 1; j < p; ++j)
{
//double pt_x = points[threadID][i][0] - points[threadID][j][0];
//double pt_z = points[threadID][i][2] - points[threadID][j][2];
//double x = pt_x * pt_x;
//double z = pt_z * pt_z;
double elen = sqrt(((points[threadID][i][0] - points[threadID][j][0])*(points[threadID][i][0] - points[threadID][j][0])) + ((points[threadID][i][2] - points[threadID][j][2])*(points[threadID][i][2] - points[threadID][j][2])));
if (elen == 0)
{
mtx_l[threadID][i][j] = mtx_l[threadID][j][i] = 0.0;
}
else
{
mtx_l[threadID][i][j] = mtx_l[threadID][j][i] = elen * elen * log(elen);
}
//a += elen * 2;
if (i == 0 && j == 1)
{
//printf("%f", elen);
}
}
}
//printf("first for\n");
//a /= (double)(p * p);
//printf("a : %f\n", a);
for (int i = 0; i < p; ++i)
{
// diagonal: reqularization parameters (lambda * a^2)
/*if (i == 0) {
std::cout << "sirayla i mtx_l(0,1)" << i << mtx_l(0, 1) << std::endl;
}*/
mtx_l[threadID][i][i] = 0.0;
// P (p x 3, upper right)
mtx_l[threadID][i][p + 0] = 1.0;
mtx_l[threadID][i][p + 1] = points[threadID][i][0];
mtx_l[threadID][i][p + 2] = points[threadID][i][2];
// P transposed (3 x p, bottom left)
mtx_l[threadID][p + 0][i] = 1.0;
mtx_l[threadID][p + 1][i] = points[threadID][i][0];
mtx_l[threadID][p + 2][i] = points[threadID][i][2];
}
//printf("second for\n");
// O (3 x 3, lower right)
for (int i = p; i < p + 3; ++i)
for (int j = p; j < p + 3; ++j)
mtx_l[threadID][i][j] = 0.0;
//printf("third for\n");
// Fill the right hand vector V
for (int i = 0; i < p; ++i)
mtx_v[threadID][i] = points[threadID][i][1];
mtx_v[threadID][p + 0] = mtx_v[threadID][p + 1] = mtx_v[threadID][p + 2] = 0.0;
//printf("fourth for\n");
/*if (threadID == 512) {
printf("mtx_v\n");
for (int i = 0; i < p+3; i++) {
printf("%f\n", mtx_v[i]);
}
printf("mtx_l\n");
for (int i = 0; i < p+3; i++) {
for (int j = 0; j < p+3; j++) {
printf("%f ", mtx_l[i][j]);
}
printf("\n");
}
}*/
int m = p + 3, n = p + 3;
//int pivsign = 0;
int *piv = new int[m];
for (int i = 0; i < m; ++i)
piv[i] = i;
//pivsign = 1;
for (int j = 0; j < n; ++j)
{
double *col = new double[m];
for (int i = 0; i < m; i++)
{
col[i] = mtx_l[threadID][i][j];
}
double *row = new double[n];
for (int i = 0; i < m; ++i)
{
for (int l = 0; l < n; l++)
{
row[l] = mtx_l[threadID][i][l];
}
int kmax = fminf(i, j);
double s = 0.0;
for (int k = 0; k < kmax; k++)
{
s += row[k] * col[k];
}
row[j] = col[i] -= s;
for (int l = 0; l < m; l++)
{
mtx_l[threadID][l][j] = col[l];
}
for (int l = 0; l < m; l++)
{
mtx_l[threadID][i][l] = row[l];
}
}
free(row);
int p = j;
for (int i = j + 1; i < m; i++)
{
if (fabs(col[i]) > fabs(col[p]))
{
p = i;
}
}
free(col);
if (p != j)
{
for (int k = 0; k < n; k++)
{
double t = mtx_l[threadID][p][k];
mtx_l[threadID][p][k] = mtx_l[threadID][j][k];
mtx_l[threadID][j][k] = t;
}
int k = piv[p];
piv[p] = piv[j];
piv[j] = k;
//pivsign = -pivsign;
}
if (j < m && mtx_l[threadID][j][j] != 0.0)
{
for (int i = j + 1; i < m; i++)
{
mtx_l[threadID][i][j] /= mtx_l[threadID][j][j];
}
}
}
double y = 0;
for (int i = 0; i < m; ++i)
{
if (piv[i] != i)
{
y = mtx_v[threadID][i];
mtx_v[threadID][i] = mtx_v[threadID][piv[i]];
mtx_v[threadID][piv[i]] = y;
}
for (int j = i; j < m; ++j)
if (piv[j] == i)
{
piv[j] = piv[i];
break;
}
}
free(piv);
for (int k = 0; k < n; k++)
{
for (int i = k + 1; i < n; i++)
{
mtx_v[threadID][i] -= mtx_v[threadID][k] * mtx_l[threadID][i][k];
}
}
/*printf("mtx_v\n");
for (int i = 0; i < m; i++) {
printf("%f\n", mtx_v[i]);
}*/
for (int k = n - 1; k >= 0; k--)
{
mtx_v[threadID][k] /= mtx_l[threadID][k][k];
//printf("1. %f\n", mtx_v[k]);
for (int i = 0; i < k; i++)
{
mtx_v[threadID][i] -= mtx_v[threadID][k] * mtx_l[threadID][i][k];
}
//printf("2. %f\n", mtx_v[k]);
}
/*printf("mtx_v\n");
for (int i = 0; i < m; i++) {
printf("%f\n", mtx_v[i]);
}*/
int trn = cells[threadID][0][0];
//printf("trn : %d\n", trn);
for (int j = 0; j < trn; j++)
{
/*printf("mtx_v[p+0] : %f", mtx_v[p + 0]);
printf("mtx_v[p + 1] : %f", mtx_v[p + 1]);
printf("cells[threadID][j][1] : %f", cells[threadID][j][1]);
printf("mtx_v[p + 2] : %f", mtx_v[p + 2]);
printf("cells[threadID][j][2] : %f", cells[threadID][j][2]);*/
double h = mtx_v[threadID][p + 0] + mtx_v[threadID][p + 1] * cells[threadID][j][1] + mtx_v[threadID][p + 2] * cells[threadID][j][2];
//printf("global ici h : %f\n", h);
double cx = cells[threadID][j][1];
double cy = cells[threadID][j][2];
for (int i = 0; i < p; i++)
{
double x = points[threadID][i][0] - cx;
double z = points[threadID][i][2] - cy;
double xx = x * x;
double zz = z * z;
double elen = sqrtf(xx + zz);
//printf("elen : %f ", elen);
//printf("sqrtf ile : %f ", sqrtf((x*x) + (z*z)));
if (elen == 0.0)
{
h += mtx_v[threadID][i] * 0.0;
}
else
{
h += mtx_v[threadID][i] * (elen * elen * logf(elen));
}
//printf("res h: %f\n", h);
}
//printf("\nson res h: %f", h);
res[threadID][j][0] = h;
//printf("\nres[threadID][j][0] = %f", res[threadID][j][0]);
/*if (threadID == 0)
{
//printf("\n%d. thread h: %f res[0] : %f \n", threadID, h, res[threadID][j][0]);
//res[threadID + 1][j][0] = res[threadID][j][0];
//printf("\n%d. thread h: %f res[631] : %f \n", threadID + 1, h, res[threadID + 1][j][0]);
}*/
}
//__syncthreads();
free(mtx_v);
}
}
void source3::cells(double ***A, double ***cellsize, double ***h_res, int Asize, int celS){
//allocate control_points
std::cout << "allocating mainArray " << std::endl;
double ***h_c = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3];
h_c[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
GPUerrchk(hipMalloc((void **)&h_c[i][j], 4 * sizeof(double)));
GPUerrchk(hipMemcpy(h_c[i][j], A[i][j], 4 * sizeof(double), hipMemcpyHostToDevice));
}
}
double ***h_c1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3];
GPUerrchk(hipMalloc((void ***)&(h_c1[i]), twoSize * sizeof(double *)));
GPUerrchk(hipMemcpy(h_c1[i], h_c[i], twoSize * sizeof(double *), hipMemcpyHostToDevice));
}
double ***d_c;
GPUerrchk(hipMalloc((void ****)&d_c, Asize * sizeof(double **)));
GPUerrchk(hipMemcpy(d_c, h_c1, Asize * sizeof(double **), hipMemcpyHostToDevice));
//allocate cells
std::cout << "allocating cellsArray" << std::endl;
double ***cells1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
cells1[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
GPUerrchk(hipMalloc((void **)&cells1[i][j], 3 * sizeof(double)));
GPUerrchk(hipMemcpy(cells1[i][j], cellsize[i][j], 3 * sizeof(double), hipMemcpyHostToDevice));
}
}
double ***cells = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
GPUerrchk(hipMalloc((void ***)&(cells[i]), twoSize * sizeof(double *)));
GPUerrchk(hipMemcpy(cells[i], cells1[i], twoSize * sizeof(double *), hipMemcpyHostToDevice));
}
double ***d_cells;
GPUerrchk(hipMalloc((void ****)&d_cells, Asize * sizeof(double **)));
GPUerrchk(hipMemcpy(d_cells, cells, Asize * sizeof(double **), hipMemcpyHostToDevice));
//allocate res
std::cout << "allocating resArray" << std::endl;
double ***res1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
res1[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
GPUerrchk(hipMalloc((void **)&res1[i][j], 1 * sizeof(double)));
}
}
double ***res2 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
GPUerrchk(hipMalloc((void ***)&res2[i], twoSize * sizeof(double *)));
GPUerrchk(hipMemcpy(res2[i], res1[i], twoSize * sizeof(double *), hipMemcpyHostToDevice));
}
double ***d_res;
GPUerrchk(hipMalloc((void ****)&d_res, Asize * sizeof(double ***)));
GPUerrchk(hipMemcpy(d_res, res2, Asize * sizeof(double **), hipMemcpyHostToDevice));
std::cout << "allocating mtx_l" << std::endl;
double ***mtx_l1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3] + 3;
mtx_l1[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
hipMalloc((void **)&mtx_l1[i][j], twoSize * sizeof(double));
}
}
double ***mtx_l2 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3] + 3;
hipMalloc((void ***)&(mtx_l2[i]), twoSize * sizeof(double *));
hipMemcpy(mtx_l2[i], mtx_l1[i], twoSize * sizeof(double *), hipMemcpyHostToDevice);
}
double ***d_mtx_l;
hipMalloc((void ****)&d_mtx_l, Asize * sizeof(double **));
hipMemcpy(d_mtx_l, mtx_l2, Asize * sizeof(double **), hipMemcpyHostToDevice);
std::cout << "allocating mtx_v" << std::endl;
double **mtx_v1 = (double **)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3];
hipMalloc(&mtx_v1[i], twoSize * sizeof(double));
}
double **d_mtx_v = (double **)malloc(Asize * sizeof(double **));
hipMalloc(&d_mtx_v, Asize * sizeof(double **));
hipMemcpy(d_mtx_v, mtx_v1, Asize * sizeof(double), hipMemcpyHostToDevice);
unsigned int numberOfThreads = Asize;
unsigned int requiredNumberOfBlocks = (numberOfThreads / 1024) + 1;
dim3 block = dim3(1024, 1, 1);
dim3 grid = dim3(requiredNumberOfBlocks, 1, 1);
std::clock_t start;
double duration;
start = std::clock();
//hipDeviceSetLimit(hipLimitMallocHeapSize, 32*1024*1024);
printf("launch kernel ");
hipLaunchKernelGGL(( spline), dim3(grid), dim3(block), 0, 0, d_c, d_cells, d_res, d_mtx_l, d_mtx_v, Asize); //---->>>>>><<<<<<<>>>>>>><<<<<<<>>>>>>><<<<<<>>>>
printf("end kernel \n");
hipDeviceSynchronize();
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
std::cout << "kernel process time : " << duration << " seconds" << std::endl;
std::cout << std::endl;
printf("copy gpu to ram");
for (int i = 0; i < Asize; i++)
{
int twoS = cellsize[i][0][0];
for (int j = 0; j < twoS; j++)
{
hipMemcpy(&h_res[i][j][0], res1[i][j], 1 * sizeof(double), hipMemcpyDeviceToHost);
}
}
}
} | 11125850173c7d990bea7a7b59fc02ecf5dfe9cf.cu | #include <cuda_runtime.h>
#include <source3.cuh>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <helper_functions.h>
#include <c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h>
#include "c:/Users/mtf_d/Desktop/src/IPoint.h"
#include "c:/Users/mtf_d/Desktop/src/IUnclassifiedPoints.h"
#include "c:/Users/mtf_d/Desktop/src/StackedPoints.h"
#include <c:/Users/mtf_d/Desktop/src/UnclassifiedPoints.h>
#include <windows.h>
#include <cstdio>
#include <ctime>
#include <curand.h>
//#include "c:/Users/mtf_d/Desktop/src/PointVector.h"
namespace mcc
{
inline void GPUassert(cudaError_t code, char *file, int line, bool Abort = true)
{
if (code != 0)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (Abort)
exit(code);
}
}
#define GPUerrchk(ans) \
{ \
GPUassert((ans), __FILE__, __LINE__); \
}
__global__ void basarii(double ***A, int Asize)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
//printf("thread id : %d", threadID);
//printf(" %f\n", A[threadID][threadID][threadID]);
for (int i = threadID; i < Asize; i++)
{
int tSize = A[i][0][3];
for (int j = 0; j < tSize; j++)
{
//std::cout << A[i][j][0] << " ";
printf("X:%f %d", A[i][j][0], threadID);
//std::cout << A[i][j][1] << " ";
printf("Y:%f %d", A[i][j][1], threadID);
//std::cout << A[i][j][3] << " ";
printf("Z:%f %d", A[i][j][2], threadID);
//std::cout << A[i][j][4] << " ";
printf("S:%f %d", A[i][j][3], threadID);
printf("\n");
}
printf("\n");
}
}
/*__global__ void cel(double ***A)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID == 0)
{
printf("in global");
}
int t = A[threadID][0][3];
/*for (int j = 0; j < t; j++) {
//std::cout << A[i][j][0] << " ";
printf("X:%f %d", A[threadID][j][0], threadID);
//std::cout << A[i][j][1] << " ";
printf("Y:%f %d", A[threadID][j][1], threadID);
//std::cout << A[i][j][3] << " ";
printf("Z:%f %d", A[threadID][j][2], threadID);
//std::cout << A[i][j][4] << " ";
printf("S:%f %d", A[threadID][j][3], threadID);
printf("\n");
}*/
//printf("global ici double pointer dec\n");
/*double **res = new double *[t];
for (int i = 0; i < t; i++)
{
res[i] = new double[4];
}
//printf("global ici double pointer id\n");
for (int i = 0; i < t; i++)
{
for (int j = 0; j < 4; j++)
{
res[i][j] = A[threadID][i][j];
}
}
for (int i = 0; i < t; i++)
{
printf("global ici res x : %f", res[i][0]);
printf("global ici res y : %f", res[i][1]);
printf("global ici res z : %f", res[i][2]);
printf("global ici res s : %f", res[i][3]);
printf("threadID : %d", threadID);
printf("\n");
}
printf("global ");
printf("%d\n", threadID);
printf("cells x: %f cells y: %f", cells[threadID][0][1], cells[threadID][0][2]);*/
/*int twos = cells[threadID][0][0];
printf("global ici double pointer dec\n");
double **res = new double*[twos];
for (int i = 0; i < 8; i++) {
res[i] = new double[3];
}
printf("global ici double pointer id\n");
for (int i = 0; i < twos; i++) {
for (int j = 0; j < 3; j++) {
res[i][j] = cells[threadID][i][j];
}
}
for (int i = 0; i < twos; i++) {
for (int j = 0; j < 3; j++) {
printf("global ici res : %f\n", res[i][j]);
}
}
}*/
/*if (threadID == 630) {
for (int i = 630; i < 631; i++) {
int twoS = cells[i][0][0];
for (int j = 0; j < twoS; j++) {
printf("\nres %d : %f ", i, cells[i][j][1]);
printf("res %d : %f\n", i, cells[i][j][2]);
}
printf("\n");
}
printf("global ici 631.deger\n");
for (int j = 0; j < points[631][0][3]; j++) {
//std::cout << A[i][j][0] << " ";
printf("X:%f ", points[631][j][0]);
//std::cout << A[i][j][1] << " ";
printf("Y:%f ", points[631][j][1]);
//std::cout << A[i][j][3] << " ";
printf("Z:%f ", points[631][j][2]);
//std::cout << A[i][j][4] << " ";
printf("S:%f ", points[631][j][3]);
printf("\n");
}
}*/
__global__ void spline(double ***points, double ***cells, double ***res, double ***mtx_l, double **mtx_v, int size)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
//if (threadID > size) return;
if (threadID < size)
{
int p = points[threadID][0][3];
//double *mtx_v = new double[p + 3];
/*if (threadID == 512) {
printf("end dec\n");
}*/
//
//double a = 0.0;
for (int i = 0; i < p; ++i)
{
for (int j = i + 1; j < p; ++j)
{
//double pt_x = points[threadID][i][0] - points[threadID][j][0];
//double pt_z = points[threadID][i][2] - points[threadID][j][2];
//double x = pt_x * pt_x;
//double z = pt_z * pt_z;
double elen = sqrt(((points[threadID][i][0] - points[threadID][j][0])*(points[threadID][i][0] - points[threadID][j][0])) + ((points[threadID][i][2] - points[threadID][j][2])*(points[threadID][i][2] - points[threadID][j][2])));
if (elen == 0)
{
mtx_l[threadID][i][j] = mtx_l[threadID][j][i] = 0.0;
}
else
{
mtx_l[threadID][i][j] = mtx_l[threadID][j][i] = elen * elen * log(elen);
}
//a += elen * 2;
if (i == 0 && j == 1)
{
//printf("%f", elen);
}
}
}
//printf("first for\n");
//a /= (double)(p * p);
//printf("a : %f\n", a);
for (int i = 0; i < p; ++i)
{
// diagonal: reqularization parameters (lambda * a^2)
/*if (i == 0) {
std::cout << "sirayla i mtx_l(0,1)" << i << mtx_l(0, 1) << std::endl;
}*/
mtx_l[threadID][i][i] = 0.0;
// P (p x 3, upper right)
mtx_l[threadID][i][p + 0] = 1.0;
mtx_l[threadID][i][p + 1] = points[threadID][i][0];
mtx_l[threadID][i][p + 2] = points[threadID][i][2];
// P transposed (3 x p, bottom left)
mtx_l[threadID][p + 0][i] = 1.0;
mtx_l[threadID][p + 1][i] = points[threadID][i][0];
mtx_l[threadID][p + 2][i] = points[threadID][i][2];
}
//printf("second for\n");
// O (3 x 3, lower right)
for (int i = p; i < p + 3; ++i)
for (int j = p; j < p + 3; ++j)
mtx_l[threadID][i][j] = 0.0;
//printf("third for\n");
// Fill the right hand vector V
for (int i = 0; i < p; ++i)
mtx_v[threadID][i] = points[threadID][i][1];
mtx_v[threadID][p + 0] = mtx_v[threadID][p + 1] = mtx_v[threadID][p + 2] = 0.0;
//printf("fourth for\n");
/*if (threadID == 512) {
printf("mtx_v\n");
for (int i = 0; i < p+3; i++) {
printf("%f\n", mtx_v[i]);
}
printf("mtx_l\n");
for (int i = 0; i < p+3; i++) {
for (int j = 0; j < p+3; j++) {
printf("%f ", mtx_l[i][j]);
}
printf("\n");
}
}*/
int m = p + 3, n = p + 3;
//int pivsign = 0;
int *piv = new int[m];
for (int i = 0; i < m; ++i)
piv[i] = i;
//pivsign = 1;
for (int j = 0; j < n; ++j)
{
double *col = new double[m];
for (int i = 0; i < m; i++)
{
col[i] = mtx_l[threadID][i][j];
}
double *row = new double[n];
for (int i = 0; i < m; ++i)
{
for (int l = 0; l < n; l++)
{
row[l] = mtx_l[threadID][i][l];
}
int kmax = fminf(i, j);
double s = 0.0;
for (int k = 0; k < kmax; k++)
{
s += row[k] * col[k];
}
row[j] = col[i] -= s;
for (int l = 0; l < m; l++)
{
mtx_l[threadID][l][j] = col[l];
}
for (int l = 0; l < m; l++)
{
mtx_l[threadID][i][l] = row[l];
}
}
free(row);
int p = j;
for (int i = j + 1; i < m; i++)
{
if (fabs(col[i]) > fabs(col[p]))
{
p = i;
}
}
free(col);
if (p != j)
{
for (int k = 0; k < n; k++)
{
double t = mtx_l[threadID][p][k];
mtx_l[threadID][p][k] = mtx_l[threadID][j][k];
mtx_l[threadID][j][k] = t;
}
int k = piv[p];
piv[p] = piv[j];
piv[j] = k;
//pivsign = -pivsign;
}
if (j < m && mtx_l[threadID][j][j] != 0.0)
{
for (int i = j + 1; i < m; i++)
{
mtx_l[threadID][i][j] /= mtx_l[threadID][j][j];
}
}
}
double y = 0;
for (int i = 0; i < m; ++i)
{
if (piv[i] != i)
{
y = mtx_v[threadID][i];
mtx_v[threadID][i] = mtx_v[threadID][piv[i]];
mtx_v[threadID][piv[i]] = y;
}
for (int j = i; j < m; ++j)
if (piv[j] == i)
{
piv[j] = piv[i];
break;
}
}
free(piv);
for (int k = 0; k < n; k++)
{
for (int i = k + 1; i < n; i++)
{
mtx_v[threadID][i] -= mtx_v[threadID][k] * mtx_l[threadID][i][k];
}
}
/*printf("mtx_v\n");
for (int i = 0; i < m; i++) {
printf("%f\n", mtx_v[i]);
}*/
for (int k = n - 1; k >= 0; k--)
{
mtx_v[threadID][k] /= mtx_l[threadID][k][k];
//printf("1. %f\n", mtx_v[k]);
for (int i = 0; i < k; i++)
{
mtx_v[threadID][i] -= mtx_v[threadID][k] * mtx_l[threadID][i][k];
}
//printf("2. %f\n", mtx_v[k]);
}
/*printf("mtx_v\n");
for (int i = 0; i < m; i++) {
printf("%f\n", mtx_v[i]);
}*/
int trn = cells[threadID][0][0];
//printf("trn : %d\n", trn);
for (int j = 0; j < trn; j++)
{
/*printf("mtx_v[p+0] : %f", mtx_v[p + 0]);
printf("mtx_v[p + 1] : %f", mtx_v[p + 1]);
printf("cells[threadID][j][1] : %f", cells[threadID][j][1]);
printf("mtx_v[p + 2] : %f", mtx_v[p + 2]);
printf("cells[threadID][j][2] : %f", cells[threadID][j][2]);*/
double h = mtx_v[threadID][p + 0] + mtx_v[threadID][p + 1] * cells[threadID][j][1] + mtx_v[threadID][p + 2] * cells[threadID][j][2];
//printf("global ici h : %f\n", h);
double cx = cells[threadID][j][1];
double cy = cells[threadID][j][2];
for (int i = 0; i < p; i++)
{
double x = points[threadID][i][0] - cx;
double z = points[threadID][i][2] - cy;
double xx = x * x;
double zz = z * z;
double elen = sqrtf(xx + zz);
//printf("elen : %f ", elen);
//printf("sqrtf ile : %f ", sqrtf((x*x) + (z*z)));
if (elen == 0.0)
{
h += mtx_v[threadID][i] * 0.0;
}
else
{
h += mtx_v[threadID][i] * (elen * elen * logf(elen));
}
//printf("res h: %f\n", h);
}
//printf("\nson res h: %f", h);
res[threadID][j][0] = h;
//printf("\nres[threadID][j][0] = %f", res[threadID][j][0]);
/*if (threadID == 0)
{
//printf("\n%d. thread h: %f res[0] : %f \n", threadID, h, res[threadID][j][0]);
//res[threadID + 1][j][0] = res[threadID][j][0];
//printf("\n%d. thread h: %f res[631] : %f \n", threadID + 1, h, res[threadID + 1][j][0]);
}*/
}
//__syncthreads();
free(mtx_v);
}
}
void source3::cells(double ***A, double ***cellsize, double ***h_res, int Asize, int celS){
//allocate control_points
std::cout << "allocating mainArray " << std::endl;
double ***h_c = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3];
h_c[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
GPUerrchk(cudaMalloc((void **)&h_c[i][j], 4 * sizeof(double)));
GPUerrchk(cudaMemcpy(h_c[i][j], A[i][j], 4 * sizeof(double), cudaMemcpyHostToDevice));
}
}
double ***h_c1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3];
GPUerrchk(cudaMalloc((void ***)&(h_c1[i]), twoSize * sizeof(double *)));
GPUerrchk(cudaMemcpy(h_c1[i], h_c[i], twoSize * sizeof(double *), cudaMemcpyHostToDevice));
}
double ***d_c;
GPUerrchk(cudaMalloc((void ****)&d_c, Asize * sizeof(double **)));
GPUerrchk(cudaMemcpy(d_c, h_c1, Asize * sizeof(double **), cudaMemcpyHostToDevice));
//allocate cells
std::cout << "allocating cellsArray" << std::endl;
double ***cells1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
cells1[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
GPUerrchk(cudaMalloc((void **)&cells1[i][j], 3 * sizeof(double)));
GPUerrchk(cudaMemcpy(cells1[i][j], cellsize[i][j], 3 * sizeof(double), cudaMemcpyHostToDevice));
}
}
double ***cells = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
GPUerrchk(cudaMalloc((void ***)&(cells[i]), twoSize * sizeof(double *)));
GPUerrchk(cudaMemcpy(cells[i], cells1[i], twoSize * sizeof(double *), cudaMemcpyHostToDevice));
}
double ***d_cells;
GPUerrchk(cudaMalloc((void ****)&d_cells, Asize * sizeof(double **)));
GPUerrchk(cudaMemcpy(d_cells, cells, Asize * sizeof(double **), cudaMemcpyHostToDevice));
//allocate res
std::cout << "allocating resArray" << std::endl;
double ***res1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
res1[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
GPUerrchk(cudaMalloc((void **)&res1[i][j], 1 * sizeof(double)));
}
}
double ***res2 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = cellsize[i][0][0];
GPUerrchk(cudaMalloc((void ***)&res2[i], twoSize * sizeof(double *)));
GPUerrchk(cudaMemcpy(res2[i], res1[i], twoSize * sizeof(double *), cudaMemcpyHostToDevice));
}
double ***d_res;
GPUerrchk(cudaMalloc((void ****)&d_res, Asize * sizeof(double ***)));
GPUerrchk(cudaMemcpy(d_res, res2, Asize * sizeof(double **), cudaMemcpyHostToDevice));
std::cout << "allocating mtx_l" << std::endl;
double ***mtx_l1 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3] + 3;
mtx_l1[i] = (double **)malloc(twoSize * sizeof(double *));
for (int j = 0; j < twoSize; j++)
{
cudaMalloc((void **)&mtx_l1[i][j], twoSize * sizeof(double));
}
}
double ***mtx_l2 = (double ***)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3] + 3;
cudaMalloc((void ***)&(mtx_l2[i]), twoSize * sizeof(double *));
cudaMemcpy(mtx_l2[i], mtx_l1[i], twoSize * sizeof(double *), cudaMemcpyHostToDevice);
}
double ***d_mtx_l;
cudaMalloc((void ****)&d_mtx_l, Asize * sizeof(double **));
cudaMemcpy(d_mtx_l, mtx_l2, Asize * sizeof(double **), cudaMemcpyHostToDevice);
std::cout << "allocating mtx_v" << std::endl;
double **mtx_v1 = (double **)malloc(Asize * sizeof(double **));
for (int i = 0; i < Asize; i++)
{
int twoSize = A[i][0][3];
cudaMalloc(&mtx_v1[i], twoSize * sizeof(double));
}
double **d_mtx_v = (double **)malloc(Asize * sizeof(double **));
cudaMalloc(&d_mtx_v, Asize * sizeof(double **));
cudaMemcpy(d_mtx_v, mtx_v1, Asize * sizeof(double), cudaMemcpyHostToDevice);
unsigned int numberOfThreads = Asize;
unsigned int requiredNumberOfBlocks = (numberOfThreads / 1024) + 1;
dim3 block = dim3(1024, 1, 1);
dim3 grid = dim3(requiredNumberOfBlocks, 1, 1);
std::clock_t start;
double duration;
start = std::clock();
//cudaDeviceSetLimit(cudaLimitMallocHeapSize, 32*1024*1024);
printf("launch kernel ");
spline<<<grid, block>>>(d_c, d_cells, d_res, d_mtx_l, d_mtx_v, Asize); //---->>>>>><<<<<<<>>>>>>><<<<<<<>>>>>>><<<<<<>>>>
printf("end kernel \n");
cudaDeviceSynchronize();
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
std::cout << "kernel process time : " << duration << " seconds" << std::endl;
std::cout << std::endl;
printf("copy gpu to ram");
for (int i = 0; i < Asize; i++)
{
int twoS = cellsize[i][0][0];
for (int j = 0; j < twoS; j++)
{
cudaMemcpy(&h_res[i][j][0], res1[i][j], 1 * sizeof(double), cudaMemcpyDeviceToHost);
}
}
}
} |
74d139599616cd569df8e4facfa7a8408da19bb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cupp/deviceT/vector.h"
#include "cupp/common.h"
#include "OpenSteer/deviceT/Vec3.h"
#include "OpenSteer/CuPPConfig.h"
#include "OpenSteer/kernels.h"
#include "ds/deviceT/dyn_grid.h"
#include "OpenSteer/PosIndexPair.h"
using OpenSteer::PosIndexPair;
using namespace OpenSteer::deviceT;
using ds::dyn_grid_node;
// Component-wise addition for CUDA's built-in int3 vector type.
// (The parameter names rhs/lhs are swapped relative to convention in the
// original signature; addition is commutative, so this is harmless.)
__device__ int3 operator+ (const int3& rhs, const int3& lhs) {
    int3 sum;
    sum.x = lhs.x + rhs.x;
    sum.y = lhs.y + rhs.y;
    sum.z = lhs.z + rhs.z;
    return sum;
}
// Scans one grid node ("neighbour_block") for agents within the interaction
// radius of this thread's agent and accumulates up to neighbour_size_max of
// them; once the list is full, a farther entry is evicted in favour of a
// closer candidate.
//
// Block-cooperative: candidates are staged through shared memory one
// threads_per_block-sized window at a time, so EVERY thread of the CUDA
// block must call this function (it contains block-wide __syncthreads()
// barriers), even threads with no agent of their own
// (threadIdx.x >= cur_block.size).
//
// Parameters:
//   grid                        - spatial grid being searched
//   neighbour_block             - grid node whose agents are candidates
//   cur_block                   - grid node owned by this CUDA block
//   my_index, position          - identity/position of this thread's agent;
//                                 only meaningful for
//                                 threadIdx.x < cur_block.size
//   neighbours_found            - in/out: number of valid entries in
//                                 neighbours[]
//   neighbours                  - out: agent indices of accepted neighbours
//   neighbours_distance_squared - out: squared distance per accepted entry
__device__ void neighbour_search(
    const ds::deviceT::dyn_grid &grid,
    const dyn_grid_node& neighbour_block, const dyn_grid_node& cur_block,
    const unsigned int& my_index, const Vec3 &position,
    int &neighbours_found, int *neighbours, float *neighbours_distance_squared
) {
    // Staging buffer: one window of candidate (position, index) pairs.
    __shared__ PosIndexPair s_PosIndex[threads_per_block];
    const int end = neighbour_block.size;
    // Walk the candidate list window by window.
    for (int base=0; base < end; base+=threads_per_block) {
        // Cooperative load: each thread fetches one candidate of the window.
        if (base + threadIdx.x < end) {
            s_PosIndex[threadIdx.x] = grid.get_PosIndex(neighbour_block, base+threadIdx.x);
        }
        __syncthreads();  // window fully staged before anyone reads it
        // Only threads that actually own an agent evaluate candidates.
        if (threadIdx.x < cur_block.size) {
            int i=0;
            while ( base+i < end && i < threads_per_block) {
                const Vec3 offset = position - s_PosIndex[i].position();
                const float d2 = offset.lengthSquared();
                const int cur_index = s_PosIndex[i].index();
                // Within radius (r2 is the squared cutoff) and not ourselves.
                if (d2 < r2 && cur_index != my_index) {
                    if (neighbours_found < neighbour_size_max) {
                        // Room left: append.
                        neighbours[neighbours_found] = cur_index;
                        neighbours_distance_squared[neighbours_found] = d2;
                        ++neighbours_found;
                    } else {
                        // List full: find the farthest stored neighbour ...
                        float max_neighbour_distance = 0.0f;
                        int max_neighbour_distance_index = 0;
                        for ( int j = 0; j < neighbour_size_max; ++j ) {
                            const float dist = neighbours_distance_squared[j];
                            if ( dist > max_neighbour_distance ) {
                                max_neighbour_distance = dist;
                                max_neighbour_distance_index = j;
                            }
                        }
                        // ... and replace it if this candidate is closer.
                        if (max_neighbour_distance>d2) {
                            neighbours[max_neighbour_distance_index] = cur_index;
                            neighbours_distance_squared[max_neighbour_distance_index] = d2;
                        }
                    }
                }
                ++i;
            }
        }
        __syncthreads();  // everyone done reading before the next window overwrites
    }
}
// Boids steering kernel: one CUDA block per dyn_grid node. Each thread with
// an agent (threadIdx.x < cur_block.size) finds its neighbours via
// neighbour_search() and combines separation, alignment and cohesion into
// steering_results[my_index].
//
// Launch expectations (inferred from the code): gridDim.x == number of grid
// blocks, blockDim.x == threads_per_block; neighbour_search contains
// block-wide barriers, so all threads of a block must reach it.
__global__ void find_neighbours_simulate_dyn_gr (
    const ds::deviceT::dyn_grid &grid_,
    const cupp::deviceT::vector< Vec3 > &positions,
    const cupp::deviceT::vector< Vec3 > &forwards,
    cupp::deviceT::vector< Vec3 > &steering_results)
{
    // Shared copy of the grid descriptor to avoid repeated parameter reads.
    // NOTE(review): every thread performs this assignment concurrently and
    // threads 0/1 read it immediately below without an intervening barrier;
    // all writers store the same value, but this is formally a data race —
    // confirm against the dyn_grid copy semantics.
    __shared__ ds::deviceT::dyn_grid grid;
    grid = grid_;
    __shared__ dyn_grid_node cur_block;
    __shared__ dyn_grid_node neighbour_block;
    // Two threads fetch the block descriptors in parallel.
    if (threadIdx.x == 0) {
        cur_block = grid.get_block_data(blockIdx.x);
    }
    if (threadIdx.x == 1) {
        neighbour_block = grid.get_area(blockIdx.x);
    }
    __syncthreads();  // cur_block/neighbour_block visible to all threads
    // Per-thread agent state; only initialised for threads that own an agent.
    unsigned int my_index;
    Vec3 position;
    Vec3 forward;
    int neighbours_found = 0;
    int neighbours[neighbour_size_max];
    float neighbours_distance_squared[neighbour_size_max];
    if (threadIdx.x < cur_block.size) {
        my_index = grid.get_index(cur_block, threadIdx.x);
        position = grid.get_position(cur_block, threadIdx.x);
        forward = forwards [my_index];
        // -1 marks an empty neighbour slot (checked below).
        for (int i=0; i<neighbour_size_max; ++i) {
            neighbours[i]=-1;
        }
    }
    __syncthreads();
    // All threads must participate (internal barriers); agent-less threads
    // pass uninitialised my_index/position, which neighbour_search only
    // reads behind its own threadIdx.x < cur_block.size guard.
    neighbour_search(grid, neighbour_block, cur_block, my_index, position, neighbours_found, neighbours, neighbours_distance_squared);
    // From here on, only threads that own an agent continue.
    if (threadIdx.x >= cur_block.size) {
        return;
    }
    // Decide which behaviours each neighbour contributes to.
    bool do_seperation[neighbour_size_max];
    bool do_alignment[neighbour_size_max];
    bool do_cohesion[neighbour_size_max];
    for (int i=0; i<neighbour_size_max; ++i) {
        if (neighbours[i]==-1) {
            do_seperation[i] = false;
            do_alignment[i] = false;
            do_cohesion[i] = false;
            continue;
        }
        const float dist = neighbours_distance_squared[i];
        // Very close neighbours influence everything.
        // NOTE(review): dist is a SQUARED distance but boid_radius*3.0f
        // looks like a linear threshold — verify the intended units.
        if ( dist < boid_radius*3.0f ) {
            do_seperation[i] = true;
            do_alignment[i] = true;
            do_cohesion[i] = true;
            continue;
        }
        // Otherwise gate each behaviour by view angle and per-behaviour radius.
        const Vec3 unitOffset = (position - positions[ neighbours[i]]) / sqrtf (dist);
        const float forwardness = forward.dot (unitOffset);
        do_seperation[i] = (forwardness > separationAngle && dist <= separationRadius*separationRadius);
        do_alignment[i] = (forwardness > alignmentAngle && dist <= alignmentRadius*alignmentRadius);
        do_cohesion[i] = (forwardness > cohesionAngle && dist <= cohesionRadius*cohesionRadius);
    }
    // Accumulate the three steering components.
    Vec3 separation = { 0.0f, 0.0f, 0.0f };
    Vec3 alignment = { 0.0f, 0.0f, 0.0f };
    Vec3 cohesion = { 0.0f, 0.0f, 0.0f };
    int influencing_alignment_neighbour_count = 0;
    int influencing_cohesion_neighbour_count = 0;
    for (int i=0; i<neighbour_size_max; ++i) {
        /// @todo avoid obstacles if needed
        // index may be -1 for empty slots; it is only dereferenced behind
        // the do_* flags, which are false for those slots.
        int index = neighbours[i];
        if (do_seperation[i]) {
            // Repulsion falls off with squared distance (guard against /0).
            Vec3 temp = position - positions[index];
            if ( 0.0f != temp.lengthSquared() ) {
                separation = separation + (temp / temp.lengthSquared());
            } else {
                separation = separation + temp;
            }
        }
        if (do_alignment[i]) {
            // accumulate sum of neighbor's heading
            alignment = alignment + forwards[ index ];
            // count neighbors
            ++influencing_alignment_neighbour_count;
        }
        if (do_cohesion[i]) {
            // accumulate sum of neighbor's positions
            cohesion = cohesion + positions[index];
            // count neighbors
            ++influencing_cohesion_neighbour_count;
        }
    }
    // Turn sums into offsets from our own heading/position.
    alignment = alignment - ( forward * influencing_alignment_neighbour_count );
    cohesion = cohesion - ( position * influencing_cohesion_neighbour_count );
    // apply weights to components (save in variables for annotation)
    const Vec3 separationW = separation.normalize() * separationWeight;
    const Vec3 alignmentW = alignment.normalize() * alignmentWeight;
    const Vec3 cohesionW = cohesion.normalize() * cohesionWeight;
    steering_results[my_index] = separationW + alignmentW + cohesionW;
}
#if 0
__global__ void find_neighbours_simulate_dyn_gr (
const ds::deviceT::dyn_grid &grid,
const cupp::deviceT::vector< Vec3 > &positions,
const cupp::deviceT::vector< Vec3 > &forwards,
cupp::deviceT::vector< Vec3 > &steering_results)
{
__shared__ dyn_grid_node cur_block;
if (threadIdx.x == 0) {
cur_block = grid.get_block_data(blockIdx.x);
}
__syncthreads();
// constants that are needed below
const PosIndexPair me = threadIdx.x < cur_block.size ? grid.get_PosIndex(cur_block, threadIdx.x) : PosIndexPair(); //local
const unsigned int &my_index = me.index();
const Vec3 &position = me.position();
const Vec3 forward = forwards [my_index];
int neighbours_found = 0;
int neighbours[neighbour_size_max];
float neighbours_distance_squared[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
neighbours[i]=-1;
}
__syncthreads();
neighbour_search(grid, cur_block, cur_block, me, neighbours_found, neighbours, neighbours_distance_squared); // in our own grid
for (std::size_t i = blockIdx.x+1; i < gridDim.x; ++i) {
const dyn_grid_node temp = grid.get_block_data(i);
if ( temp.low_x - cur_block.high_x >= r ) {
break;
}
neighbour_search(grid, temp, cur_block, me, neighbours_found, neighbours, neighbours_distance_squared);
}
for (int i = blockIdx.x-1; i>=0; --i) {
const dyn_grid_node temp = grid.get_block_data(i);
if ( cur_block.low_x - temp.high_x >= r ) {
break;
}
neighbour_search(grid, temp, cur_block, me, neighbours_found, neighbours, neighbours_distance_squared);
}
if (threadIdx.x >= cur_block.size) {
return;
}
bool do_seperation[neighbour_size_max];
bool do_alignment[neighbour_size_max];
bool do_cohesion[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
/// @todo try loop till neighbours_found and remove if <- requires more registers
if (neighbours[i]==-1) {
do_seperation[i] = false;
do_alignment[i] = false;
do_cohesion[i] = false;
continue;
}
const float dist = neighbours_distance_squared[i];
if ( dist < boid_radius*3.0f ) {
do_seperation[i] = true;
do_alignment[i] = true;
do_cohesion[i] = true;
continue;
}
const Vec3 unitOffset = (position - positions[ neighbours[i]]) / sqrtf (dist);
const float forwardness = forward.dot (unitOffset);
do_seperation[i] = (forwardness > separationAngle && dist <= separationRadius*separationRadius);
do_alignment[i] = (forwardness > alignmentAngle && dist <= alignmentRadius*alignmentRadius);
do_cohesion[i] = (forwardness > cohesionAngle && dist <= cohesionRadius*cohesionRadius);
}
Vec3 separation = { 0.0f, 0.0f, 0.0f };
Vec3 alignment = { 0.0f, 0.0f, 0.0f };
Vec3 cohesion = { 0.0f, 0.0f, 0.0f };
int influencing_alignment_neighbour_count = 0;
int influencing_cohesion_neighbour_count = 0;
for (int i=0; i<neighbour_size_max; ++i) {
/// @todo avoid obstacles if needed
int index = neighbours[i];
if (do_seperation[i]) {
Vec3 temp = position - positions[index];
if ( 0.0f != temp.lengthSquared() ) {
separation = separation + (temp / temp.lengthSquared());
} else {
separation = separation + temp;
}
}
if (do_alignment[i]) {
// accumulate sum of neighbor's heading
alignment = alignment + forwards[ index ];
// count neighbors
++influencing_alignment_neighbour_count;
}
if (do_cohesion[i]) {
// accumulate sum of neighbor's positions
cohesion = cohesion + positions[index];
// count neighbors
++influencing_cohesion_neighbour_count;
}
}
alignment = alignment - ( forward * influencing_alignment_neighbour_count );
cohesion = cohesion - ( position * influencing_cohesion_neighbour_count );
// apply weights to components (save in variables for annotation)
const Vec3 separationW = separation.normalize() * separationWeight;
const Vec3 alignmentW = alignment.normalize() * alignmentWeight;
const Vec3 cohesionW = cohesion.normalize() * cohesionWeight;
steering_results[my_index] = separationW + alignmentW + cohesionW;
}
#endif
// Exposes the kernel through the host-callable function-pointer type
// declared in OpenSteer/kernels.h, so host code can obtain and launch it
// without seeing the kernel's definition.
find_neighbours_simulate_dyn_gr_kernelT get_find_neighbours_simulate_dyn_grid_kernel() {
    return (find_neighbours_simulate_dyn_gr_kernelT)find_neighbours_simulate_dyn_gr;
}
| 74d139599616cd569df8e4facfa7a8408da19bb5.cu | #include "cupp/deviceT/vector.h"
#include "cupp/common.h"
#include "OpenSteer/deviceT/Vec3.h"
#include "OpenSteer/CuPPConfig.h"
#include "OpenSteer/kernels.h"
#include "ds/deviceT/dyn_grid.h"
#include "OpenSteer/PosIndexPair.h"
using OpenSteer::PosIndexPair;
using namespace OpenSteer::deviceT;
using ds::dyn_grid_node;
// Component-wise addition for CUDA's built-in int3 vector type.
// (The parameter names rhs/lhs are swapped relative to convention in the
// original signature; addition is commutative, so this is harmless.)
__device__ int3 operator+ (const int3& rhs, const int3& lhs) {
    int3 sum;
    sum.x = lhs.x + rhs.x;
    sum.y = lhs.y + rhs.y;
    sum.z = lhs.z + rhs.z;
    return sum;
}
__device__ void neighbour_search(
const ds::deviceT::dyn_grid &grid,
const dyn_grid_node& neighbour_block, const dyn_grid_node& cur_block,
const unsigned int& my_index, const Vec3 &position,
int &neighbours_found, int *neighbours, float *neighbours_distance_squared
) {
__shared__ PosIndexPair s_PosIndex[threads_per_block];
const int end = neighbour_block.size;
for (int base=0; base < end; base+=threads_per_block) {
if (base + threadIdx.x < end) {
s_PosIndex[threadIdx.x] = grid.get_PosIndex(neighbour_block, base+threadIdx.x);
}
__syncthreads();
if (threadIdx.x < cur_block.size) {
int i=0;
while ( base+i < end && i < threads_per_block) {
const Vec3 offset = position - s_PosIndex[i].position();
const float d2 = offset.lengthSquared();
const int cur_index = s_PosIndex[i].index();
if (d2 < r2 && cur_index != my_index) {
if (neighbours_found < neighbour_size_max) {
neighbours[neighbours_found] = cur_index;
neighbours_distance_squared[neighbours_found] = d2;
++neighbours_found;
} else {
float max_neighbour_distance = 0.0f;
int max_neighbour_distance_index = 0;
for ( int j = 0; j < neighbour_size_max; ++j ) {
const float dist = neighbours_distance_squared[j];
if ( dist > max_neighbour_distance ) {
max_neighbour_distance = dist;
max_neighbour_distance_index = j;
}
}
if (max_neighbour_distance>d2) {
neighbours[max_neighbour_distance_index] = cur_index;
neighbours_distance_squared[max_neighbour_distance_index] = d2;
}
}
}
++i;
}
}
__syncthreads();
}
}
__global__ void find_neighbours_simulate_dyn_gr (
const ds::deviceT::dyn_grid &grid_,
const cupp::deviceT::vector< Vec3 > &positions,
const cupp::deviceT::vector< Vec3 > &forwards,
cupp::deviceT::vector< Vec3 > &steering_results)
{
__shared__ ds::deviceT::dyn_grid grid;
grid = grid_;
__shared__ dyn_grid_node cur_block;
__shared__ dyn_grid_node neighbour_block;
if (threadIdx.x == 0) {
cur_block = grid.get_block_data(blockIdx.x);
}
if (threadIdx.x == 1) {
neighbour_block = grid.get_area(blockIdx.x);
}
__syncthreads();
// constants that are needed below
unsigned int my_index;
Vec3 position;
Vec3 forward;
int neighbours_found = 0;
int neighbours[neighbour_size_max];
float neighbours_distance_squared[neighbour_size_max];
if (threadIdx.x < cur_block.size) {
my_index = grid.get_index(cur_block, threadIdx.x);
position = grid.get_position(cur_block, threadIdx.x);
forward = forwards [my_index];
for (int i=0; i<neighbour_size_max; ++i) {
neighbours[i]=-1;
}
}
__syncthreads();
neighbour_search(grid, neighbour_block, cur_block, my_index, position, neighbours_found, neighbours, neighbours_distance_squared);
if (threadIdx.x >= cur_block.size) {
return;
}
bool do_seperation[neighbour_size_max];
bool do_alignment[neighbour_size_max];
bool do_cohesion[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
if (neighbours[i]==-1) {
do_seperation[i] = false;
do_alignment[i] = false;
do_cohesion[i] = false;
continue;
}
const float dist = neighbours_distance_squared[i];
if ( dist < boid_radius*3.0f ) {
do_seperation[i] = true;
do_alignment[i] = true;
do_cohesion[i] = true;
continue;
}
const Vec3 unitOffset = (position - positions[ neighbours[i]]) / sqrtf (dist);
const float forwardness = forward.dot (unitOffset);
do_seperation[i] = (forwardness > separationAngle && dist <= separationRadius*separationRadius);
do_alignment[i] = (forwardness > alignmentAngle && dist <= alignmentRadius*alignmentRadius);
do_cohesion[i] = (forwardness > cohesionAngle && dist <= cohesionRadius*cohesionRadius);
}
Vec3 separation = { 0.0f, 0.0f, 0.0f };
Vec3 alignment = { 0.0f, 0.0f, 0.0f };
Vec3 cohesion = { 0.0f, 0.0f, 0.0f };
int influencing_alignment_neighbour_count = 0;
int influencing_cohesion_neighbour_count = 0;
for (int i=0; i<neighbour_size_max; ++i) {
/// @todo avoid obstacles if needed
int index = neighbours[i];
if (do_seperation[i]) {
Vec3 temp = position - positions[index];
if ( 0.0f != temp.lengthSquared() ) {
separation = separation + (temp / temp.lengthSquared());
} else {
separation = separation + temp;
}
}
if (do_alignment[i]) {
// accumulate sum of neighbor's heading
alignment = alignment + forwards[ index ];
// count neighbors
++influencing_alignment_neighbour_count;
}
if (do_cohesion[i]) {
// accumulate sum of neighbor's positions
cohesion = cohesion + positions[index];
// count neighbors
++influencing_cohesion_neighbour_count;
}
}
alignment = alignment - ( forward * influencing_alignment_neighbour_count );
cohesion = cohesion - ( position * influencing_cohesion_neighbour_count );
// apply weights to components (save in variables for annotation)
const Vec3 separationW = separation.normalize() * separationWeight;
const Vec3 alignmentW = alignment.normalize() * alignmentWeight;
const Vec3 cohesionW = cohesion.normalize() * cohesionWeight;
steering_results[my_index] = separationW + alignmentW + cohesionW;
}
#if 0
__global__ void find_neighbours_simulate_dyn_gr (
const ds::deviceT::dyn_grid &grid,
const cupp::deviceT::vector< Vec3 > &positions,
const cupp::deviceT::vector< Vec3 > &forwards,
cupp::deviceT::vector< Vec3 > &steering_results)
{
__shared__ dyn_grid_node cur_block;
if (threadIdx.x == 0) {
cur_block = grid.get_block_data(blockIdx.x);
}
__syncthreads();
// constants that are needed below
const PosIndexPair me = threadIdx.x < cur_block.size ? grid.get_PosIndex(cur_block, threadIdx.x) : PosIndexPair(); //local
const unsigned int &my_index = me.index();
const Vec3 &position = me.position();
const Vec3 forward = forwards [my_index];
int neighbours_found = 0;
int neighbours[neighbour_size_max];
float neighbours_distance_squared[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
neighbours[i]=-1;
}
__syncthreads();
neighbour_search(grid, cur_block, cur_block, me, neighbours_found, neighbours, neighbours_distance_squared); // in our own grid
for (std::size_t i = blockIdx.x+1; i < gridDim.x; ++i) {
const dyn_grid_node temp = grid.get_block_data(i);
if ( temp.low_x - cur_block.high_x >= r ) {
break;
}
neighbour_search(grid, temp, cur_block, me, neighbours_found, neighbours, neighbours_distance_squared);
}
for (int i = blockIdx.x-1; i>=0; --i) {
const dyn_grid_node temp = grid.get_block_data(i);
if ( cur_block.low_x - temp.high_x >= r ) {
break;
}
neighbour_search(grid, temp, cur_block, me, neighbours_found, neighbours, neighbours_distance_squared);
}
if (threadIdx.x >= cur_block.size) {
return;
}
bool do_seperation[neighbour_size_max];
bool do_alignment[neighbour_size_max];
bool do_cohesion[neighbour_size_max];
for (int i=0; i<neighbour_size_max; ++i) {
/// @todo try loop till neighbours_found and remove if <- requires more registers
if (neighbours[i]==-1) {
do_seperation[i] = false;
do_alignment[i] = false;
do_cohesion[i] = false;
continue;
}
const float dist = neighbours_distance_squared[i];
if ( dist < boid_radius*3.0f ) {
do_seperation[i] = true;
do_alignment[i] = true;
do_cohesion[i] = true;
continue;
}
const Vec3 unitOffset = (position - positions[ neighbours[i]]) / sqrtf (dist);
const float forwardness = forward.dot (unitOffset);
do_seperation[i] = (forwardness > separationAngle && dist <= separationRadius*separationRadius);
do_alignment[i] = (forwardness > alignmentAngle && dist <= alignmentRadius*alignmentRadius);
do_cohesion[i] = (forwardness > cohesionAngle && dist <= cohesionRadius*cohesionRadius);
}
Vec3 separation = { 0.0f, 0.0f, 0.0f };
Vec3 alignment = { 0.0f, 0.0f, 0.0f };
Vec3 cohesion = { 0.0f, 0.0f, 0.0f };
int influencing_alignment_neighbour_count = 0;
int influencing_cohesion_neighbour_count = 0;
for (int i=0; i<neighbour_size_max; ++i) {
/// @todo avoid obstacles if needed
int index = neighbours[i];
if (do_seperation[i]) {
Vec3 temp = position - positions[index];
if ( 0.0f != temp.lengthSquared() ) {
separation = separation + (temp / temp.lengthSquared());
} else {
separation = separation + temp;
}
}
if (do_alignment[i]) {
// accumulate sum of neighbor's heading
alignment = alignment + forwards[ index ];
// count neighbors
++influencing_alignment_neighbour_count;
}
if (do_cohesion[i]) {
// accumulate sum of neighbor's positions
cohesion = cohesion + positions[index];
// count neighbors
++influencing_cohesion_neighbour_count;
}
}
alignment = alignment - ( forward * influencing_alignment_neighbour_count );
cohesion = cohesion - ( position * influencing_cohesion_neighbour_count );
// apply weights to components (save in variables for annotation)
const Vec3 separationW = separation.normalize() * separationWeight;
const Vec3 alignmentW = alignment.normalize() * alignmentWeight;
const Vec3 cohesionW = cohesion.normalize() * cohesionWeight;
steering_results[my_index] = separationW + alignmentW + cohesionW;
}
#endif
// Exposes the kernel through the host-callable function-pointer type
// declared in OpenSteer/kernels.h, so host code can obtain and launch it
// without seeing the kernel's definition.
find_neighbours_simulate_dyn_gr_kernelT get_find_neighbours_simulate_dyn_grid_kernel() {
    return (find_neighbours_simulate_dyn_gr_kernelT)find_neighbours_simulate_dyn_gr;
}
|
3e1584e7e6d83a67e69da1f327b02bc48e5d2e28.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
/* I typed most of these in from the versions given in the book the change
* in performance of the kerneks using shared memory was negligable and
* actually increased in some cases. This was probably due to the speed
* of my global memory unlike the author my throughput never changed much
* regardless of which version I used.
*/
#define BDIMX 32
#define BDIMY 16
#define INDEX(ROW, COL, INNER) ((ROW) * (INNER) + (COL))
#define IPAD 1
// function for checking the CUDA runtime API results.
// Aborts the program when a HIP/CUDA runtime call fails.
// NOTE(review): the check is compiled only when DEBUG/_DEBUG is defined —
// release builds silently ignore every runtime error. Also, __FILE__ and
// __LINE__ here report this function's location, not the failing call site.
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    if (result != hipSuccess)
    {
        printf_s("Error: %s : %d", __FILE__, __LINE__);
        printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
        exit(1);
    }
#endif
}
// Fills `in` with `size` pseudo-random floats in [0.0, 25.5]
// (the low byte of rand(), scaled by one tenth).
void initialData(float *in, const int size)
{
    for (int i = 0; i < size; ++i)
    {
        const int lowByte = rand() & 0xFF;
        in[i] = lowByte / 10.0f;
    }
}
// Prints `size` floats from `in` on one line (width 3, no decimals) —
// used for eyeballing small matrices when the program's iprint flag is set.
void printData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        printf_s("%3.0f ", in[i]);
    }
    printf_s("\n");
    return;
}
// Compares a GPU result against the host reference element by element and
// reports the first mismatch (absolute difference above epsilon).
// Prints nothing on success.
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            int index = INDEX(i, j, cols);
            if (abs(hostRef[index] - gpuRef[index]) > epsilon) {
                match = 0;
                printf_s("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]);
                break;
            }
        }
        // Stop scanning after the first mismatching row.
        if (!match) break;
    }
    if (!match) printf_s("Arrays do not match.\n\n");
}
// CPU reference transpose: writes the ncols x nrows transpose of the
// row-major nrows x ncols matrix `in` into `out`.
// (Index arithmetic written out directly; it matches the file's
// INDEX(row, col, inner) == row * inner + col convention.)
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
    for (int ix = 0; ix < ncols; ++ix)
    {
        for (int iy = 0; iy < nrows; ++iy)
        {
            out[ix * nrows + iy] = in[iy * ncols + ix];
        }
    }
}
// Straight row-major copy, no transpose. Both the load and the store are
// fully coalesced, so this kernel serves as the bandwidth upper bound the
// transpose kernels are measured against.
__global__ void copyGmem(float *out, float *in, const int nx, const int ny)
{
    // (column, row) of this thread's element
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < nx && row < ny)
    {
        const unsigned int idx = row * nx + col;
        out[idx] = in[idx];
    }
}
// Naive transpose directly through global memory: the read is coalesced
// (row-major `in`) but the write is strided (column-major `out`), making
// this the performance lower bound for the shared-memory variants.
__global__ void naiveGmem(float *out, float *in, const int nx, const int ny)
{
    // (column, row) of this thread's source element
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < nx && row < ny)
    {
        out[col * ny + row] = in[row * nx + col];
    }
}
// Shared-memory tiled transpose: each block stages a BDIMY x BDIMX tile,
// then writes it out transposed so the global store stays coalesced.
// Assumes blockDim == (BDIMX, BDIMY).
__global__ void transposeSmem(float *out, float *in, const int nx, const int ny)
{
    __shared__ float tile[BDIMY][BDIMX];
    // original matrix coordinate (ix,iy)
    unsigned int ix, iy, ti, to;
    ix = blockIdx.x * blockDim.x + threadIdx.x;
    iy = blockIdx.y * blockDim.y + threadIdx.y;
    // linear global memory index for original matrix
    ti = iy * nx + ix;
    // Re-linearise the thread id so consecutive threads map to consecutive
    // elements of an output ROW of the transposed tile.
    unsigned int bidx, irow, icol;
    bidx = threadIdx.y*blockDim.x + threadIdx.x;
    irow = bidx / blockDim.y;
    icol = bidx % blockDim.y;
    // coordinate in transposed matrix
    unsigned int ix_col, iy_row;
    ix_col = blockIdx.y * blockDim.y + icol;
    iy_row = blockIdx.x * blockDim.x + irow;
    // linear global memory index for transposed matrix
    to = iy_row * ny + ix_col;
    // NOTE(review): the guard tests the TRANSPOSED coordinates, and the
    // __syncthreads() sits inside this (potentially divergent) branch.
    // This is only safe when nx == ny and both are multiples of the tile
    // size — which holds for the launches in this file's main() — confirm
    // before reusing with other shapes.
    if (ix_col < nx && iy_row < ny)
    {
        tile[threadIdx.y][threadIdx.x] = in[ti];  // coalesced tile load
        __syncthreads();
        out[to] = tile[icol][irow];               // coalesced transposed store
    }
}
// Shared-memory tiled transpose with padding: the tile's inner dimension is
// widened by IPAD so that the column-wise reads below land in different
// shared-memory banks (avoids bank conflicts of the unpadded version).
// Assumes blockDim == (BDIMX, BDIMY).
__global__ void transposeSmemPad(float *out, float *in, const int nx, const int ny)
{
    // static 1D shared memory with padding
    __shared__ float tile[BDIMY * (BDIMX + IPAD)];
    // coordinate in original matrix
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // linear global memory index for original matrix
    unsigned int ti = iy * nx + ix;
    // thread index in transposed block
    unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int irow = bidx / blockDim.y;
    unsigned int icol = bidx % blockDim.y;
    // coordinate in transposed matrix
    unsigned int ix2 = blockIdx.y * blockDim.y + icol;
    unsigned int iy2 = blockIdx.x * blockDim.x + irow;
    // linear global memory index for transposed matrix
    unsigned int to = iy2 * ny + ix2;
    // BUG FIX: the original guard was `ix + blockDim.x < nx`, copy-pasted
    // from the unrolled kernel (which also touches ix + blockDim.x). This
    // kernel touches only (ix, iy), so the stricter guard silently skipped
    // the entire last block-column of tiles.
    if (ix < nx && iy < ny)
    {
        // load one row from global memory to shared memory
        unsigned int row_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
        tile[row_idx] = in[ti];
    }
    // The barrier must be reached by every thread of the block, so it is
    // kept outside the guard (the original placed it inside a potentially
    // divergent branch).
    __syncthreads();
    // store one row to global memory from one column of shared memory;
    // the transposed matrix has nx rows of length ny.
    if (ix2 < ny && iy2 < nx)
    {
        unsigned int col_idx = icol * (blockDim.x + IPAD) + irow;
        out[to] = tile[col_idx];
    }
}
// Unrolled shared-memory transpose: each block processes TWO horizontally
// adjacent BDIMY x BDIMX tiles (launch with half the grid.x of the other
// kernels), using a padded tile to avoid shared-memory bank conflicts.
// Assumes blockDim == (BDIMX, BDIMY).
__global__ void transposeSmemUnrollPad(float *out, float *in, const int nx, const int ny)
{
    // static 1D shared memory with padding; holds both tiles side by side
    __shared__ float tile[BDIMY * (BDIMX * 2 + IPAD)];
    // coordinate in original matrix (note the 2x block stride in x)
    unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // linear global memory index for original matrix
    unsigned int ti = iy * nx + ix;
    // thread index in transposed block
    unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int irow = bidx / blockDim.y;
    unsigned int icol = bidx % blockDim.y;
    // coordinate in transposed matrix
    unsigned int ix2 = blockIdx.y * blockDim.y + icol;
    unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
    // linear global memory index for transposed matrix
    unsigned int to = iy2 * ny + ix2;
    // Guard covers the SECOND element (ix + blockDim.x) because each thread
    // handles two elements one tile apart.
    // NOTE(review): this drops the tail when nx is not a multiple of
    // 2*BDIMX, and the __syncthreads() is inside the branch — fine for the
    // power-of-two sizes used in main(), confirm for other shapes.
    if (ix + blockDim.x < nx && iy < ny)
    {
        // load two rows from global memory to shared memory
        unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) + threadIdx.x;
        tile[row_idx] = in[ti];
        tile[row_idx + BDIMX] = in[ti + BDIMX];
        __syncthreads();
        // store two rows to global memory from two columns of shared memory
        unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
        out[to] = tile[col_idx];
        out[to + ny * BDIMX] = tile[col_idx + BDIMX];
    }
}
// Same as transposeSmemUnrollPad but the padded tile is DYNAMIC shared
// memory: the caller must pass (BDIMX * 2 + IPAD) * BDIMY * sizeof(float)
// as the third launch parameter. Assumes blockDim == (BDIMX, BDIMY).
__global__ void transposeSmemUnrollPadDyn(float *out, float *in, const int nx, const int ny)
{
    // dynamic 1D shared memory with padding (size supplied at launch)
    extern __shared__ float tile[];
    // coordinate in original matrix (2x block stride in x: two tiles/block)
    unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // linear global memory index for original matrix
    unsigned int ti = iy * nx + ix;
    // thread index in transposed block
    unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int irow = bidx / blockDim.y;
    unsigned int icol = bidx % blockDim.y;
    // coordinate in transposed matrix
    unsigned int ix2 = blockIdx.y * blockDim.y + icol;
    unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
    // linear global memory index for transposed matrix
    unsigned int to = iy2 * ny + ix2;
    // NOTE(review): as in the static variant, the guard drops the tail when
    // nx is not a multiple of 2*BDIMX and the barrier sits inside the
    // branch — safe only for tile-multiple sizes such as those in main().
    if (ix + blockDim.x < nx && iy < ny)
    {
        // load two rows from global memory to shared memory
        unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) + threadIdx.x;
        tile[row_idx] = in[ti];
        tile[row_idx + BDIMX] = in[ti + BDIMX];
        __syncthreads();
        // store two rows to global memory from two columns of shared memory
        unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
        out[to] = tile[col_idx];
        out[to + ny * BDIMX] = tile[col_idx + BDIMX];
    }
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
checkCuda(hipGetDeviceProperties(&deviceProp, dev));
printf_s("%s starting transpose at ", argv[0]);
printf_s("device %d: %s ", dev, deviceProp.name);
checkCuda(hipSetDevice(dev));
bool iprint = 0;
// set size of matrix
int nrows = 1 << 12;
int ncols = 1 << 12;
if (argc > 1) iprint = atoi(argv[1]);
if (argc > 2) nrows = atoi(argv[2]);
if (argc > 3) ncols = atoi(argv[3]);
printf_s(" with matrix nrows %d ncols %d\n", nrows, ncols);
size_t ncells = nrows * ncols;
size_t nBytes = ncells * sizeof(float);
// set kernel parameters
dim3 block(BDIMX, BDIMY);
/*
* Map CUDA blocks/threads to output space. Map rows in output to same
* x-value in CUDA, columns to same y-value.
*/
dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y);
dim3 grid2((grid.x + 2 - 1) / 2, grid.y);
// allocate host memory
float *h_a = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// initialize host data
initialData(h_a, nrows * ncols);
// transpose at host side
transposeHost(hostRef, h_a, nrows, ncols);
// allocate device memory
float *d_a, *d_result;
checkCuda(hipMalloc(&d_a, nBytes));
checkCuda(hipMalloc(&d_result, nBytes));
// copy kernel just for performance comparison
checkCuda(hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( copyGmem) , dim3(grid), dim3(block) , 0, 0, d_result, d_a, nrows, ncols);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(gpuRef, d_result, nBytes, hipMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
// kernels too fast for basic cpu timers have used MS high-resolution timers, but its easier to use NSIGHT
printf_s("copyGmem <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
// naive transpose
checkCuda(hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( naiveGmem) , dim3(grid), dim3(block) , 0, 0, d_result, d_a, nrows, ncols);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(gpuRef, d_result, nBytes, hipMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("naiveGmem <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// naive shared memory transpose
checkCuda(hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( transposeSmem) , dim3(grid), dim3(block) , 0, 0, d_result, d_a, nrows, ncols);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(gpuRef, d_result, nBytes, hipMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmem <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// shared memory with pad
checkCuda(hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( transposeSmemPad) , dim3(grid), dim3(block) , 0, 0, d_result, d_a, nrows, ncols);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(gpuRef, d_result, nBytes, hipMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmemPad <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// shared memory unroll with pad
checkCuda(hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( transposeSmemUnrollPad) , dim3(grid2), dim3(block) , 0, 0, d_result, d_a, nrows, ncols);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(gpuRef, d_result, nBytes, hipMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmemUnrollPad <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x/2, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// dynamically allocated shared memory unroll with pad
checkCuda(hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( transposeSmemUnrollPadDyn) , dim3(grid2), dim3(block), (BDIMX * 2 + IPAD) * BDIMY * sizeof(float) , 0, d_result, d_a, nrows, ncols);
checkCuda(hipDeviceSynchronize());
checkCuda(hipMemcpy(gpuRef, d_result, nBytes, hipMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmemUnrollPadDyn <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x/2, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// free memory
checkCuda(hipFree(d_a));
checkCuda(hipFree(d_result));
free(hostRef);
free(gpuRef);
free(h_a);
checkCuda(hipDeviceReset());
return EXIT_SUCCESS;
} | 3e1584e7e6d83a67e69da1f327b02bc48e5d2e28.cu | #include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
/* I typed most of these in from the versions given in the book the change
* in performance of the kerneks using shared memory was negligable and
* actually increased in some cases. This was probably due to the speed
* of my global memory unlike the author my throughput never changed much
* regardless of which version I used.
*/
#define BDIMX 32
#define BDIMY 16
#define INDEX(ROW, COL, INNER) ((ROW) * (INNER) + (COL))
#define IPAD 1
// function for checking the CUDA runtime API results.
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
void initialData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void printData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf_s("%3.0f ", in[i]);
}
printf_s("\n");
return;
}
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
int index = INDEX(i, j, cols);
if (abs(hostRef[index] - gpuRef[index]) > epsilon) {
match = 0;
printf_s("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]);
break;
}
}
if (!match) break;
}
if (!match) printf_s("Arrays do not match.\n\n");
}
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
for (int iy = 0; iy < nrows; ++iy)
{
for (int ix = 0; ix < ncols; ++ix)
{
out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)];
}
}
}
__global__ void copyGmem(float *out, float *in, const int nx, const int ny)
{
// matrix coordinate(ix,iy)
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// used original kernel given in text the one provided in the download transposeRectangle is an exacr copy of naiveGmem kernel
// which destroyed his idea of an upper/lower bound against which performance could be judged.
if (ix < nx && iy < ny)
{
out[iy*nx + ix] = in[iy*nx + ix];
}
}
__global__ void naiveGmem(float *out, float *in, const int nx, const int ny)
{
// matrix coordinate (ix,iy)
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
out[ix*ny + iy] = in[iy*nx + ix];
}
}
__global__ void transposeSmem(float *out, float *in, const int nx, const int ny)
{
__shared__ float tile[BDIMY][BDIMX];
// original matrix coordinate (ix,iy)
unsigned int ix, iy, ti, to;
ix = blockIdx.x * blockDim.x + threadIdx.x;
iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx, irow, icol;
bidx = threadIdx.y*blockDim.x + threadIdx.x;
irow = bidx / blockDim.y;
icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix_col, iy_row;
ix_col = blockIdx.y * blockDim.y + icol;
iy_row = blockIdx.x * blockDim.x + irow;
// linear
to = iy_row * ny + ix_col;
if (ix_col < nx && iy_row < ny)
{
tile[threadIdx.y][threadIdx.x] = in[ti];
__syncthreads();
out[to] = tile[icol][irow];
}
}
__global__ void transposeSmemPad(float *out, float *in, const int nx, const int ny)
{
// static 1D shared memory with padding
__shared__ float tile[BDIMY * (BDIMX + IPAD)];
// coordinate in original matrix
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
unsigned int ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockIdx.y * blockDim.y + icol;
unsigned int iy2 = blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
unsigned int to = iy2 * ny + ix2;
if (ix + blockDim.x < nx && iy < ny)
{
// load one row from global memory to shared memory
unsigned int row_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
tile[row_idx] = in[ti];
__syncthreads();
// store one rowsto global memory from one column of shared memory
unsigned int col_idx = icol * (blockDim.x + IPAD) + irow;
out[to] = tile[col_idx];
}
}
__global__ void transposeSmemUnrollPad(float *out, float *in, const int nx, const int ny)
{
// static 1D shared memory with padding
__shared__ float tile[BDIMY * (BDIMX * 2 + IPAD)];
// coordinate in original matrix
unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
unsigned int ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockIdx.y * blockDim.y + icol;
unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
unsigned int to = iy2 * ny + ix2;
if (ix + blockDim.x < nx && iy < ny)
{
// load two rows from global memory to shared memory
unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) + threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
__syncthreads();
// store two rows to global memory from two columns of shared memory
unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
}
__global__ void transposeSmemUnrollPadDyn(float *out, float *in, const int nx, const int ny)
{
// static 1D shared memory with padding
extern __shared__ float tile[];
// coordinate in original matrix
unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
unsigned int ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockIdx.y * blockDim.y + icol;
unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
unsigned int to = iy2 * ny + ix2;
if (ix + blockDim.x < nx && iy < ny)
{
// load two rows from global memory to shared memory
unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) + threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
__syncthreads();
// store two rows to global memory from two columns of shared memory
unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
checkCuda(cudaGetDeviceProperties(&deviceProp, dev));
printf_s("%s starting transpose at ", argv[0]);
printf_s("device %d: %s ", dev, deviceProp.name);
checkCuda(cudaSetDevice(dev));
bool iprint = 0;
// set size of matrix
int nrows = 1 << 12;
int ncols = 1 << 12;
if (argc > 1) iprint = atoi(argv[1]);
if (argc > 2) nrows = atoi(argv[2]);
if (argc > 3) ncols = atoi(argv[3]);
printf_s(" with matrix nrows %d ncols %d\n", nrows, ncols);
size_t ncells = nrows * ncols;
size_t nBytes = ncells * sizeof(float);
// set kernel parameters
dim3 block(BDIMX, BDIMY);
/*
* Map CUDA blocks/threads to output space. Map rows in output to same
* x-value in CUDA, columns to same y-value.
*/
dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y);
dim3 grid2((grid.x + 2 - 1) / 2, grid.y);
// allocate host memory
float *h_a = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// initialize host data
initialData(h_a, nrows * ncols);
// transpose at host side
transposeHost(hostRef, h_a, nrows, ncols);
// allocate device memory
float *d_a, *d_result;
checkCuda(cudaMalloc(&d_a, nBytes));
checkCuda(cudaMalloc(&d_result, nBytes));
// copy kernel just for performance comparison
checkCuda(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
copyGmem <<<grid, block >>> (d_result, d_a, nrows, ncols);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(gpuRef, d_result, nBytes, cudaMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
// kernels too fast for basic cpu timers have used MS high-resolution timers, but its easier to use NSIGHT
printf_s("copyGmem <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
// naive transpose
checkCuda(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
naiveGmem <<<grid, block >>> (d_result, d_a, nrows, ncols);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(gpuRef, d_result, nBytes, cudaMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("naiveGmem <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// naive shared memory transpose
checkCuda(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
transposeSmem <<<grid, block >>> (d_result, d_a, nrows, ncols);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(gpuRef, d_result, nBytes, cudaMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmem <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// shared memory with pad
checkCuda(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
transposeSmemPad <<<grid, block >>> (d_result, d_a, nrows, ncols);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(gpuRef, d_result, nBytes, cudaMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmemPad <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// shared memory unroll with pad
checkCuda(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
transposeSmemUnrollPad <<<grid2, block >>> (d_result, d_a, nrows, ncols);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(gpuRef, d_result, nBytes, cudaMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmemUnrollPad <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x/2, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// dynamically allocated shared memory unroll with pad
checkCuda(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
transposeSmemUnrollPadDyn <<<grid2, block, (BDIMX * 2 + IPAD) * BDIMY * sizeof(float) >>> (d_result, d_a, nrows, ncols);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaMemcpy(gpuRef, d_result, nBytes, cudaMemcpyDeviceToHost));
if (iprint) printData(gpuRef, nrows * ncols);
printf_s("transposeSmemUnrollPadDyn <<< grid (%d,%d) block (%d,%d)>>>\n\n", grid.x/2, grid.y, block.x, block.y);
checkResult(hostRef, gpuRef, nrows, ncols);
// free memory
checkCuda(cudaFree(d_a));
checkCuda(cudaFree(d_result));
free(hostRef);
free(gpuRef);
free(h_a);
checkCuda(cudaDeviceReset());
return EXIT_SUCCESS;
} |
fc97e8fd0e834689e748c4d9405c5c6071f3c1c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
saxpy_kernel(long N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
long index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
void
getArrays(long size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
xarray = (float**)malloc(size*sizeof(float));
yarray = (float**)malloc(size*sizeof(float));
resultarray = (float**)malloc(size*sizeof(float));
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
free(xarray);
free(yarray);
free(resultarray);
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: allocate device memory buffers on the GPU using
// hipMalloc. The started code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
long size = total_elems*sizeof(float);
hipError_t err_x = hipMalloc((void **)&device_x,size);
if(err_x != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_x),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
hipError_t err_y = hipMalloc((void **)&device_y,size);
if(err_y != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_y),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
hipError_t err_res = hipMalloc((void **)&device_result,size);
if(err_res != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_res),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: Compute number of thread blocks.
//
const int numBlocks = (total_elems-1)/threadsPerBlock + 1;
//
// TODO: copy input arrays to the GPU using hipMemcpy
//
double copyStart = CycleTimer::currentSeconds();
hipError_t err_xcpy = hipMemcpy(device_x,xarray,size,hipMemcpyHostToDevice);
if(err_xcpy != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_xcpy),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
hipError_t err_ycpy = hipMemcpy(device_y,yarray,size,hipMemcpyHostToDevice);
if(err_ycpy != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_ycpy),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
timeCopyH2DAvg += CycleTimer::currentSeconds() - copyStart;
//
// TODO: insert time here to begin timing only the kernel
//
double startGPUTime = CycleTimer::currentSeconds();
// run saxpy_kernel on the GPU
hipLaunchKernelGGL(( saxpy_kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, total_elems, alpha, device_x, device_y, device_result);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call hipDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
hipError_t err_sync = hipDeviceSynchronize();
if(err_sync != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_sync),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
double endGPUTime = CycleTimer::currentSeconds();
timeKernelAvg += endGPUTime - startGPUTime;
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
//
// TODO: copy result from GPU using hipMemcpy
//
copyStart = CycleTimer::currentSeconds();
hipError_t err_rescpy = hipMemcpy(resultarray,device_result,size,hipMemcpyDeviceToHost);
if(err_rescpy != hipSuccess) {
fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err_rescpy),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
timeCopyD2HAvg += CycleTimer::currentSeconds() - copyStart;
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
//
// TODO free memory buffers on the GPU
//
hipFree(device_x);
hipFree(device_y);
hipFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| fc97e8fd0e834689e748c4d9405c5c6071f3c1c5.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
saxpy_kernel(long N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
long index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
void
getArrays(long size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
xarray = (float**)malloc(size*sizeof(float));
yarray = (float**)malloc(size*sizeof(float));
resultarray = (float**)malloc(size*sizeof(float));
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
free(xarray);
free(yarray);
free(resultarray);
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: allocate device memory buffers on the GPU using
// cudaMalloc. The started code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
long size = total_elems*sizeof(float);
cudaError_t err_x = cudaMalloc((void **)&device_x,size);
if(err_x != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_x),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
cudaError_t err_y = cudaMalloc((void **)&device_y,size);
if(err_y != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_y),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
cudaError_t err_res = cudaMalloc((void **)&device_result,size);
if(err_res != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_res),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: Compute number of thread blocks.
//
const int numBlocks = (total_elems-1)/threadsPerBlock + 1;
//
// TODO: copy input arrays to the GPU using cudaMemcpy
//
double copyStart = CycleTimer::currentSeconds();
cudaError_t err_xcpy = cudaMemcpy(device_x,xarray,size,cudaMemcpyHostToDevice);
if(err_xcpy != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_xcpy),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
cudaError_t err_ycpy = cudaMemcpy(device_y,yarray,size,cudaMemcpyHostToDevice);
if(err_ycpy != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_ycpy),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
timeCopyH2DAvg += CycleTimer::currentSeconds() - copyStart;
//
// TODO: insert time here to begin timing only the kernel
//
double startGPUTime = CycleTimer::currentSeconds();
// run saxpy_kernel on the GPU
saxpy_kernel<<<numBlocks,threadsPerBlock>>>(total_elems, alpha, device_x, device_y, device_result);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call cudaDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
cudaError_t err_sync = cudaDeviceSynchronize();
if(err_sync != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_sync),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
double endGPUTime = CycleTimer::currentSeconds();
timeKernelAvg += endGPUTime - startGPUTime;
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
//
// TODO: copy result from GPU using cudaMemcpy
//
copyStart = CycleTimer::currentSeconds();
cudaError_t err_rescpy = cudaMemcpy(resultarray,device_result,size,cudaMemcpyDeviceToHost);
if(err_rescpy != cudaSuccess) {
fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err_rescpy),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
timeCopyD2HAvg += CycleTimer::currentSeconds() - copyStart;
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
//
// TODO free memory buffers on the GPU
//
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
4d335da5c7967e3cf6302857f92eea95d967b83a.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdlib.h>
// CUDA runtime
#include "helper.h"
#include <rocblas.h>
#include <hip/hip_runtime.h>
// a = mxk, b = kxn
template <int BLOCK>
__global__ void sgemm(int m, int n, int k, float *a, float *b, float *c) {
// blockIdx control subpanel matrix
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int bx = blockIdx.x;
const int by = blockIdx.y;
float *begin_a = a + bx * BLOCK * k;
float *begin_b = b + by * BLOCK;
float *end_a = begin_a + k;
float sum = 0.f;
for (float *a_ptr = begin_a, *b_ptr = begin_b; a_ptr < end_a;
a_ptr += BLOCK, b_ptr += BLOCK * n) {
__shared__ float ashare[BLOCK][BLOCK];
__shared__ float bshare[BLOCK][BLOCK];
ashare[ty][tx] = a_ptr[ty * k + tx];
bshare[ty][tx] = b_ptr[ty * n + tx];
__syncthreads();
#pragma unroll
for (int kk = 0; kk < BLOCK; ++kk) {
sum += ashare[ty][kk] * bshare[kk][tx];
}
__syncthreads();
}
c[(BLOCK * bx + ty) * n + BLOCK * by + tx] = sum;
}
void MY_MMult(hipblasHandle_t handle, int m, int n, int k, float *d_A, int lda,
float *d_B, int ldb, float *d_C, int ldc) {
constexpr int BLOCK = 16;
dim3 block(BLOCK, BLOCK);
dim3 grid((m + BLOCK - 1) / BLOCK, (n + BLOCK - 1) / BLOCK);
hipLaunchKernelGGL(( sgemm<BLOCK>), dim3(grid), dim3(block), 0, 0, m, n, k, d_A, d_B, d_C);
}
| 4d335da5c7967e3cf6302857f92eea95d967b83a.cu | #include <assert.h>
#include <stdlib.h>
// CUDA runtime
#include "helper.h"
#include <cublas_v2.h>
#include <cuda_runtime.h>
// a = mxk, b = kxn
template <int BLOCK>
__global__ void sgemm(int m, int n, int k, float *a, float *b, float *c) {
// blockIdx control subpanel matrix
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int bx = blockIdx.x;
const int by = blockIdx.y;
float *begin_a = a + bx * BLOCK * k;
float *begin_b = b + by * BLOCK;
float *end_a = begin_a + k;
float sum = 0.f;
for (float *a_ptr = begin_a, *b_ptr = begin_b; a_ptr < end_a;
a_ptr += BLOCK, b_ptr += BLOCK * n) {
__shared__ float ashare[BLOCK][BLOCK];
__shared__ float bshare[BLOCK][BLOCK];
ashare[ty][tx] = a_ptr[ty * k + tx];
bshare[ty][tx] = b_ptr[ty * n + tx];
__syncthreads();
#pragma unroll
for (int kk = 0; kk < BLOCK; ++kk) {
sum += ashare[ty][kk] * bshare[kk][tx];
}
__syncthreads();
}
c[(BLOCK * bx + ty) * n + BLOCK * by + tx] = sum;
}
void MY_MMult(cublasHandle_t handle, int m, int n, int k, float *d_A, int lda,
float *d_B, int ldb, float *d_C, int ldc) {
constexpr int BLOCK = 16;
dim3 block(BLOCK, BLOCK);
dim3 grid((m + BLOCK - 1) / BLOCK, (n + BLOCK - 1) / BLOCK);
sgemm<BLOCK><<<grid, block>>>(m, n, k, d_A, d_B, d_C);
}
|
8ac9f530a497a2982b23f730bd4bea7876df50da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dconv.hpp"
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
void dconv_opt::init(){
init_base();
}
void dconv_opt::reshape(int num, int channel, int height, int width){
if (!reshape_base(num, channel, height, width)) return;
index_mat_ = at::zeros({height_,width_,2},at::kInt);
tmp_ = at::zeros({kernel_size_*kernel_size_, group_out_, height*width, num},at::kFloat).to(torch::Device(torch::kCUDA, device_));
plan_sum_ = 0;
mod_ = height_ + width_ + ngroup_ - 2;
int pidx = 0;
int stride = height_*width_;
int* idx = index_mat_.data_ptr<int>();
plan_idx_.clear();
for (int pn = 0; pn < height_ + width_ - 1; pn++) {
plan_idx_.push_back(pidx);
int ph = pn >= width_ ? pn - width_ + 1 : 0;
for (int j=0; ph < height_; ph++,j++) {
int pw = pn - ph;
if (pw < 0) break;
idx[pidx] = ph;
idx[pidx + stride] = pw;
pidx += 1;
}
}
plan_idx_.push_back(pidx);
index_mat_= index_mat_.to(torch::Device(torch::kCUDA, device_));
//printf("%d %d %d %d\n", height_,width_,mod_,plan_idx_[0]);
}
void dconv_opt::reshape_top(at::TensorOptions option){
std::vector<std::vector<int64_t>> shapes;
shapes.push_back({num_,nout_,height_,width_});
reshape_top_base(option,shapes);
}
template <typename scalar_t>
__global__ void deocder_conv_data_to_col_gpu(const int size, const scalar_t * input, const scalar_t* weight,
scalar_t * output, const int * index, const int index_stride,const int kernel_size,
const int group_in, const int group_out, const int num, const int height, const int width, const int start_idx,
const int psum, const int inner_shape, const int channel, const int constrain) {
CUDA_KERNEL_LOOP(i, size) {
int pn = i % num;
int pb = (i / num) % inner_shape;
int pidx = pb + start_idx;
int th = index[pidx];
int tw = index[pidx + index_stride];
int og = (i / num / inner_shape) % group_out;
int ks = i / num / inner_shape / group_out;
int kw = ks % kernel_size;
int kh = ks / kernel_size;
int half_kernel = kernel_size/2;
int ph = th - half_kernel + kh;
int pw = tw - half_kernel + kw;
if(ph >= height || ph < 0 || pw >= width || pw<0)
continue;
scalar_t sum = 0;
int tc = (psum - th - tw);
int nchannel = constrain==5?(psum - ph - pw) * group_in:(psum - ph - pw + 1) * group_in;
if(nchannel>channel)
nchannel = channel;
int skernel = kernel_size * kernel_size;
int weight_base = (tc * group_out + og)* channel * skernel+ ks;
int data_base = (pn * channel* height + ph) * width + pw;
for(int ti = 0; ti < nchannel; ti++){
sum = sum + input[data_base+ti*index_stride]*weight[weight_base+ti*skernel];
}
output[i] = sum;
}
}
template <typename scalar_t>
__global__ void deocder_conv_col_to_data_gpu(const int size, const scalar_t * input, const scalar_t * bias, scalar_t * output,
const int * index, const int index_stride, const int group_out, const int start_idx, const int psum,
const int height, const int width, const int nout, const int num, const int inner_shape) {
CUDA_KERNEL_LOOP(i, size) {
int pn = i % num;
int pb = (i / num) % inner_shape;
int pidx = pb + start_idx;
int th = index[pidx];
int tw = index[pidx + index_stride];
int tc = (psum - th - tw);
int og = (i / num / inner_shape) % group_out;
int pout = (tc * group_out + og);
int out_idx = ((pn*nout+pout)*height+th)*width + tw;
output[out_idx] = input[i]+bias[pout];
}
}
template <typename scalar_t>
__global__ void deocder_conv_sum_gpu(const int size, scalar_t * data, const int inner_shape, const int sum_size){
CUDA_KERNEL_LOOP(i, size) {
for(int ti = 1; ti< sum_size; ti++)
data[i] += data[i+ti*inner_shape];
}
}
std::vector<at::Tensor> dconv_opt::forward_cuda(at::Tensor bottom_data, at::Tensor weight, at::Tensor bias)
{
//printf("here0!\n");
reshape(bottom_data.size(0), channel_, bottom_data.size(2), bottom_data.size(3));
reshape_top(bottom_data.options());
int h_ = height_;
int w_ = width_;
int ch_ = channel_;
int la = plan_sum_ >= ngroup_ ? plan_sum_ - ngroup_ + 1 : 0;
int lb = plan_sum_ > h_ + w_ - 2 ? h_ + w_ - 2 : plan_sum_;
int inner_shape = (plan_idx_[lb + 1] - plan_idx_[la]);
int skernel = kernel_size_*kernel_size_;
int cnt = skernel*group_out_*inner_shape*num_;
//printf("here!\n");
AT_DISPATCH_FLOATING_TYPES(
bottom_data.scalar_type(), "dconv_forward_cuda",
([&] {
timer_->start();
hipMemset(tmp_.data_ptr<scalar_t>(), scalar_t(0.0), kernel_size_*kernel_size_* group_out_* inner_shape* num_*sizeof(scalar_t));
timer_->stop("set zero");
timer_->start();
hipLaunchKernelGGL(( deocder_conv_data_to_col_gpu<scalar_t>), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0 , stream_,
cnt, bottom_data.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), tmp_.data_ptr<scalar_t>(), index_mat_.data_ptr<int>(),
h_*w_, kernel_size_, group_in_, group_out_, num_, h_, w_, plan_idx_[la], plan_sum_, inner_shape, ch_, constrain_);
timer_->stop("kernel 1");
timer_->start();
cnt = group_out_*inner_shape*num_;
hipLaunchKernelGGL(( deocder_conv_sum_gpu<scalar_t>), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0 , stream_,
cnt, tmp_.data_ptr<scalar_t>(), cnt, skernel);
timer_->stop("kernel 2");
timer_->start();
hipLaunchKernelGGL(( deocder_conv_col_to_data_gpu), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0 , stream_,
cnt, tmp_.data_ptr<scalar_t>(), bias.data_ptr<scalar_t>(), top_data_[0].data_ptr<scalar_t>(),
index_mat_.data_ptr<int>(), h_*w_, group_out_, plan_idx_[la], plan_sum_,
h_, w_, nout_, num_, inner_shape);
timer_->stop("kernel 3");
CUDA_POST_KERNEL_CHECK;
}
)
);
plan_sum_ = (plan_sum_ + 1) % mod_;
return top_data_;
}
std::vector<at::Tensor> dconv_opt::backward_cuda(at::Tensor top_diff)
{
return {};
} | 8ac9f530a497a2982b23f730bd4bea7876df50da.cu | #include "dconv.hpp"
#include <curand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
void dconv_opt::init(){
init_base();
}
void dconv_opt::reshape(int num, int channel, int height, int width){
if (!reshape_base(num, channel, height, width)) return;
index_mat_ = at::zeros({height_,width_,2},at::kInt);
tmp_ = at::zeros({kernel_size_*kernel_size_, group_out_, height*width, num},at::kFloat).to(torch::Device(torch::kCUDA, device_));
plan_sum_ = 0;
mod_ = height_ + width_ + ngroup_ - 2;
int pidx = 0;
int stride = height_*width_;
int* idx = index_mat_.data_ptr<int>();
plan_idx_.clear();
for (int pn = 0; pn < height_ + width_ - 1; pn++) {
plan_idx_.push_back(pidx);
int ph = pn >= width_ ? pn - width_ + 1 : 0;
for (int j=0; ph < height_; ph++,j++) {
int pw = pn - ph;
if (pw < 0) break;
idx[pidx] = ph;
idx[pidx + stride] = pw;
pidx += 1;
}
}
plan_idx_.push_back(pidx);
index_mat_= index_mat_.to(torch::Device(torch::kCUDA, device_));
//printf("%d %d %d %d\n", height_,width_,mod_,plan_idx_[0]);
}
void dconv_opt::reshape_top(at::TensorOptions option){
std::vector<std::vector<int64_t>> shapes;
shapes.push_back({num_,nout_,height_,width_});
reshape_top_base(option,shapes);
}
template <typename scalar_t>
__global__ void deocder_conv_data_to_col_gpu(const int size, const scalar_t * input, const scalar_t* weight,
scalar_t * output, const int * index, const int index_stride,const int kernel_size,
const int group_in, const int group_out, const int num, const int height, const int width, const int start_idx,
const int psum, const int inner_shape, const int channel, const int constrain) {
CUDA_KERNEL_LOOP(i, size) {
int pn = i % num;
int pb = (i / num) % inner_shape;
int pidx = pb + start_idx;
int th = index[pidx];
int tw = index[pidx + index_stride];
int og = (i / num / inner_shape) % group_out;
int ks = i / num / inner_shape / group_out;
int kw = ks % kernel_size;
int kh = ks / kernel_size;
int half_kernel = kernel_size/2;
int ph = th - half_kernel + kh;
int pw = tw - half_kernel + kw;
if(ph >= height || ph < 0 || pw >= width || pw<0)
continue;
scalar_t sum = 0;
int tc = (psum - th - tw);
int nchannel = constrain==5?(psum - ph - pw) * group_in:(psum - ph - pw + 1) * group_in;
if(nchannel>channel)
nchannel = channel;
int skernel = kernel_size * kernel_size;
int weight_base = (tc * group_out + og)* channel * skernel+ ks;
int data_base = (pn * channel* height + ph) * width + pw;
for(int ti = 0; ti < nchannel; ti++){
sum = sum + input[data_base+ti*index_stride]*weight[weight_base+ti*skernel];
}
output[i] = sum;
}
}
template <typename scalar_t>
__global__ void deocder_conv_col_to_data_gpu(const int size, const scalar_t * input, const scalar_t * bias, scalar_t * output,
const int * index, const int index_stride, const int group_out, const int start_idx, const int psum,
const int height, const int width, const int nout, const int num, const int inner_shape) {
CUDA_KERNEL_LOOP(i, size) {
int pn = i % num;
int pb = (i / num) % inner_shape;
int pidx = pb + start_idx;
int th = index[pidx];
int tw = index[pidx + index_stride];
int tc = (psum - th - tw);
int og = (i / num / inner_shape) % group_out;
int pout = (tc * group_out + og);
int out_idx = ((pn*nout+pout)*height+th)*width + tw;
output[out_idx] = input[i]+bias[pout];
}
}
template <typename scalar_t>
__global__ void deocder_conv_sum_gpu(const int size, scalar_t * data, const int inner_shape, const int sum_size){
CUDA_KERNEL_LOOP(i, size) {
for(int ti = 1; ti< sum_size; ti++)
data[i] += data[i+ti*inner_shape];
}
}
std::vector<at::Tensor> dconv_opt::forward_cuda(at::Tensor bottom_data, at::Tensor weight, at::Tensor bias)
{
//printf("here0!\n");
reshape(bottom_data.size(0), channel_, bottom_data.size(2), bottom_data.size(3));
reshape_top(bottom_data.options());
int h_ = height_;
int w_ = width_;
int ch_ = channel_;
int la = plan_sum_ >= ngroup_ ? plan_sum_ - ngroup_ + 1 : 0;
int lb = plan_sum_ > h_ + w_ - 2 ? h_ + w_ - 2 : plan_sum_;
int inner_shape = (plan_idx_[lb + 1] - plan_idx_[la]);
int skernel = kernel_size_*kernel_size_;
int cnt = skernel*group_out_*inner_shape*num_;
//printf("here!\n");
AT_DISPATCH_FLOATING_TYPES(
bottom_data.scalar_type(), "dconv_forward_cuda",
([&] {
timer_->start();
cudaMemset(tmp_.data_ptr<scalar_t>(), scalar_t(0.0), kernel_size_*kernel_size_* group_out_* inner_shape* num_*sizeof(scalar_t));
timer_->stop("set zero");
timer_->start();
deocder_conv_data_to_col_gpu<scalar_t><<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS, 0 , stream_>>>(
cnt, bottom_data.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), tmp_.data_ptr<scalar_t>(), index_mat_.data_ptr<int>(),
h_*w_, kernel_size_, group_in_, group_out_, num_, h_, w_, plan_idx_[la], plan_sum_, inner_shape, ch_, constrain_);
timer_->stop("kernel 1");
timer_->start();
cnt = group_out_*inner_shape*num_;
deocder_conv_sum_gpu<scalar_t><<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS, 0 , stream_>>>(
cnt, tmp_.data_ptr<scalar_t>(), cnt, skernel);
timer_->stop("kernel 2");
timer_->start();
deocder_conv_col_to_data_gpu<<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS, 0 , stream_>>>
(cnt, tmp_.data_ptr<scalar_t>(), bias.data_ptr<scalar_t>(), top_data_[0].data_ptr<scalar_t>(),
index_mat_.data_ptr<int>(), h_*w_, group_out_, plan_idx_[la], plan_sum_,
h_, w_, nout_, num_, inner_shape);
timer_->stop("kernel 3");
CUDA_POST_KERNEL_CHECK;
}
)
);
plan_sum_ = (plan_sum_ + 1) % mod_;
return top_data_;
}
std::vector<at::Tensor> dconv_opt::backward_cuda(at::Tensor top_diff)
{
return {};
} |
2cfdad96beb3b4947a5da1f6e7c069302fa58e1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "orange/surfaces/SurfaceAction.test.hh"
#include "base/KernelParamCalculator.cuda.hh"
using namespace celeritas_test;
__global__ void sa_test_kernel(SATestInput input)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= input.states.size())
return;
// Calculate distances in parallel
CalcSenseDistanceLauncher<> calc_thread{input.params, input.states};
calc_thread(tid);
}
| 2cfdad96beb3b4947a5da1f6e7c069302fa58e1f.cu | #include "orange/surfaces/SurfaceAction.test.hh"
#include "base/KernelParamCalculator.cuda.hh"
using namespace celeritas_test;
__global__ void sa_test_kernel(SATestInput input)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= input.states.size())
return;
// Calculate distances in parallel
CalcSenseDistanceLauncher<> calc_thread{input.params, input.states};
calc_thread(tid);
}
|
c800a9d6bb6ad144f3faf050466a284e97add96e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cnv.h"
#include "devbuffer.h"
#include "hostbuffer.h"
#include "norm.h"
#include "sim.h"
#include "stream.h"
#include "timer.h"
#include "utils.h"
#include "metrics.h"
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
using namespace std;
class Main {
private:
CNV cnv;
DevBuffer<real> cnvOnDev, simOnDev;
HostBuffer<real> simFromDev, cnvFromDev, simOnHost;
Timer t;
Timer::Frame fr;
string filename;
bool debugMode;
void copyToDev(int numb, int off, hipStream_t str = 0) {
real *dst = cnvOnDev + off * cnv.dim;
real *src = cnv.data + off * cnv.dim;
int nbytes = sizeof(real) * cnv.dim * numb;
check(hipMemcpyAsync(dst, src, nbytes, hipMemcpyHostToDevice, str));
}
void norm(int numb, int off, hipStream_t str = 0) {
dim3 grid(numb);
dim3 block(256);
real *dst = cnvOnDev + off * cnv.dim;
hipLaunchKernelGGL(( dev::norm_ker<256>), dim3(grid), dim3(block), 0, str, dst, cnv.dim);
check();
}
void sim(dim3 ext, dim3 off, hipStream_t str = 0) {
dim3 block(16, 16);
dim3 grid(ext.x / block.x + 1, ext.y / block.y + 1);
hipLaunchKernelGGL(( dev::sim_ker<16, 16>), dim3(grid), dim3(block), 0, str, cnvOnDev, ext, off, cnv.nvec, cnv.dim, simOnDev);
check();
}
void copySim() {
int nbytes = sizeof(real) * cnv.nvec * cnv.nvec;
check(hipMemcpy(simFromDev, simOnDev, nbytes, hipMemcpyDeviceToHost));
}
void overlapped() {
int part1 = cnv.nvec - cnv.nvec / 2, part2 = cnv.nvec / 2;
vector<Stream> streams(3);
cnvOnDev = DevBuffer<real>(cnv.dim * cnv.nvec);
simOnDev = DevBuffer<real>(cnv.nvec * cnv.nvec);
copyToDev(part1, 0, streams[0]);
norm(part1, 0, streams[0]);
sim(dim3(part1, part1), dim3(0, 0), streams[0]);
copyToDev(part2, part1, streams[1]);
norm(part2, part1, streams[1]);
hipDeviceSynchronize();
sim(dim3(part1, part2), dim3(0, part1), streams[0]);
sim(dim3(part2, part2), dim3(part1, part1), streams[1]);
sim(dim3(part2, part1), dim3(part1, 0), streams[2]);
hipDeviceSynchronize();
simFromDev = HostBuffer<real>(cnv.nvec * cnv.nvec);
copySim();
}
void nonOverlapped() {
int chunk = 16;
vector<Stream> streams(cnv.nvec / chunk + 1);
cnvOnDev = DevBuffer<real>(cnv.dim * cnv.nvec);
simOnDev = DevBuffer<real>(cnv.nvec * cnv.nvec);
for (int off = 0, ns = 0; off < cnv.nvec; off += chunk, ++ns) {
int span = min(chunk, cnv.nvec - off);
copyToDev(span, off, streams[ns]);
norm(span, off, streams[ns]);
}
hipDeviceSynchronize();
sim(dim3(cnv.nvec, cnv.nvec), dim3(0, 0));
simFromDev = HostBuffer<real>(cnv.nvec * cnv.nvec);
copySim();
}
public:
Main(int argc, char **argv) {
char *filenamePtr = NULL;
debugMode = false;
for (int i = 1; i < argc; ++i) {
if (!debugMode && (!strcmp(argv[i], "-g") || !strcmp(argv[i], "--debug")))
debugMode = true;
else if (!filenamePtr)
filenamePtr = argv[i];
}
if (!filenamePtr) throw invalid_argument("argv");
else filename = filenamePtr;
if (!debugMode)
cerr.setstate(ios::failbit);
}
int run() {
fr = t.measure("CNV");
cnv = CNV(filename.c_str());
fr.resolve();
fr = t.measure("Over", false);
for (int i = 0; i < 50; ++i) {
fr.enter();
overlapped();
fr.leave();
}
fr.resolve();
fr = t.measure("Non-Over", false);
for (int i = 0; i < 50; ++i) {
fr.enter();
nonOverlapped();
fr.leave();
}
fr.resolve();
if (debugMode) {
host::norm(cnv.data, cnv.nvec, cnv.dim);
cnvFromDev = HostBuffer<real>(cnv.nvec * cnv.dim);
int nbytes = sizeof(real) * cnv.nvec * cnv.dim;
check(hipMemcpy(cnvFromDev, cnvOnDev, nbytes, hipMemcpyDeviceToHost));
auto c = host::corr(cnvFromDev, cnv.data, cnv.nvec * cnv.dim);
cerr << "Corr [CNV]: " << c << '\n';
cerr << "Stats (simFromDev):\n";
host::stats(simFromDev, cnv.nvec).print();
cerr << "Stats (simOnHost):\n";
host::norm(cnv.data, cnv.nvec, cnv.dim);
simOnHost = HostBuffer<real>(cnv.nvec * cnv.nvec);
host::sim(cnv.data, cnv.nvec, cnv.dim, simOnHost);
host::stats(simOnHost, cnv.nvec).print();
c = host::corr(simFromDev, simOnHost, cnv.nvec * cnv.nvec);
cerr << "Corr [Sim]: " << c << '\n';
}
return EXIT_SUCCESS;
}
};
int main(int argc, char **argv) {
try {
return Main(argc, argv).run();
}
catch (invalid_argument) {
cout << "Wrong number of arguments\n";
cout << "Usage: " << argv[0] << " [-g|--debug] filename\n";
cout << "Exiting\n";
return EXIT_FAILURE;
}
catch (exception &e) {
cerr.clear();
cerr << "[ERROR] Message: " << e.what() << endl;
cout << "Exiting\n";
return EXIT_FAILURE;
}
}
| c800a9d6bb6ad144f3faf050466a284e97add96e.cu | #include "cnv.h"
#include "devbuffer.h"
#include "hostbuffer.h"
#include "norm.h"
#include "sim.h"
#include "stream.h"
#include "timer.h"
#include "utils.h"
#include "metrics.h"
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
using namespace std;
class Main {
private:
CNV cnv;
DevBuffer<real> cnvOnDev, simOnDev;
HostBuffer<real> simFromDev, cnvFromDev, simOnHost;
Timer t;
Timer::Frame fr;
string filename;
bool debugMode;
void copyToDev(int numb, int off, cudaStream_t str = 0) {
real *dst = cnvOnDev + off * cnv.dim;
real *src = cnv.data + off * cnv.dim;
int nbytes = sizeof(real) * cnv.dim * numb;
check(cudaMemcpyAsync(dst, src, nbytes, cudaMemcpyHostToDevice, str));
}
void norm(int numb, int off, cudaStream_t str = 0) {
dim3 grid(numb);
dim3 block(256);
real *dst = cnvOnDev + off * cnv.dim;
dev::norm_ker<256><<<grid, block, 0, str>>>(dst, cnv.dim);
check();
}
void sim(dim3 ext, dim3 off, cudaStream_t str = 0) {
dim3 block(16, 16);
dim3 grid(ext.x / block.x + 1, ext.y / block.y + 1);
dev::sim_ker<16, 16><<<grid, block, 0, str>>>(cnvOnDev, ext, off, cnv.nvec, cnv.dim, simOnDev);
check();
}
void copySim() {
int nbytes = sizeof(real) * cnv.nvec * cnv.nvec;
check(cudaMemcpy(simFromDev, simOnDev, nbytes, cudaMemcpyDeviceToHost));
}
void overlapped() {
int part1 = cnv.nvec - cnv.nvec / 2, part2 = cnv.nvec / 2;
vector<Stream> streams(3);
cnvOnDev = DevBuffer<real>(cnv.dim * cnv.nvec);
simOnDev = DevBuffer<real>(cnv.nvec * cnv.nvec);
copyToDev(part1, 0, streams[0]);
norm(part1, 0, streams[0]);
sim(dim3(part1, part1), dim3(0, 0), streams[0]);
copyToDev(part2, part1, streams[1]);
norm(part2, part1, streams[1]);
cudaDeviceSynchronize();
sim(dim3(part1, part2), dim3(0, part1), streams[0]);
sim(dim3(part2, part2), dim3(part1, part1), streams[1]);
sim(dim3(part2, part1), dim3(part1, 0), streams[2]);
cudaDeviceSynchronize();
simFromDev = HostBuffer<real>(cnv.nvec * cnv.nvec);
copySim();
}
void nonOverlapped() {
int chunk = 16;
vector<Stream> streams(cnv.nvec / chunk + 1);
cnvOnDev = DevBuffer<real>(cnv.dim * cnv.nvec);
simOnDev = DevBuffer<real>(cnv.nvec * cnv.nvec);
for (int off = 0, ns = 0; off < cnv.nvec; off += chunk, ++ns) {
int span = min(chunk, cnv.nvec - off);
copyToDev(span, off, streams[ns]);
norm(span, off, streams[ns]);
}
cudaDeviceSynchronize();
sim(dim3(cnv.nvec, cnv.nvec), dim3(0, 0));
simFromDev = HostBuffer<real>(cnv.nvec * cnv.nvec);
copySim();
}
public:
Main(int argc, char **argv) {
char *filenamePtr = NULL;
debugMode = false;
for (int i = 1; i < argc; ++i) {
if (!debugMode && (!strcmp(argv[i], "-g") || !strcmp(argv[i], "--debug")))
debugMode = true;
else if (!filenamePtr)
filenamePtr = argv[i];
}
if (!filenamePtr) throw invalid_argument("argv");
else filename = filenamePtr;
if (!debugMode)
cerr.setstate(ios::failbit);
}
int run() {
fr = t.measure("CNV");
cnv = CNV(filename.c_str());
fr.resolve();
fr = t.measure("Over", false);
for (int i = 0; i < 50; ++i) {
fr.enter();
overlapped();
fr.leave();
}
fr.resolve();
fr = t.measure("Non-Over", false);
for (int i = 0; i < 50; ++i) {
fr.enter();
nonOverlapped();
fr.leave();
}
fr.resolve();
if (debugMode) {
host::norm(cnv.data, cnv.nvec, cnv.dim);
cnvFromDev = HostBuffer<real>(cnv.nvec * cnv.dim);
int nbytes = sizeof(real) * cnv.nvec * cnv.dim;
check(cudaMemcpy(cnvFromDev, cnvOnDev, nbytes, cudaMemcpyDeviceToHost));
auto c = host::corr(cnvFromDev, cnv.data, cnv.nvec * cnv.dim);
cerr << "Corr [CNV]: " << c << '\n';
cerr << "Stats (simFromDev):\n";
host::stats(simFromDev, cnv.nvec).print();
cerr << "Stats (simOnHost):\n";
host::norm(cnv.data, cnv.nvec, cnv.dim);
simOnHost = HostBuffer<real>(cnv.nvec * cnv.nvec);
host::sim(cnv.data, cnv.nvec, cnv.dim, simOnHost);
host::stats(simOnHost, cnv.nvec).print();
c = host::corr(simFromDev, simOnHost, cnv.nvec * cnv.nvec);
cerr << "Corr [Sim]: " << c << '\n';
}
return EXIT_SUCCESS;
}
};
int main(int argc, char **argv) {
try {
return Main(argc, argv).run();
}
catch (invalid_argument) {
cout << "Wrong number of arguments\n";
cout << "Usage: " << argv[0] << " [-g|--debug] filename\n";
cout << "Exiting\n";
return EXIT_FAILURE;
}
catch (exception &e) {
cerr.clear();
cerr << "[ERROR] Message: " << e.what() << endl;
cout << "Exiting\n";
return EXIT_FAILURE;
}
}
|
3233b20a041419888c9236d84c9f8c5c991b9d21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#include "cudaVector.h"
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
//-----------------------------------------------------------------------------------
// YUV to RGB colorspace conversion
//-----------------------------------------------------------------------------------
static inline __device__ float clamp( float x ) { return fminf(fmaxf(x, 0.0f), 255.0f); }
// YUV2RGB
template<typename T>
static inline __device__ T YUV2RGB(const uint3& yuvi)
{
const float luma = float(yuvi.x);
const float u = float(yuvi.y) - 512.0f;
const float v = float(yuvi.z) - 512.0f;
const float s = 1.0f / 1024.0f * 255.0f; // TODO clamp for uchar output?
#if 1
return make_vec<T>(clamp((luma + 1.402f * v) * s),
clamp((luma - 0.344f * u - 0.714f * v) * s),
clamp((luma + 1.772f * u) * s), 255);
#else
return make_vec<T>(clamp((luma + 1.140f * v) * s),
clamp((luma - 0.395f * u - 0.581f * v) * s),
clamp((luma + 2.032f * u) * s), 255);
#endif
}
//-----------------------------------------------------------------------------------
// NV12 to RGB
//-----------------------------------------------------------------------------------
template<typename T>
__global__ void NV12ToRGB(uint32_t* srcImage, size_t nSourcePitch,
T* dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width )
return; //x = width - 1;
if( y >= height )
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
const uint3 yuvi_0 = make_uint3((yuv101010Pel[0] & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
const uint3 yuvi_1 = make_uint3((yuv101010Pel[1] & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
// YUV to RGB transformation conversion
dstImage[y * width + x] = YUV2RGB<T>(yuvi_0);
dstImage[y * width + x + 1] = YUV2RGB<T>(yuvi_1);
}
template<typename T>
static hipError_t launchNV12ToRGB( void* srcDev, T* dstDev, size_t width, size_t height )
{
if( !srcDev || !dstDev )
return hipErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return hipErrorInvalidValue;
const size_t srcPitch = width * sizeof(uint8_t);
const size_t dstPitch = width * sizeof(T);
const dim3 blockDim(32,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height, blockDim.y), 1);
hipLaunchKernelGGL(( NV12ToRGB<T>), dim3(gridDim), dim3(blockDim), 0, 0, (uint32_t*)srcDev, srcPitch, dstDev, dstPitch, width, height );
return CUDA(hipGetLastError());
}
// cudaNV12ToRGB (uchar3)
hipError_t cudaNV12ToRGB( void* srcDev, uchar3* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<uchar3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGB (float3)
hipError_t cudaNV12ToRGB( void* srcDev, float3* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<float3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (uchar4)
hipError_t cudaNV12ToRGBA( void* srcDev, uchar4* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<uchar4>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (float4)
hipError_t cudaNV12ToRGBA( void* srcDev, float4* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<float4>(srcDev, destDev, width, height);
}
#if 0
// cudaNV12SetupColorspace
hipError_t cudaNV12SetupColorspace( float hue )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
if( CUDA_FAILED(hipMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return hipErrorInvalidSymbol;
uint32_t cudaAlpha = ((uint32_t)0xff<< 24);
if( CUDA_FAILED(hipMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return hipErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return hipSuccess;
}
#endif
| 3233b20a041419888c9236d84c9f8c5c991b9d21.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#include "cudaVector.h"
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
//-----------------------------------------------------------------------------------
// YUV to RGB colorspace conversion
//-----------------------------------------------------------------------------------
static inline __device__ float clamp( float x ) { return fminf(fmaxf(x, 0.0f), 255.0f); }
// YUV2RGB
template<typename T>
static inline __device__ T YUV2RGB(const uint3& yuvi)
{
const float luma = float(yuvi.x);
const float u = float(yuvi.y) - 512.0f;
const float v = float(yuvi.z) - 512.0f;
const float s = 1.0f / 1024.0f * 255.0f; // TODO clamp for uchar output?
#if 1
return make_vec<T>(clamp((luma + 1.402f * v) * s),
clamp((luma - 0.344f * u - 0.714f * v) * s),
clamp((luma + 1.772f * u) * s), 255);
#else
return make_vec<T>(clamp((luma + 1.140f * v) * s),
clamp((luma - 0.395f * u - 0.581f * v) * s),
clamp((luma + 2.032f * u) * s), 255);
#endif
}
//-----------------------------------------------------------------------------------
// NV12 to RGB
//-----------------------------------------------------------------------------------
template<typename T>
__global__ void NV12ToRGB(uint32_t* srcImage, size_t nSourcePitch,
T* dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width )
return; //x = width - 1;
if( y >= height )
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
const uint3 yuvi_0 = make_uint3((yuv101010Pel[0] & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
const uint3 yuvi_1 = make_uint3((yuv101010Pel[1] & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
// YUV to RGB transformation conversion
dstImage[y * width + x] = YUV2RGB<T>(yuvi_0);
dstImage[y * width + x + 1] = YUV2RGB<T>(yuvi_1);
}
template<typename T>
static cudaError_t launchNV12ToRGB( void* srcDev, T* dstDev, size_t width, size_t height )
{
if( !srcDev || !dstDev )
return cudaErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return cudaErrorInvalidValue;
const size_t srcPitch = width * sizeof(uint8_t);
const size_t dstPitch = width * sizeof(T);
const dim3 blockDim(32,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height, blockDim.y), 1);
NV12ToRGB<T><<<gridDim, blockDim>>>( (uint32_t*)srcDev, srcPitch, dstDev, dstPitch, width, height );
return CUDA(cudaGetLastError());
}
// cudaNV12ToRGB (uchar3)
cudaError_t cudaNV12ToRGB( void* srcDev, uchar3* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<uchar3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGB (float3)
cudaError_t cudaNV12ToRGB( void* srcDev, float3* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<float3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (uchar4)
cudaError_t cudaNV12ToRGBA( void* srcDev, uchar4* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<uchar4>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (float4)
cudaError_t cudaNV12ToRGBA( void* srcDev, float4* destDev, size_t width, size_t height )
{
return launchNV12ToRGB<float4>(srcDev, destDev, width, height);
}
#if 0
// cudaNV12SetupColorspace
cudaError_t cudaNV12SetupColorspace( float hue )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
if( CUDA_FAILED(cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return cudaErrorInvalidSymbol;
uint32_t cudaAlpha = ((uint32_t)0xff<< 24);
if( CUDA_FAILED(cudaMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return cudaErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return cudaSuccess;
}
#endif
|
eaf4db0ab84d1f106286906bc208a38679bd7605.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int ltn,
int rtn
)
{
//i,j
/*
*xy
*xleftyright
*/
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int n = j * (rtn) + i;
if(&(lt[j])==NULL||&(rt[i])==NULL){
printf("memory error in .cu.\n");
return;// -1;
}
if((lt[j].val[0]==rt[i].val[0])&&(i<rtn)&&(j<ltn)) {
//count1
//if corresponding , count = 1
count[n] = 1;
}
}
}
| eaf4db0ab84d1f106286906bc208a38679bd7605.cu | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int ltn,
int rtn
)
{
//i,jの方向を間違えないように
/*
*x軸が縦の方向、y軸が横の方向だよ。
*だから、xがleft、yがrightに対応しているよ
*/
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int n = j * (rtn) + i;
if(&(lt[j])==NULL||&(rt[i])==NULL){
printf("memory error in .cu.\n");
return;// -1;
}
if((lt[j].val[0]==rt[i].val[0])&&(i<rtn)&&(j<ltn)) {
//条件に合致する場合、countを1にする。
//if corresponding , count = 1
count[n] = 1;
}
}
}
|
82f28bbcb912cf235e21e05599bb70e30b522367.hip | // !!! This is a file automatically generated by hipify!!!
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "cuda_strided_slice_layer_acc_kernel.cuh"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(StrideSliceV2, LAYER_STRIDED_SLICE_V2);
Status CudaStrideSliceV2LayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
        const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    // Base-class initialisation first; propagate any failure unchanged.
    const Status base_status = CudaLayerAcc::Init(context, param, resource, inputs, outputs);
    if (base_status != TNN_OK) {
        return base_status;
    }
    // Two device-side scratch buffers of 5 ints each: one for the begin
    // offsets, one for the strides (filled lazily in Forward()).
    for (int i = 0; i < 2; ++i) {
        CreateTempBuf(5 * sizeof(int));
    }
    return TNN_OK;
}
Status CudaStrideSliceV2LayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    // Invalidate the cached begin/stride device buffers; the next Forward()
    // call recomputes and re-uploads them.
    is_reshaped = false;
    return TNN_OK;
}
// Runs the strided-slice: on the first call after a Reshape(), resolves the
// per-axis begin/stride parameters (negative begins wrap around the input
// extent), pads them to 5 axes, reverses them, and uploads them to the two
// temp buffers created in Init(); then launches the slice kernel.
Status CudaStrideSliceV2LayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
auto input_dims = input_blob->GetBlobDesc().dims;
auto output_dims = output_blob->GetBlobDesc().dims;
if (!this->is_reshaped) {
auto params = dynamic_cast<StrideSliceV2LayerParam *>(param_);
if (!params) {
LOGE("Error: ShuffleLayerParam is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: ShuffleLayerParam is nil");
}
auto param_begins = params->begins;
auto param_strides = params->strides;
auto axes = params->axes;
// Defaults for axes the param does not mention: begin 0, stride 1.
std::vector<int> begins(5, 0), strides(5, 1);
for(int i = 0; i < axes.size(); ++i) {
int axis = axes[i];
int begin = param_begins[i];
// Negative begins count from the end of the axis.
begins[axis] = begin >= 0? begin : begin + input_dims[axis];
strides[axis] = param_strides[i];
}
// Reversed to the ordering the slice kernel expects — presumably
// fastest-varying axis first; TODO confirm against RunStrideSlice.
std::reverse(begins.begin(), begins.end());
std::reverse(strides.begin(), strides.end());
// NOTE(review): hipMemcpy return codes are not checked here.
hipMemcpy(tempbufs_[0].ptr, &(begins[0]), 5 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(tempbufs_[1].ptr, &(strides[0]), 5 * sizeof(int), hipMemcpyHostToDevice);
this->is_reshaped = true;
}
// Missing trailing dims are treated as extent 1 (supports 2D..5D blobs).
int input_n = input_dims[0];
int input_c = input_dims[1];
int output_c = output_dims[1];
int input_d = 1, output_d = 1;
if(input_dims.size() > 2) {
input_d = input_dims[2];
output_d = output_dims[2];
}
int input_h = 1, output_h = 1;
if(input_dims.size() > 3) {
input_h = input_dims[3];
output_h = output_dims[3];
}
int input_w = 1, output_w = 1;
if(input_dims.size() > 4) {
input_w = input_dims[4];
output_w = output_dims[4];
}
// Divisors used by the kernel to decompose a flat output index.
int div_d = output_w * output_h;
int div_c = output_w * output_h * output_d;
int div_n = output_w * output_h * output_d * output_c;
int count = DimsVectorUtils::Count(output_dims);
float* input_data = static_cast<float*>(input_blob->GetHandle().base);
float* output_data = static_cast<float*>(output_blob->GetHandle().base);
return RunStrideSlice(count, input_data, input_c, input_d, input_h, input_w, (const int*)tempbufs_[0].ptr,
(const int*)tempbufs_[1].ptr, output_data, output_c, output_d, output_h, output_w, div_d, div_c, div_n, context_->GetStream());
}
REGISTER_CUDA_ACC(StrideSliceV2, LAYER_STRIDED_SLICE_V2);
} // namespace TNN_NS
| 82f28bbcb912cf235e21e05599bb70e30b522367.cu | // Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "cuda_strided_slice_layer_acc_kernel.cuh"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(StrideSliceV2, LAYER_STRIDED_SLICE_V2);
Status CudaStrideSliceV2LayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
        const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    // Base-class initialisation first; propagate any failure unchanged.
    const Status base_status = CudaLayerAcc::Init(context, param, resource, inputs, outputs);
    if (base_status != TNN_OK) {
        return base_status;
    }
    // Two device-side scratch buffers of 5 ints each: one for the begin
    // offsets, one for the strides (filled lazily in Forward()).
    for (int i = 0; i < 2; ++i) {
        CreateTempBuf(5 * sizeof(int));
    }
    return TNN_OK;
}
Status CudaStrideSliceV2LayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    // Invalidate the cached begin/stride device buffers; the next Forward()
    // call recomputes and re-uploads them.
    is_reshaped = false;
    return TNN_OK;
}
// Runs the strided-slice: on the first call after a Reshape(), resolves the
// per-axis begin/stride parameters (negative begins wrap around the input
// extent), pads them to 5 axes, reverses them, and uploads them to the two
// temp buffers created in Init(); then launches the slice kernel.
Status CudaStrideSliceV2LayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
auto input_dims = input_blob->GetBlobDesc().dims;
auto output_dims = output_blob->GetBlobDesc().dims;
if (!this->is_reshaped) {
auto params = dynamic_cast<StrideSliceV2LayerParam *>(param_);
if (!params) {
LOGE("Error: ShuffleLayerParam is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: ShuffleLayerParam is nil");
}
auto param_begins = params->begins;
auto param_strides = params->strides;
auto axes = params->axes;
// Defaults for axes the param does not mention: begin 0, stride 1.
std::vector<int> begins(5, 0), strides(5, 1);
for(int i = 0; i < axes.size(); ++i) {
int axis = axes[i];
int begin = param_begins[i];
// Negative begins count from the end of the axis.
begins[axis] = begin >= 0? begin : begin + input_dims[axis];
strides[axis] = param_strides[i];
}
// Reversed to the ordering the slice kernel expects — presumably
// fastest-varying axis first; TODO confirm against RunStrideSlice.
std::reverse(begins.begin(), begins.end());
std::reverse(strides.begin(), strides.end());
// NOTE(review): cudaMemcpy return codes are not checked here.
cudaMemcpy(tempbufs_[0].ptr, &(begins[0]), 5 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(tempbufs_[1].ptr, &(strides[0]), 5 * sizeof(int), cudaMemcpyHostToDevice);
this->is_reshaped = true;
}
// Missing trailing dims are treated as extent 1 (supports 2D..5D blobs).
int input_n = input_dims[0];
int input_c = input_dims[1];
int output_c = output_dims[1];
int input_d = 1, output_d = 1;
if(input_dims.size() > 2) {
input_d = input_dims[2];
output_d = output_dims[2];
}
int input_h = 1, output_h = 1;
if(input_dims.size() > 3) {
input_h = input_dims[3];
output_h = output_dims[3];
}
int input_w = 1, output_w = 1;
if(input_dims.size() > 4) {
input_w = input_dims[4];
output_w = output_dims[4];
}
// Divisors used by the kernel to decompose a flat output index.
int div_d = output_w * output_h;
int div_c = output_w * output_h * output_d;
int div_n = output_w * output_h * output_d * output_c;
int count = DimsVectorUtils::Count(output_dims);
float* input_data = static_cast<float*>(input_blob->GetHandle().base);
float* output_data = static_cast<float*>(output_blob->GetHandle().base);
return RunStrideSlice(count, input_data, input_c, input_d, input_h, input_w, (const int*)tempbufs_[0].ptr,
(const int*)tempbufs_[1].ptr, output_data, output_c, output_d, output_h, output_w, div_d, div_c, div_n, context_->GetStream());
}
REGISTER_CUDA_ACC(StrideSliceV2, LAYER_STRIDED_SLICE_V2);
} // namespace TNN_NS
|
e5dbd63e36424999d735cd812321fb692b445455.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 40960;
int dimensigrid = 320;
int dimensiblok = 128;
int sizebig = 2;
typedef struct {
char size;
uint* value;
}big;
// Bit length of a big number: bits used by the top limb plus 32 for each
// lower limb. Assumes a->size >= 1.
__host__ __device__ short ukuranbit(big *a) {
    uint top = a->value[a->size - 1];
    short bits = 0;
    for (; top != 0; top >>= 1) {
        bits++;
    }
    return bits + (a->size - 1) * 32;
}
// Extract bit `count` of a (little-endian across 32-bit limbs): 1 or 0.
__host__ __device__ char getbit(big* a, short count) {
    uint word = a->value[count / 32];
    uint mask = (uint)1 << (count % 32);
    return (word & mask) ? 1 : 0;
}
// Limb `noblok` of (num << geser), geser in [0, 32): combines the high bits
// of the previous limb with the shifted bits of the current one.
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
    uint low = 0;
    if (noblok != 0 && geser != 0) {
        low = num->value[noblok - 1] >> (32 - geser);
    }
    uint high = 0;
    if (noblok != num->size) {
        high = num->value[noblok] << geser;
    }
    return low | high;
}
/*
 * Schoolbook multiplication: res = a * b, 32-bit limbs, base 2^32.
 *
 * Fixes vs. the original:
 *  - `aval * bval` was a 32x32 product evaluated in 32 bits and overflowed
 *    before the widening assignment; now widened with an explicit cast.
 *  - the limb base was UINT_MAX (2^32 - 1) via `% UINT_MAX` / `/ UINT_MAX`;
 *    the correct base for 32-bit limbs is 2^32 (low word / high word).
 *  - the for-init `char j = 0, lebih = 0` declared a char carry shadowing
 *    the outer uint carry (truncating it), and the final carry store wrote
 *    the outer variable, which was always 0.
 */
__host__ __device__ void kali(big *a, big *b, big* res) {
    // Multiplying by zero yields the canonical zero (size 0).
    if (a->size == 0 || b->size == 0) {
        res->size = 0;
        return;
    }
    char ukurana = a->size;
    char ukuranb = b->size;
    res->size = ukurana + ukuranb;
    for (char i = 0; i < res->size; i++) {
        res->value[i] = 0;
    }
    for (char i = 0; i < ukurana; i++) {
        uint aval = a->value[i];
        if (aval == 0) {
            continue;
        }
        uint lebih = 0;  // carry limb
        for (char j = 0; j < ukuranb; j++) {
            // 64-bit accumulate: product + partial sum + carry cannot overflow.
            ul temp = (ul)res->value[i + j] + (ul)aval * b->value[j] + lebih;
            res->value[i + j] = (uint)temp;  // low 32 bits (mod 2^32)
            lebih = (uint)(temp >> 32);      // high 32 bits = next carry
        }
        res->value[i + ukuranb] = lebih;
    }
    // Drop a leading zero limb if the top partial product did not carry.
    if (res->value[res->size - 1] == 0) {
        res->size--;
    }
}
// res = a mod b, via shift-and-subtract long division (Knuth-style).
// minbuff is caller-provided scratch of at least a->size + 1 limbs holding
// each trial subtraction before it is committed.
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
// Start with res = a (the running remainder).
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
// Fewer limbs than b: a is already reduced.
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
// Extra zero limb so shifted subtrahends never read past the remainder.
res->value[res->size] = 0;
res->size++;
i = ukurana - ukuranb + 1;
// For each candidate shift (i*32 + i2 bits, most significant first)...
while (i > 0) {
i--;
i2 = 32;
while (i2 > 0) {
i2--;
// Trial-subtract (b << shift) from the remainder into minbuff.
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
// Propagate any remaining borrow through the higher limbs.
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
// Commit only if the subtraction did not underflow (remainder >= shifted b).
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
// Normalise: strip leading zero limbs.
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
// res = a + b, where b is a small single-limb addend (ripple-carry).
void tambah(big* a, char b, big* res) {
    // Adding to zero: result is just b in one limb.
    if (a->size == 0) {
        res->size = 1;
        res->value[0] = uint(b);
        return;
    }
    // Reserve one extra limb for a possible final carry.
    res->size = a->size + 1;
    res->value[0] = a->value[0] + (uint)b;
    char carryIn = (res->value[0] < a->value[0]);
    char i;
    // Propagate the carry while it persists.
    for (i = 1; i < a->size && carryIn; i++) {
        uint temp = a->value[i] + (uint)1;
        carryIn = (temp == 0);
        res->value[i] = temp;
    }
    // Copy the untouched higher limbs.
    while (i < a->size) {
        res->value[i] = a->value[i];
        i++;
    }
    if (carryIn)
        res->value[i] = 1;   // carry spilled into the extra limb
    else
        res->size--;         // extra limb unused
}
/*
 * res = a - b with borrow propagation (assumes a >= b, as in the original).
 *
 * Fixes vs. the original:
 *  - b->size == 0 returned an all-zero result; a - 0 must equal a, so the
 *    limbs of a are copied instead.
 *  - the trailing `res->size--` could shrink the result to zero limbs; it
 *    is now guarded to keep at least one limb.
 */
void kurang(big* a, big *b, big* res) {
    res->size = a->size;
    // Subtracting zero: copy a verbatim.
    if (b->size == 0) {
        for (char i = 0; i < a->size; i++)
            res->value[i] = a->value[i];
        return;
    }
    for (char i = 0; i < res->size; i++) {
        res->value[i] = 0;
    }
    char borrowIn, borrowOut;
    uint temp;
    char i;
    // Limb-wise subtraction over b's limbs.
    for (i = 0, borrowIn = 0; i < b->size; i++) {
        temp = a->value[i] - b->value[i];
        borrowOut = (temp > a->value[i]);
        if (borrowIn) {
            borrowOut |= (temp == 0);
            temp--;
        }
        res->value[i] = temp;
        borrowIn = borrowOut;
    }
    // Propagate a remaining borrow through a's higher limbs.
    for (; i < a->size && borrowIn; i++) {
        borrowIn = (a->value[i] == 0);
        res->value[i] = a->value[i] - 1;
    }
    for (; i < a->size; i++)
        res->value[i] = a->value[i];
    // Drop a leading zero limb, but never shrink below one limb.
    if (res->size > 1 && res->value[res->size - 1] == 0) {
        res->size--;
    }
}
// Modular exponentiation: res = a^b mod c, left-to-right square-and-multiply.
// minbuff and mulbuff are caller-provided scratch for modulo() and kali().
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
//printf("c val 0 %u\n", c->value[0]);
// Start from res = 1.
res->size = 1;
res->value[0] = 1;
// Scan the exponent from its most significant bit down.
short i = ukuranbit(b);
while (i > 0) {
i--;
// Square step: res = res^2 mod c.
kali(res,res,mulbuff);
modulo(mulbuff,c,res,minbuff);
if (getbit(b,i)) {
// Multiply step when the exponent bit is set: res = res*a mod c.
kali(res, a, mulbuff);
modulo(mulbuff, c, res, minbuff);
}
}
}
// ElGamal encryption of one message block m with ephemeral key k:
//   res[0] = g^k mod p   (first ciphertext half)
//   res[1] = m * y^k mod p  (second ciphertext half)
// minbuff/mulbuff are scratch for modexp/kali/modulo.
__device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, uint *minbuff, big *mulbuff) {
//printf("res adlaah tanga\n");
// BLok 1 Cipher
modexp(g,k,p,res,minbuff,mulbuff);
//printf("res 0 val 0 %u\n", res->value[0]);
// Blok 2 Cipher
modexp(y, k, p, res + 1,minbuff,mulbuff);
kali(res + 1, m, mulbuff);
modulo(mulbuff, p, res+1, minbuff);
//printf("res 1 val 0 %u\n", (res+1)->value[0]);
}
// Derive the ElGamal public key y = g^x mod p from the private key x.
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
/*
 * One thread encrypts one (m, k) block pair with enkripsi().
 * Shared memory: the flat array s[2400] is carved into slabs for p | g | y |
 * per-thread ciphertext pairs | per-thread multiply scratch | per-thread m |
 * per-thread k. Hard-coded assumptions: blockDim.x == 128 and sizebig == 2
 * (the local `sizebig` below shadows the host global).
 * buffmin is global per-thread scratch for modulo(); each thread writes two
 * ciphertext halves (size + limbs) to ressize/resval.
 */
__global__ void kernelenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize, uint *buffmin, uint *buffmul){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int jdx = threadIdx.x;
int sizebig = 2;
// int banyakdata = 256;
__shared__ big sm[128];
__shared__ big sk[128];
__shared__ big smulbuff[128];
__shared__ big sres[256];
__shared__ big sp;
__shared__ big sg;
__shared__ big sy;
__shared__ uint s[2400];
// Carve the flat shared array into per-purpose value slabs.
uint *spval = s;
uint *sgval = (uint*)&spval[sizebig];
uint *syval = (uint*)&sgval[sizebig];
uint *sresval = (uint*)&syval[sizebig];
uint *smulbuffval = (uint*)&sresval[2*sizebig*128*2];
//uint *sminbuffval = (uint*)&smulbuffval[2*sizebig*128];
//uint *sminbuffval = (uint*)&sresval[2*sizebig*128*2];
uint *smval = (uint*)&smulbuffval[2*sizebig*128];
uint *skval = (uint*)&smval[sizebig*128];
// Stage the key material and this thread's input block into shared memory.
// NOTE(review): every thread redundantly rewrites p/g/y.
for (int i = 0; i < sizebig; i++)
{
spval[i] = p[i];
sgval[i] = g[i];
syval[i] = y[i];
smval[jdx*sizebig+i] = m[idx*sizebig + i];
skval[jdx*sizebig+i] = k[idx*sizebig + i];
}
// Wire the big descriptors to their shared-memory value slabs.
sp.size = sizebig;
sg.size = sizebig;
sy.size = sizebig;
sm[jdx].size = sizebig;
sk[jdx].size = sizebig;
sp.value = spval;
sg.value = sgval;
sy.value = syval;
sm[jdx].value = (uint*)&smval[jdx*sizebig];
sk[jdx].value = (uint*)&skval[jdx*sizebig];
sres[2*jdx].value = (uint*)&sresval[jdx*sizebig*4];
sres[2*jdx+1].value = (uint*)&sresval[jdx*sizebig*4+sizebig*2];
smulbuff[jdx].value = (uint*)&smulbuffval[jdx*sizebig*2];
// sminbuff[jdx].value = (uint*)&sminbuffval[jdx*sizebig];
__syncthreads();
//uint* minbuff = (uint*) malloc(sizeof(uint) * sizebig);
// Encrypt this thread's block into its two shared ciphertext slots.
enkripsi(sm + jdx, sk + jdx, &sg, &sp, &sy, sres + 2*jdx, buffmin + 2 *sizebig * idx, smulbuff + jdx);
// Write the two ciphertext halves (sizes then limbs) back to global memory.
ressize[2*idx] = sres[2*jdx].size;
ressize[2*idx + 1] = sres[2*jdx + 1].size;
for (int i = 0; i < sres[2*jdx].size; i++)
{
resval[2 * idx * sizebig * 2 + i] = sres[2*jdx].value[i];
}
for (int i = 0; i < sres[2*jdx+1].size; i++)
{
resval[(2 * idx + 1)* sizebig * 2 + i] = sres[2*jdx+1].value[i];
}
}
/*
 * Host driver for GPU ElGamal encryption: copies the public key (p, g, y),
 * message blocks m and ephemeral keys k to the device, launches kernelenk
 * over `banyakdata` blocks, then copies the 2*banyakdata ciphertext halves
 * (sizes + limb values) back to the host.
 *
 * Fixes vs. the original: the seven big* device allocations (bigp, bigg,
 * bigy, bigm, bigk, bigres, bigmul) were never used by the kernel and never
 * freed — a straight device-memory leak — so they are removed, along with a
 * leftover empty for-loop.
 * NOTE(review): hipMalloc/hipMemcpy return codes are still unchecked, as in
 * the rest of this file.
 */
void CUDAenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize) {
    char *devressize;
    uint *devp, *devg, *devy, *devm, *devk, *devresval, *buffmin, *buffmul;

    // Device buffers: key material, inputs, outputs, per-thread scratch.
    hipMalloc((void**)&devp, sizebig * sizeof(uint));
    hipMalloc((void**)&devg, sizebig * sizeof(uint));
    hipMalloc((void**)&devy, sizebig * sizeof(uint));
    hipMalloc((void**)&devm, banyakdata * sizebig * sizeof(uint));
    hipMalloc((void**)&devk, banyakdata * sizebig * sizeof(uint));
    hipMalloc((void**)&devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint));
    hipMalloc((void**)&devressize, 2 * banyakdata * sizeof(char));
    hipMalloc((void**)&buffmin, banyakdata * sizebig * 2 * sizeof(uint));
    hipMalloc((void**)&buffmul, banyakdata * sizebig * 2 * sizeof(uint));

    hipMemcpy(devp, p, sizebig * sizeof(uint), hipMemcpyHostToDevice);
    hipMemcpy(devg, g, sizebig * sizeof(uint), hipMemcpyHostToDevice);
    hipMemcpy(devy, y, sizebig * sizeof(uint), hipMemcpyHostToDevice);
    hipMemcpy(devm, m, banyakdata * sizebig * sizeof(uint), hipMemcpyHostToDevice);
    hipMemcpy(devk, k, banyakdata * sizebig * sizeof(uint), hipMemcpyHostToDevice);

    kernelenk << <dimensigrid, dimensiblok >> >(devp, devg, devy, devm, devk, devresval, devressize, buffmin, buffmul);
    hipDeviceSynchronize();

    // Copy ciphertext sizes and limb values back to the host.
    hipMemcpy(ressize, devressize, 2 * banyakdata, hipMemcpyDeviceToHost);
    hipMemcpy(resval, devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint), hipMemcpyDeviceToHost);

    hipFree(devp);
    hipFree(devg);
    hipFree(devy);
    hipFree(devm);
    hipFree(devk);
    hipFree(devresval);
    hipFree(devressize);
    hipFree(buffmin);
    hipFree(buffmul);
}
/*
 * Host-side setup: fills (pval, gval, yval) with a pseudo-random "public
 * key", derives y = g^x mod p from a random private key x, and fills the
 * message/ephemeral-key arrays with random words.
 * NOTE(review): p is filled with plain rand() words, not a prime — fine for
 * benchmarking, not for real ElGamal. Fixed seed makes runs reproducible.
 */
void init(uint *pval, uint *gval, uint *yval, uint *mval, uint *kval){
srand(2018);
big *p, *g, *x, *y;
p = (big*)malloc(sizeof(big));
g = (big*)malloc(sizeof(big));
x = (big*)malloc(sizeof(big));
y = (big*)malloc(sizeof(big));
p->size = sizebig;
p->value = pval;
// Dead store: immediately overwritten by the loop below.
p->value[0] = UINT_MAX;
for (int i = 0; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
// Public key component g
g->size = sizebig;
g->value = gval;
for (int i = 0; i < g->size; i++)
{
// g->value[i] = 2;
g->value[i] = rand() % UINT_MAX;
}
// Private key x
x->size = sizebig;
x->value = (uint*) malloc(x->size * sizeof(uint));
for (int i = 0; i < x->size; i++)
{
// x->value[i] = 1751;
x->value[i] = rand() % UINT_MAX;
}
// Compute the public key y = (g^x) mod p
big* mulbuff = (big*) malloc(sizeof(big));
mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2);
uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2);
y->value = yval;
carikunciy(g,x,p,y,minbuff,mulbuff);
// printf("y size %d : %u\n", y->size, y->value[0]);
//========================================================//
// Plaintext blocks and ephemeral keys k
for(int i = 0 ; i < banyakdata * sizebig ; i++){
// mval[i] = 1001;
mval[i] = rand() % UINT_MAX;
// kval[i] = 77;
kval[i] = rand() % UINT_MAX;
}
}
// Benchmark entry point: allocates host buffers for the key, plaintext and
// ciphertext arrays, generates random inputs with init(), and runs the GPU
// encryption once via CUDAenk().
int main(){
char *ressize;
uint *p, *g, *y, *m, *k, *resval;
p = (uint*) malloc(sizebig * sizeof(uint));
g = (uint*) malloc(sizebig * sizeof(uint));
y = (uint*) malloc(sizebig * sizeof(uint));
m = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
k = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
// Two ciphertext halves per data block; each half up to 2*sizebig limbs.
resval = (uint*) malloc(2 * banyakdata * 2 * sizebig * sizeof(uint));
ressize = (char*) malloc(2 * banyakdata * sizeof(char));
init(p,g,y,m,k);
// printf("Encrypting...\n");
//========================================================//
CUDAenk(p,g,y,m,k,resval,ressize);
// for (int i = 0; i < 5; i++)
// {
// printf("Cipher %d size %d : %u\n",i, ressize[i], resval[i*2*sizebig]);
// }
// printf("Cipher ... : ...\n");
// printf("Cipher %d size %d : %u\n",banyakdata*2-2, ressize[banyakdata*2-2], resval[(banyakdata*2-2) * 2 * sizebig]);
// printf("Cipher %d size %d : %u\n",banyakdata*2-1, ressize[banyakdata*2-1], resval[(banyakdata*2-1) * 2 * sizebig]);
free(p);
free(g);
free(y);
free(m);
free(k);
free(resval);
free(ressize);
return 0;
}
| e5dbd63e36424999d735cd812321fb692b445455.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 40960;
int dimensigrid = 320;
int dimensiblok = 128;
int sizebig = 2;
typedef struct {
char size;
uint* value;
}big;
// Bit length of a big number: bits used by the top limb plus 32 for each
// lower limb. Assumes a->size >= 1.
__host__ __device__ short ukuranbit(big *a) {
    uint top = a->value[a->size - 1];
    short bits = 0;
    for (; top != 0; top >>= 1) {
        bits++;
    }
    return bits + (a->size - 1) * 32;
}
// Extract bit `count` of a (little-endian across 32-bit limbs): 1 or 0.
__host__ __device__ char getbit(big* a, short count) {
    uint word = a->value[count / 32];
    uint mask = (uint)1 << (count % 32);
    return (word & mask) ? 1 : 0;
}
// Limb `noblok` of (num << geser), geser in [0, 32): combines the high bits
// of the previous limb with the shifted bits of the current one.
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
    uint low = 0;
    if (noblok != 0 && geser != 0) {
        low = num->value[noblok - 1] >> (32 - geser);
    }
    uint high = 0;
    if (noblok != num->size) {
        high = num->value[noblok] << geser;
    }
    return low | high;
}
/*
 * Schoolbook multiplication: res = a * b, 32-bit limbs, base 2^32.
 *
 * Fixes vs. the original:
 *  - `aval * bval` was a 32x32 product evaluated in 32 bits and overflowed
 *    before the widening assignment; now widened with an explicit cast.
 *  - the limb base was UINT_MAX (2^32 - 1) via `% UINT_MAX` / `/ UINT_MAX`;
 *    the correct base for 32-bit limbs is 2^32 (low word / high word).
 *  - the for-init `char j = 0, lebih = 0` declared a char carry shadowing
 *    the outer uint carry (truncating it), and the final carry store wrote
 *    the outer variable, which was always 0.
 */
__host__ __device__ void kali(big *a, big *b, big* res) {
    // Multiplying by zero yields the canonical zero (size 0).
    if (a->size == 0 || b->size == 0) {
        res->size = 0;
        return;
    }
    char ukurana = a->size;
    char ukuranb = b->size;
    res->size = ukurana + ukuranb;
    for (char i = 0; i < res->size; i++) {
        res->value[i] = 0;
    }
    for (char i = 0; i < ukurana; i++) {
        uint aval = a->value[i];
        if (aval == 0) {
            continue;
        }
        uint lebih = 0;  // carry limb
        for (char j = 0; j < ukuranb; j++) {
            // 64-bit accumulate: product + partial sum + carry cannot overflow.
            ul temp = (ul)res->value[i + j] + (ul)aval * b->value[j] + lebih;
            res->value[i + j] = (uint)temp;  // low 32 bits (mod 2^32)
            lebih = (uint)(temp >> 32);      // high 32 bits = next carry
        }
        res->value[i + ukuranb] = lebih;
    }
    // Drop a leading zero limb if the top partial product did not carry.
    if (res->value[res->size - 1] == 0) {
        res->size--;
    }
}
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
res->value[res->size] = 0;
res->size++;
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
i2 = 32;
while (i2 > 0) {
i2--;
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
void tambah(big* a, char b, big* res) {
if (a->size == 0) {
res->size = 1;
res->value[0] = uint(b);
return;
}
char carryIn = 0;
uint temp;
res->size = a->size + 1;
res->value[0] = a->value[0] + (uint)b;
carryIn = (res->value[0] < a->value[0]);
char i = 1;
for (; i < a->size && carryIn; i++) {
temp = a->value[i] + (uint)1;
carryIn = (temp == 0);
res->value[i] = temp;
}
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (carryIn)
res->value[i] = 1;
else
res->size--;
}
/*
 * res = a - b with borrow propagation (assumes a >= b, as in the original).
 *
 * Fixes vs. the original:
 *  - b->size == 0 returned an all-zero result; a - 0 must equal a, so the
 *    limbs of a are copied instead.
 *  - the trailing `res->size--` could shrink the result to zero limbs; it
 *    is now guarded to keep at least one limb.
 */
void kurang(big* a, big *b, big* res) {
    res->size = a->size;
    // Subtracting zero: copy a verbatim.
    if (b->size == 0) {
        for (char i = 0; i < a->size; i++)
            res->value[i] = a->value[i];
        return;
    }
    for (char i = 0; i < res->size; i++) {
        res->value[i] = 0;
    }
    char borrowIn, borrowOut;
    uint temp;
    char i;
    // Limb-wise subtraction over b's limbs.
    for (i = 0, borrowIn = 0; i < b->size; i++) {
        temp = a->value[i] - b->value[i];
        borrowOut = (temp > a->value[i]);
        if (borrowIn) {
            borrowOut |= (temp == 0);
            temp--;
        }
        res->value[i] = temp;
        borrowIn = borrowOut;
    }
    // Propagate a remaining borrow through a's higher limbs.
    for (; i < a->size && borrowIn; i++) {
        borrowIn = (a->value[i] == 0);
        res->value[i] = a->value[i] - 1;
    }
    for (; i < a->size; i++)
        res->value[i] = a->value[i];
    // Drop a leading zero limb, but never shrink below one limb.
    if (res->size > 1 && res->value[res->size - 1] == 0) {
        res->size--;
    }
}
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
//printf("c val 0 %u\n", c->value[0]);
res->size = 1;
res->value[0] = 1;
short i = ukuranbit(b);
while (i > 0) {
i--;
kali(res,res,mulbuff);
modulo(mulbuff,c,res,minbuff);
if (getbit(b,i)) {
kali(res, a, mulbuff);
modulo(mulbuff, c, res, minbuff);
}
}
}
__device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, uint *minbuff, big *mulbuff) {
//printf("res adlaah tanga\n");
// BLok 1 Cipher
modexp(g,k,p,res,minbuff,mulbuff);
//printf("res 0 val 0 %u\n", res->value[0]);
// Blok 2 Cipher
modexp(y, k, p, res + 1,minbuff,mulbuff);
kali(res + 1, m, mulbuff);
modulo(mulbuff, p, res+1, minbuff);
//printf("res 1 val 0 %u\n", (res+1)->value[0]);
}
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
__global__ void kernelenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize, uint *buffmin, uint *buffmul){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int jdx = threadIdx.x;
int sizebig = 2;
// int banyakdata = 256;
__shared__ big sm[128];
__shared__ big sk[128];
__shared__ big smulbuff[128];
__shared__ big sres[256];
__shared__ big sp;
__shared__ big sg;
__shared__ big sy;
__shared__ uint s[2400];
uint *spval = s;
uint *sgval = (uint*)&spval[sizebig];
uint *syval = (uint*)&sgval[sizebig];
uint *sresval = (uint*)&syval[sizebig];
uint *smulbuffval = (uint*)&sresval[2*sizebig*128*2];
//uint *sminbuffval = (uint*)&smulbuffval[2*sizebig*128];
//uint *sminbuffval = (uint*)&sresval[2*sizebig*128*2];
uint *smval = (uint*)&smulbuffval[2*sizebig*128];
uint *skval = (uint*)&smval[sizebig*128];
for (int i = 0; i < sizebig; i++)
{
spval[i] = p[i];
sgval[i] = g[i];
syval[i] = y[i];
smval[jdx*sizebig+i] = m[idx*sizebig + i];
skval[jdx*sizebig+i] = k[idx*sizebig + i];
}
sp.size = sizebig;
sg.size = sizebig;
sy.size = sizebig;
sm[jdx].size = sizebig;
sk[jdx].size = sizebig;
sp.value = spval;
sg.value = sgval;
sy.value = syval;
sm[jdx].value = (uint*)&smval[jdx*sizebig];
sk[jdx].value = (uint*)&skval[jdx*sizebig];
sres[2*jdx].value = (uint*)&sresval[jdx*sizebig*4];
sres[2*jdx+1].value = (uint*)&sresval[jdx*sizebig*4+sizebig*2];
smulbuff[jdx].value = (uint*)&smulbuffval[jdx*sizebig*2];
// sminbuff[jdx].value = (uint*)&sminbuffval[jdx*sizebig];
__syncthreads();
//uint* minbuff = (uint*) malloc(sizeof(uint) * sizebig);
enkripsi(sm + jdx, sk + jdx, &sg, &sp, &sy, sres + 2*jdx, buffmin + 2 *sizebig * idx, smulbuff + jdx);
ressize[2*idx] = sres[2*jdx].size;
ressize[2*idx + 1] = sres[2*jdx + 1].size;
for (int i = 0; i < sres[2*jdx].size; i++)
{
resval[2 * idx * sizebig * 2 + i] = sres[2*jdx].value[i];
}
for (int i = 0; i < sres[2*jdx+1].size; i++)
{
resval[(2 * idx + 1)* sizebig * 2 + i] = sres[2*jdx+1].value[i];
}
}
/*
 * Host driver for GPU ElGamal encryption: copies the public key (p, g, y),
 * message blocks m and ephemeral keys k to the device, launches kernelenk
 * over `banyakdata` blocks, then copies the 2*banyakdata ciphertext halves
 * (sizes + limb values) back to the host.
 *
 * Fixes vs. the original: the seven big* device allocations (bigp, bigg,
 * bigy, bigm, bigk, bigres, bigmul) were never used by the kernel and never
 * freed — a straight device-memory leak — so they are removed, along with a
 * leftover empty for-loop.
 * NOTE(review): cudaMalloc/cudaMemcpy return codes are still unchecked, as
 * in the rest of this file.
 */
void CUDAenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize) {
    char *devressize;
    uint *devp, *devg, *devy, *devm, *devk, *devresval, *buffmin, *buffmul;

    // Device buffers: key material, inputs, outputs, per-thread scratch.
    cudaMalloc((void**)&devp, sizebig * sizeof(uint));
    cudaMalloc((void**)&devg, sizebig * sizeof(uint));
    cudaMalloc((void**)&devy, sizebig * sizeof(uint));
    cudaMalloc((void**)&devm, banyakdata * sizebig * sizeof(uint));
    cudaMalloc((void**)&devk, banyakdata * sizebig * sizeof(uint));
    cudaMalloc((void**)&devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint));
    cudaMalloc((void**)&devressize, 2 * banyakdata * sizeof(char));
    cudaMalloc((void**)&buffmin, banyakdata * sizebig * 2 * sizeof(uint));
    cudaMalloc((void**)&buffmul, banyakdata * sizebig * 2 * sizeof(uint));

    cudaMemcpy(devp, p, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
    cudaMemcpy(devg, g, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
    cudaMemcpy(devy, y, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
    cudaMemcpy(devm, m, banyakdata * sizebig * sizeof(uint), cudaMemcpyHostToDevice);
    cudaMemcpy(devk, k, banyakdata * sizebig * sizeof(uint), cudaMemcpyHostToDevice);

    kernelenk << <dimensigrid, dimensiblok >> >(devp, devg, devy, devm, devk, devresval, devressize, buffmin, buffmul);
    cudaDeviceSynchronize();

    // Copy ciphertext sizes and limb values back to the host.
    cudaMemcpy(ressize, devressize, 2 * banyakdata, cudaMemcpyDeviceToHost);
    cudaMemcpy(resval, devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint), cudaMemcpyDeviceToHost);

    cudaFree(devp);
    cudaFree(devg);
    cudaFree(devy);
    cudaFree(devm);
    cudaFree(devk);
    cudaFree(devresval);
    cudaFree(devressize);
    cudaFree(buffmin);
    cudaFree(buffmul);
}
void init(uint *pval, uint *gval, uint *yval, uint *mval, uint *kval){
srand(2018);
big *p, *g, *x, *y;
p = (big*)malloc(sizeof(big));
g = (big*)malloc(sizeof(big));
x = (big*)malloc(sizeof(big));
y = (big*)malloc(sizeof(big));
p->size = sizebig;
p->value = pval;
p->value[0] = UINT_MAX;
for (int i = 0; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
// Kunci publik g
g->size = sizebig;
g->value = gval;
for (int i = 0; i < g->size; i++)
{
// g->value[i] = 2;
g->value[i] = rand() % UINT_MAX;
}
// Kunci privat x
x->size = sizebig;
x->value = (uint*) malloc(x->size * sizeof(uint));
for (int i = 0; i < x->size; i++)
{
// x->value[i] = 1751;
x->value[i] = rand() % UINT_MAX;
}
// Cari nilai kunci publik y = (g^x) mod p
big* mulbuff = (big*) malloc(sizeof(big));
mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2);
uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2);
y->value = yval;
carikunciy(g,x,p,y,minbuff,mulbuff);
// printf("y size %d : %u\n", y->size, y->value[0]);
//========================================================//
// Blok plainteks dan k
for(int i = 0 ; i < banyakdata * sizebig ; i++){
// mval[i] = 1001;
mval[i] = rand() % UINT_MAX;
// kval[i] = 77;
kval[i] = rand() % UINT_MAX;
}
}
int main(){
char *ressize;
uint *p, *g, *y, *m, *k, *resval;
p = (uint*) malloc(sizebig * sizeof(uint));
g = (uint*) malloc(sizebig * sizeof(uint));
y = (uint*) malloc(sizebig * sizeof(uint));
m = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
k = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
resval = (uint*) malloc(2 * banyakdata * 2 * sizebig * sizeof(uint));
ressize = (char*) malloc(2 * banyakdata * sizeof(char));
init(p,g,y,m,k);
// printf("Encrypting...\n");
//========================================================//
CUDAenk(p,g,y,m,k,resval,ressize);
// for (int i = 0; i < 5; i++)
// {
// printf("Cipher %d size %d : %u\n",i, ressize[i], resval[i*2*sizebig]);
// }
// printf("Cipher ... : ...\n");
// printf("Cipher %d size %d : %u\n",banyakdata*2-2, ressize[banyakdata*2-2], resval[(banyakdata*2-2) * 2 * sizebig]);
// printf("Cipher %d size %d : %u\n",banyakdata*2-1, ressize[banyakdata*2-1], resval[(banyakdata*2-1) * 2 * sizebig]);
free(p);
free(g);
free(y);
free(m);
free(k);
free(resval);
free(ressize);
return 0;
}
|
a7269f2fd142d2eb02ced975bb3033084b5de632.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fill kernel: sets vals[idx] = mu + sd for every idx < N, linearising a
// (2D grid x 3D block) launch into one flat thread index.
__global__ void dset_both_kernel(double *vals, int N, double mu, float sd)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
// Linear block index within the (x, y) grid.
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Grid-tail guard.
if(idx < N)
vals[idx] = mu + sd;
} | a7269f2fd142d2eb02ced975bb3033084b5de632.cu | #include "includes.h"
__global__ void dset_both_kernel(double *vals, int N, double mu, float sd)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N)
vals[idx] = mu + sd;
} |
74a103df9f786aad6f458d4c00a5d8a516ab5893.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <numpy/arrayobject.h>
#include <assert.h>
//#include <cutil_inline.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
/*
 * Python entry point: builds the global ConvNet from a list of layer
 * parameter dicts, a minibatch size, and a GPU device id, then starts its
 * worker thread. Must be called exactly once (asserts model == NULL).
 */
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
// Parse (list, int, int); NULL return raises a Python TypeError on mismatch.
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
// Spawns the thread that consumes the worker queue.
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;  // optional flag; nonzero presumably selects test mode -- semantics live in TrainingWorker
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
// Wrap the Python list of matrices and hand it to the worker thread; the
// TrainingWorker presumably takes ownership of the CPUData from here on.
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;  // number of views per case; index of the logreg cost layer (per the worker's ctor)
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
// Enqueue a multiview test pass; result is retrieved later via finishBatch.
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
 * Starts a feature-extraction pass (asynchronous). The LAST matrix in the
 * input list is popped off and used as the buffer into which layer
 * `layerIdx`'s features are written; the remaining matrices are the input data.
 */
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
// Blocks until the worker posts the result for the batch started earlier.
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
// Build {cost name -> [value, ...]} from the cost map.
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
Py_DECREF(f); // PyList_Append adds its own reference; drop ours or the float leaks
}
PyDict_SetItemString(dict, it->first.c_str(), v);
Py_DECREF(v); // PyDict_SetItemString adds its own reference; drop ours or the list leaks
}
// "N" steals the reference to dict, so no DECREF is needed for it.
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
/*
 * Runs a gradient check on the given batch: enqueues a GradCheckWorker and,
 * unlike startBatch, BLOCKS until the worker reports BATCH_DONE.
 */
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
// Synchronous: wait for the check to finish before returning to Python.
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
// Synchronous: block until the sync worker signals completion.
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
| 74a103df9f786aad6f458d4c00a5d8a516ab5893.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <numpy/arrayobject.h>
#include <assert.h>
//#include <cutil_inline.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
// Blocks until the worker posts the result for the batch started earlier.
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
// Build {cost name -> [value, ...]} from the cost map.
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
Py_DECREF(f); // PyList_Append adds its own reference; drop ours or the float leaks
}
PyDict_SetItemString(dict, it->first.c_str(), v);
Py_DECREF(v); // PyDict_SetItemString adds its own reference; drop ours or the list leaks
}
// "N" steals the reference to dict, so no DECREF is needed for it.
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
|
b61b55012d761c48134a471c84e44115b02571a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <rocblas.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
namespace pogs {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
// Per-matrix GPU-side state: keeps the caller's host data pointer (used by
// the later device upload) and owns a BLAS handle whose lifetime matches
// this object (created in the ctor, destroyed in the dtor).
template<typename T>
struct GpuData {
const T *orig_data;
hipblasHandle_t handle;
GpuData(const T *orig_data) : orig_data(orig_data) {
hipblasCreate(&handle);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
hipblasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
// Translate a transpose flag to the BLAS enum: 'n'/'N' means no transpose,
// 't'/'T' means transpose; anything else trips the assertion.
hipblasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
if (trans == 't' || trans == 'T')
return HIPBLAS_OP_T;
return HIPBLAS_OP_N;
}
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/*
 * Wraps caller-owned host data (not copied here); 'r'/'R' selects row-major
 * storage, 'c'/'C' column-major. The device upload is deferred to Init().
 */
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n), _data(0) {
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
// Set GPU specific _info.
GpuData<T> *info = new GpuData<T>(data);
this->_info = reinterpret_cast<void*>(info);
}
/*
 * Copy constructor: shares the original host data pointer but creates a
 * fresh BLAS handle. The device buffer is NOT duplicated (_data starts 0).
 */
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n), _data(0), _ord(A._ord) {
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info);
GpuData<T> *info = new GpuData<T>(info_A->orig_data);
this->_info = reinterpret_cast<void*>(info);
}
/*
 * Destructor: releases the GpuData wrapper (which destroys the BLAS handle)
 * and, only if Init() ran, frees the device copy of the matrix.
 */
template <typename T>
MatrixDense<T>::~MatrixDense() {
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
delete info;
this->_info = 0;
if (this->_done_init && _data) {
hipFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
/*
 * Allocates the device buffer and uploads the host data captured at
 * construction. Returns 0 on success, 1 if already initialized.
 */
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
// Copy Matrix to GPU.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked; failures
// surface only via DEBUG_CUDA_CHECK_ERR in debug builds -- consider
// checking explicitly.
hipMalloc(&_data, this->_m * this->_n * sizeof(T));
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),
hipMemcpyHostToDevice);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
/*
 * GEMV: y := alpha * op(A) * x + beta * y, where op is identity or transpose
 * per `trans` ('n'/'N' vs 't'/'T'), dispatched on the stored matrix order.
 * Returns 0 on success, 1 if Init() has not been called.
 *
 * NOTE(review): the vector views are always sized x:n, y:m regardless of
 * `trans` -- verify callers pass buffers sized for the transposed case.
 */
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
/*
 * In-place Sinkhorn-Knopp equilibration: computes row scalings d (length m)
 * and column scalings e (length n), replaces the stored matrix by D*A*E,
 * then divides it (and sqrt-adjusts d, e) by its estimated kNormNormalize
 * norm so the stored matrix ends up with norm ~1.
 * Returns 0 on success, 1 if Init() has not been called.
 */
template <typename T>
int MatrixDense<T>::Equil(T *d, T *e) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
hipMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
// Fill sign bits, assigning each thread a multiple of 8 elements.
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
AbsF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Perform Sinkhorn-Knopp equilibration.
SinkhornKnopp(this, d, e);
hipDeviceSynchronize();
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
IdentityF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
hipDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
hipDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
hipDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
hipFree(sign);
CUDA_CHECK_ERR();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
// Delegated to the Norm2Est helper (defined elsewhere).
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
// Frobenius norm of the flattened matrix, scaled by sqrt(min(m, n)).
// Spell the min out explicitly: the unqualified ::min emitted by the
// hipify translation is not a standard global-scope function (the CUDA
// original used std::min) and may fail to compile on some toolchains.
return cml::blas_nrm2(hdl, &a) /
std::sqrt(A.Rows() < A.Cols() ? A.Rows() : A.Cols());
}
case kNorm1:
// 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
// Flat index over all m*n elements, stepped by the total thread count;
// row-major element t maps to row t / n, column t % n.
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
// Flat index over all m*n elements, stepped by the total thread count;
// column-major element t maps to row t % m, column t / m.
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
/*
 * Host launcher for A := D * A * E: picks the row- or column-major kernel
 * to match the matrix's storage order and covers all m*n elements with one
 * 1-D grid of cml::kBlockSize-thread blocks.
 */
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultRow), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultCol), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
#if !defined(POGS_DOUBLE) || POGS_DOUBLE==1
template class MatrixDense<double>;
#endif
#if !defined(POGS_SINGLE) || POGS_SINGLE==1
template class MatrixDense<float>;
#endif
} // namespace pogs
| b61b55012d761c48134a471c84e44115b02571a3.cu | #include <cublas_v2.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
namespace pogs {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data;
cublasHandle_t handle;
GpuData(const T *orig_data) : orig_data(orig_data) {
cublasCreate(&handle);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
cublasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
cublasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? CUBLAS_OP_N : CUBLAS_OP_T;
}
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n), _data(0) {
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
// Set GPU specific _info.
GpuData<T> *info = new GpuData<T>(data);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n), _data(0), _ord(A._ord) {
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info);
GpuData<T> *info = new GpuData<T>(info_A->orig_data);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixDense<T>::~MatrixDense() {
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
delete info;
this->_info = 0;
if (this->_done_init && _data) {
cudaFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
// Copy Matrix to GPU.
cudaMalloc(&_data, this->_m * this->_n * sizeof(T));
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),
cudaMemcpyHostToDevice);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Equil(T *d, T *e) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
cudaMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
// Fill sign bits, assigning each thread a multiple of 8 elements.
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SquareF<T>());
} else {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
AbsF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Perform Sinkhorn-Knopp equilibration.
SinkhornKnopp(this, d, e);
cudaDeviceSynchronize();
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SqrtF<T>());
} else {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
IdentityF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
cudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
cudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
cudaDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
cudaFree(sign);
CUDA_CHECK_ERR();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
// Delegated to the Norm2Est helper (defined elsewhere).
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
// Frobenius norm of the flattened matrix, scaled by sqrt(min(m, n)).
return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
// 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultRow<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultCol<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
#if !defined(POGS_DOUBLE) || POGS_DOUBLE==1
template class MatrixDense<double>;
#endif
#if !defined(POGS_SINGLE) || POGS_SINGLE==1
template class MatrixDense<float>;
#endif
} // namespace pogs
|
7ecb1038a15fb05e686224906dc542486fa36841.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mat_gpu.h"
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* rMaxActs: (numFilters, numOutputs, numImages)
* targets: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* targets, const int imgSizeX, const int imgSizeY, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const int outputsY, const float scaleTargets, const float scaleOutputs) {
// Backward pass of average pooling: each input pixel accumulates the
// gradient of every pooling window (output cell) that covers it, weighted
// by 1 / (window area clipped to the image).
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSizeX + blockPxX;
const int numOutputs = outputsX * outputsY;
const int imgPixels = imgSizeX * imgSizeY;
// Range of output cells whose pooling window covers this block's pixel.
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsY, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
// Advance base pointers to this thread's (filter, image, pixel) slot.
avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
targets += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
// Per-thread accumulators for filtersPerThread x imgsPerThread (filter, image) pairs.
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
// Pixels outside the span of all pooling windows receive zero gradient.
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX &&
blockPxY >= startX && blockPxY < startX + strideX * (outputsY-1) + subsX) {
for (int my = startOutputY; my < endOutputY; my++) {
const float regionStartY = fmaxf(0, startX + my * strideX);
const float regionEndY = fminf(imgSizeY, startX + my * strideX + subsX);
const float regionSizeY = regionEndY - regionStartY;
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
const float regionStartX = fmaxf(0, startX + mx * strideX);
const float regionEndX = fminf(imgSizeX, startX + mx * strideX + subsX);
const float regionSizeX = regionEndX - regionStartX;
// It's important to do the division here, because pushing division into the below
// loops makes the code 4x slower.
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
}
}
}
}
}
}
// Write-out: either overwrite targets (add == false) or blend with the
// existing contents using scaleTargets/scaleOutputs (add == true).
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* targets: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* targets,
                              const int imgSizeX, const int imgSizeY, const int numFilters, const int numImages,
                              const int subsX, const int startX, const int strideX, const int outputsX,
                              const int outputsY, const float scaleTargets, const float scaleOutputs) {
    // Backprop through max pooling: one block per image pixel / image-filter tile.
    // A pixel receives the gradient of a pooling output only where the input value
    // equals that output's pooled maximum (img == maxAct).
    // Tile of input values; each thread writes and later reads ONLY its own
    // [threadIdx.y + B_Y*f][threadIdx.x + B_X*i] slots, so no __syncthreads()
    // is required between the store and the loads below.
    __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    // Decode pixel coordinate and image/filter tile from the 2D grid index.
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSizeX + blockPxX;
    const int numOutputs = outputsX * outputsY;
    const int imgPixels = imgSizeX * imgSizeY;
    // Range of pooling outputs whose subsX-wide window covers this pixel.
    const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
    const int endOutputY = MIN(outputsY, 1 + (blockPxY - startX) / strideX);
    const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
    const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
    const int imgIdx = blockImgIdx + threadIdx.x;
    // Position all four pointers at this thread's first (filter, image) element.
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    targets += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-thread gradient accumulators.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    // Skip entirely if no pooling window can touch this pixel.
    if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX &&
        blockPxY >= startX && blockPxY < startX + strideX * (outputsY-1) + subsX) {
        // Cache this pixel's input values once; they are re-read for every output.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
                }
            }
        }
        for (int my = startOutputY; my < endOutputY; my++) {
            for (int mx = startOutputX; mx < endOutputX; mx++) {
                const int outputIdx = my * outputsX + mx;
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
                            const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
                            const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
                            // (img == ma) is 0 or 1: gradient flows only through the argmax.
                            prod[f][i] += (img == ma) * mg;
                        }
                    }
                }
            }
        }
    }
    // Write-back: overwrite or blend, selected at compile time by 'add'.
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void _convLocalAvgUndo(MatGPU& avgGrads, MatGPU& targets,
                       size_t imgSize1, size_t imgSize2, size_t scale, size_t stride) {
    // Host-side dispatcher for kLocalAvgUndo: derives the pooling geometry,
    // validates matrix shapes, then selects the matching template instantiation.
    const int subsX = (int) scale;        // pooling window side
    const int strideX = (int) stride;     // pooling stride
    const int imgSizeX = (int) imgSize1;
    const int imgSizeY = (int) imgSize2;
    const int imgPixels = imgSizeX * imgSizeY;
    const int startX = 0;                 // no padding
    const int outputsY = DIVUP(imgSizeY, strideX);
    const int outputsX = DIVUP(imgSizeX, strideX);
    const int outputs = outputsX * outputsY;
    ftype scaleTargets = 0;               // 0 / 1 => targets are overwritten
    ftype scaleOutput = 1;
    mexAssert(avgGrads.stride_ == 1 && targets.stride_ == 1,
        "In convLocalAvgUndo one of strides is not 1");
    const int numImages = (int) avgGrads.size1_;
    mexAssert(avgGrads.size2_ % outputs == 0, "au1");
    const int numFilters = (int) avgGrads.size2_ / outputs;
    mexAssert(targets.size1_ == numImages, "au2");
    mexAssert(targets.size2_ == imgPixels * numFilters, "au3");
    mexAssert(numFilters % 16 == 0, "au4");
    mexAssert(strideX <= subsX, "au5");
    // Images-per-thread follows the batch divisibility the kernel requires.
    const int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    const bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSizeX, (numFilters / (4 * 4)) * imgSizeY);
    hipStream_t stream = MatGPU::_defaultStream;
    // Each (imgsPerThread, add, checkCaseBounds) combination is a distinct
    // compile-time instantiation, so the dispatch must stay a branch tree.
#define AVG_UNDO_LAUNCH(IPT, ADD, CHECK) \
    hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, IPT, 4, ADD, CHECK> ), dim3(blocks), dim3(threads), 0, stream, \
        avgGrads.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, \
        subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput)
    const bool overwrite = (scaleTargets == 0 && scaleOutput == 1);
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (overwrite) { AVG_UNDO_LAUNCH(4, false, true); } else { AVG_UNDO_LAUNCH(4, true, true); }
        } else {
            if (overwrite) { AVG_UNDO_LAUNCH(4, false, false); } else { AVG_UNDO_LAUNCH(4, true, false); }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (overwrite) { AVG_UNDO_LAUNCH(2, false, true); } else { AVG_UNDO_LAUNCH(2, true, true); }
        } else {
            if (overwrite) { AVG_UNDO_LAUNCH(2, false, false); } else { AVG_UNDO_LAUNCH(2, true, false); }
        }
    } else {
        if (checkCaseBounds) {
            if (overwrite) { AVG_UNDO_LAUNCH(1, false, true); } else { AVG_UNDO_LAUNCH(1, true, true); }
        } else {
            if (overwrite) { AVG_UNDO_LAUNCH(1, false, false); } else { AVG_UNDO_LAUNCH(1, true, false); }
        }
    }
#undef AVG_UNDO_LAUNCH
    mexAssert(hipGetLastError() == hipSuccess, "convLocalAvgUndo: kernel execution failed");
}
/*
* imgs: (numFilters, imgPixels, numImages)
* rMaxActs: (numFilters, numOutputs, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* targets: (numFilters, imgPixels, numImages)
*/
void _convLocalMaxUndo(MatGPU& images, MatGPU& maxActs, MatGPU& maxGrads, MatGPU& targets, size_t imgSize1, size_t imgSize2, size_t scale, size_t stride) {
    // Host-side dispatcher for kLocalMaxUndo: derives the pooling geometry,
    // checks that all four matrices are shape-consistent, then selects the
    // matching template instantiation of the kernel.
    const int subsX = (int) scale;        // pooling window side
    const int strideX = (int) stride;     // pooling stride
    const int imgSizeX = (int) imgSize1;
    const int imgSizeY = (int) imgSize2;
    const int imgPixels = imgSizeX * imgSizeY;
    const int startX = 0;                 // no padding
    const int outputsY = DIVUP(imgSizeY, strideX);
    const int outputsX = DIVUP(imgSizeX, strideX);
    const int outputs = outputsX * outputsY;
    ftype scaleTargets = 0;               // 0 / 1 => targets are overwritten
    ftype scaleOutput = 1;
    mexAssert(images.stride_ == 1 && maxActs.stride_ == 1 &&
        maxGrads.stride_ == 1 && targets.stride_ == 1,
        "In _convLocalMaxUndo one of strides is not 1");
    const int numImages = (int) maxActs.size1_;
    mexAssert(maxActs.size2_ % outputs == 0, "mu1");
    const int numFilters = (int) maxActs.size2_ / outputs;
    mexAssert(targets.size1_ == numImages, "mu2");
    mexAssert(targets.size2_ == imgPixels * numFilters, "mu3");
    mexAssert(images.size1_ == targets.size1_ &&
        images.size2_ == targets.size2_, "mu4");
    mexAssert(maxActs.size1_ == maxGrads.size1_ &&
        maxActs.size2_ == maxGrads.size2_, "mu5");
    mexAssert(numFilters % 16 == 0, "mu6");
    mexAssert(strideX <= subsX, "mu7");
    // Images-per-thread follows the batch divisibility the kernel requires.
    const int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    const bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSizeX, (numFilters / (4 * 2)) * imgSizeY);
    hipStream_t stream = MatGPU::_defaultStream;
    // Each (imgsPerThread, add, checkCaseBounds) combination is a distinct
    // compile-time instantiation, so the dispatch must stay a branch tree.
#define MAX_UNDO_LAUNCH(IPT, ADD, CHECK) \
    hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, IPT, 2, ADD, CHECK> ), dim3(blocks), dim3(threads), 0, stream, \
        images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, \
        subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput)
    const bool overwrite = (scaleTargets == 0 && scaleOutput == 1);
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (overwrite) { MAX_UNDO_LAUNCH(4, false, true); } else { MAX_UNDO_LAUNCH(4, true, true); }
        } else {
            if (overwrite) { MAX_UNDO_LAUNCH(4, false, false); } else { MAX_UNDO_LAUNCH(4, true, false); }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (overwrite) { MAX_UNDO_LAUNCH(2, false, true); } else { MAX_UNDO_LAUNCH(2, true, true); }
        } else {
            if (overwrite) { MAX_UNDO_LAUNCH(2, false, false); } else { MAX_UNDO_LAUNCH(2, true, false); }
        }
    } else {
        if (checkCaseBounds) {
            if (overwrite) { MAX_UNDO_LAUNCH(1, false, true); } else { MAX_UNDO_LAUNCH(1, true, true); }
        } else {
            if (overwrite) { MAX_UNDO_LAUNCH(1, false, false); } else { MAX_UNDO_LAUNCH(1, true, false); }
        }
    }
#undef MAX_UNDO_LAUNCH
    mexAssert(hipGetLastError() == hipSuccess, "convLocalMaxUndo: kernel execution failed");
}
| 7ecb1038a15fb05e686224906dc542486fa36841.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mat_gpu.h"
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* rMaxActs: (numFilters, numOutputs, numImages)
* targets: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* targets, const int imgSizeX, const int imgSizeY, const int numFilters,
                                  const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const int outputsY, const float scaleTargets, const float scaleOutputs) {
    // Backprop through average pooling: each block handles ONE image pixel for a
    // tile of B_X*imgsPerThread images and B_Y*filtersPerThread filters, summing
    // the gradients of every pooling output whose window covers this pixel,
    // each scaled by the inverse of that window's (clipped) area.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    // Decode pixel coordinate and image/filter tile from the 2D grid index.
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSizeX + blockPxX;
    const int numOutputs = outputsX * outputsY;
    const int imgPixels = imgSizeX * imgSizeY;
    // Range [startOutput, endOutput) of pooling outputs whose subsX-wide window
    // (placed at startX + m*strideX) contains this pixel row/column.
    const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
    const int endOutputY = MIN(outputsY, 1 + (blockPxY - startX) / strideX);
    const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
    const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
    const int imgIdx = blockImgIdx + threadIdx.x;
    // Advance the base pointers to this thread's first (filter, image) element;
    // layout is (numFilters, numOutputs|imgPixels, numImages), image-major innermost.
    avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    targets += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-thread accumulators, kept in registers via full unrolling.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    // Skip entirely if no pooling window can touch this pixel.
    if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX &&
        blockPxY >= startX && blockPxY < startX + strideX * (outputsY-1) + subsX) {
        for (int my = startOutputY; my < endOutputY; my++) {
            // Window extent clipped to the image; its area normalizes the gradient.
            const float regionStartY = fmaxf(0, startX + my * strideX);
            const float regionEndY = fminf(imgSizeY, startX + my * strideX + subsX);
            const float regionSizeY = regionEndY - regionStartY;
            for (int mx = startOutputX; mx < endOutputX; mx++) {
                const int outputIdx = my * outputsX + mx;
                const float regionStartX = fmaxf(0, startX + mx * strideX);
                const float regionEndX = fminf(imgSizeX, startX + mx * strideX + subsX);
                const float regionSizeX = regionEndX - regionStartX;
                // It's important to do the division here, because pushing division into the below
                // loops makes the code 4x slower.
                const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
                        }
                    }
                }
            }
        }
    }
    // Write-back: either overwrite targets or blend with existing contents
    // using scaleTargets/scaleOutputs (compile-time 'add' selects the path).
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* targets: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* targets,
                              const int imgSizeX, const int imgSizeY, const int numFilters, const int numImages,
                              const int subsX, const int startX, const int strideX, const int outputsX,
                              const int outputsY, const float scaleTargets, const float scaleOutputs) {
    // Backprop through max pooling: one block per image pixel / image-filter tile.
    // A pixel receives the gradient of a pooling output only where the input value
    // equals that output's pooled maximum (img == maxAct).
    // Tile of input values; each thread writes and later reads ONLY its own
    // [threadIdx.y + B_Y*f][threadIdx.x + B_X*i] slots, so no __syncthreads()
    // is required between the store and the loads below.
    __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    // Decode pixel coordinate and image/filter tile from the 2D grid index.
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSizeX + blockPxX;
    const int numOutputs = outputsX * outputsY;
    const int imgPixels = imgSizeX * imgSizeY;
    // Range of pooling outputs whose subsX-wide window covers this pixel.
    const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
    const int endOutputY = MIN(outputsY, 1 + (blockPxY - startX) / strideX);
    const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
    const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
    const int imgIdx = blockImgIdx + threadIdx.x;
    // Position all four pointers at this thread's first (filter, image) element.
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    targets += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-thread gradient accumulators.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    // Skip entirely if no pooling window can touch this pixel.
    if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX &&
        blockPxY >= startX && blockPxY < startX + strideX * (outputsY-1) + subsX) {
        // Cache this pixel's input values once; they are re-read for every output.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
                }
            }
        }
        for (int my = startOutputY; my < endOutputY; my++) {
            for (int mx = startOutputX; mx < endOutputX; mx++) {
                const int outputIdx = my * outputsX + mx;
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
                            const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
                            const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
                            // (img == ma) is 0 or 1: gradient flows only through the argmax.
                            prod[f][i] += (img == ma) * mg;
                        }
                    }
                }
            }
        }
    }
    // Write-back: overwrite or blend, selected at compile time by 'add'.
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void _convLocalAvgUndo(MatGPU& avgGrads, MatGPU& targets,
                       size_t imgSize1, size_t imgSize2, size_t scale, size_t stride) {
    // Host-side dispatcher for kLocalAvgUndo: derives the pooling geometry,
    // validates matrix shapes, then selects the matching template instantiation.
    const int subsX = (int) scale;        // pooling window side
    const int strideX = (int) stride;     // pooling stride
    const int imgSizeX = (int) imgSize1;
    const int imgSizeY = (int) imgSize2;
    const int imgPixels = imgSizeX * imgSizeY;
    const int startX = 0;                 // no padding
    const int outputsY = DIVUP(imgSizeY, strideX);
    const int outputsX = DIVUP(imgSizeX, strideX);
    const int outputs = outputsX * outputsY;
    ftype scaleTargets = 0;               // 0 / 1 => targets are overwritten
    ftype scaleOutput = 1;
    mexAssert(avgGrads.stride_ == 1 && targets.stride_ == 1,
        "In convLocalAvgUndo one of strides is not 1");
    const int numImages = (int) avgGrads.size1_;
    mexAssert(avgGrads.size2_ % outputs == 0, "au1");
    const int numFilters = (int) avgGrads.size2_ / outputs;
    mexAssert(targets.size1_ == numImages, "au2");
    mexAssert(targets.size2_ == imgPixels * numFilters, "au3");
    mexAssert(numFilters % 16 == 0, "au4");
    mexAssert(strideX <= subsX, "au5");
    // Images-per-thread follows the batch divisibility the kernel requires.
    const int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    const bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSizeX, (numFilters / (4 * 4)) * imgSizeY);
    cudaStream_t stream = MatGPU::_defaultStream;
    // Each (imgsPerThread, add, checkCaseBounds) combination is a distinct
    // compile-time instantiation, so the dispatch must stay a branch tree.
#define AVG_UNDO_LAUNCH(IPT, ADD, CHECK) \
    kLocalAvgUndo<4, 32, IPT, 4, ADD, CHECK><<<blocks, threads, 0, stream>>>( \
        avgGrads.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, \
        subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput)
    const bool overwrite = (scaleTargets == 0 && scaleOutput == 1);
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (overwrite) { AVG_UNDO_LAUNCH(4, false, true); } else { AVG_UNDO_LAUNCH(4, true, true); }
        } else {
            if (overwrite) { AVG_UNDO_LAUNCH(4, false, false); } else { AVG_UNDO_LAUNCH(4, true, false); }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (overwrite) { AVG_UNDO_LAUNCH(2, false, true); } else { AVG_UNDO_LAUNCH(2, true, true); }
        } else {
            if (overwrite) { AVG_UNDO_LAUNCH(2, false, false); } else { AVG_UNDO_LAUNCH(2, true, false); }
        }
    } else {
        if (checkCaseBounds) {
            if (overwrite) { AVG_UNDO_LAUNCH(1, false, true); } else { AVG_UNDO_LAUNCH(1, true, true); }
        } else {
            if (overwrite) { AVG_UNDO_LAUNCH(1, false, false); } else { AVG_UNDO_LAUNCH(1, true, false); }
        }
    }
#undef AVG_UNDO_LAUNCH
    mexAssert(cudaGetLastError() == cudaSuccess, "convLocalAvgUndo: kernel execution failed");
}
/*
* imgs: (numFilters, imgPixels, numImages)
* rMaxActs: (numFilters, numOutputs, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* targets: (numFilters, imgPixels, numImages)
*/
// Backward pass ("undo") of local max pooling. Presumably routes each pooled
// gradient in maxGrads back to the input location that produced the max
// (determined by comparing images against maxActs) and writes the result into
// targets -- TODO confirm against the kLocalMaxUndo kernel definition.
// The branching below only selects compile-time template parameters
// (imgsPerThread, and whether per-image bounds checks are required); every
// branch launches the same kLocalMaxUndo kernel with identical arguments.
void _convLocalMaxUndo(MatGPU& images, MatGPU& maxActs, MatGPU& maxGrads, MatGPU& targets, size_t imgSize1, size_t imgSize2, size_t scale, size_t stride) {
int subsX = (int) scale;
int strideX = (int) stride;
int imgSizeX = (int) imgSize1;
int imgSizeY = (int) imgSize2;
int imgPixels = imgSizeX * imgSizeY;
int startX = 0;
// number of pooled outputs per spatial axis (ceil division)
int outputsY = DIVUP(imgSizeY, strideX);
int outputsX = DIVUP(imgSizeX, strideX);
int outputs = outputsX * outputsY;
// fixed here: targets is overwritten (scaleTargets == 0) with the raw
// gradient (scaleOutput == 1); see note on dead branches below
ftype scaleTargets = 0;
ftype scaleOutput = 1;
mexAssert(images.stride_ == 1 && maxActs.stride_ == 1 &&
maxGrads.stride_ == 1 && targets.stride_ == 1,
"In _convLocalMaxUndo one of strides is not 1");
int numImages = (int) maxActs.size1_;
mexAssert(maxActs.size2_ % outputs == 0, "mu1");
int numFilters = (int) maxActs.size2_ / outputs;
mexAssert(targets.size1_ == numImages, "mu2");
mexAssert(targets.size2_ == imgPixels * numFilters, "mu3");
mexAssert(images.size1_ == targets.size1_ &&
images.size2_ == targets.size2_, "mu4");
mexAssert(maxActs.size1_ == maxGrads.size1_ &&
maxActs.size2_ == maxGrads.size2_, "mu5");
mexAssert(numFilters % 16 == 0, "mu6");
mexAssert(strideX <= subsX, "mu7");
// process 4 images per thread when numImages is a multiple of 128,
// 2 when a multiple of 64, otherwise 1
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
// bounds checks are only needed when images don't tile evenly across threads
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSizeX, (numFilters / (4 * 2)) * imgSizeY);
cudaStream_t stream = MatGPU::_defaultStream;
// NOTE: since scaleTargets/scaleOutput are hard-coded to 0/1 above, only the
// <..., false, ...> (scale == false) specializations are actually reachable;
// the else branches are retained for parity with the original dispatch code.
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads, 0, stream>>>(images.data_, maxGrads.data_, maxActs.data_, targets.data_, imgSizeX, imgSizeY, numFilters, numImages, subsX, startX, strideX, outputsX, outputsY, scaleTargets, scaleOutput);
}
}
}
// kernel launch errors only; asynchronous execution errors surface later
mexAssert(cudaGetLastError() == cudaSuccess, "convLocalMaxUndo: kernel execution failed");
}
|
0d457eb9c9ae771d347a46995f7f7df8c0b4d4e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Builds the inverse of a permutation: for every linear index e it writes e
// into z at the position stored in x[e] (i.e. z[x[e]] = e). x and z are
// assumed to hold the same number of elements.
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {

    const auto perm = reinterpret_cast<const T*>(vx);
    auto inv        = reinterpret_cast<T*>(vz);

    // broadcast array length and grid stride via shared memory (thread 0 writes)
    __shared__ Nd4jLong arrLen, gridStride;

    if (threadIdx.x == 0) {
        arrLen     = shape::length(xShapeInfo);
        gridStride = gridDim.x * blockDim.x;
    }
    __syncthreads();

    // grid-stride loop over every element of the permutation
    for (Nd4jLong e = blockIdx.x * blockDim.x + threadIdx.x; e < arrLen; e += gridStride) {
        const auto srcOffset = shape::getIndexOffset(e, xShapeInfo, arrLen);
        const Nd4jLong pos   = perm[srcOffset];
        const auto dstOffset = shape::getIndexOffset(pos, zShapeInfo, arrLen);
        inv[dstOffset] = e;
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for invertPermutationCuda<T> on the given stream.
// NOTE(review): 1024 bytes of dynamic shared memory are requested, but the
// kernel only declares static __shared__ variables -- the allocation appears
// unnecessary; confirm before changing.
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
hipLaunchKernelGGL(( invertPermutationCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
// Public entry point: computes output = inverse permutation of input.
// One thread per element; type-dispatched over all libnd4j types.
void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
const int threadsPerBlock = MAX_NUM_THREADS;
// ceil-div so every element is covered
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "invertPermutation");
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Per-matrix trace: one block per output element (per innermost matrix of x);
// each block sums the diagLen diagonal entries x[..., i, i] into z[m].
// Dynamic shared memory layout: blockDim.x T's (partial sums) followed by
// blockDim.x * xRank Nd4jLong's (per-thread coordinate scratch).
template<typename T>
__global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ T* sharedMem;
__shared__ int xRank, zRank; // xRank = zRank + 2
__shared__ Nd4jLong xLen, zLen, *coordsMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<T*>(shmem);
coordsMem = reinterpret_cast<Nd4jLong*>(shmem + blockDim.x * sizeof(T));
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
xLen = shape::length(xShapeInfo);
zLen = shape::length(zShapeInfo); // corresponds to number of matrices
}
__syncthreads();
// private coordinate buffer for this thread (xRank slots)
Nd4jLong* coords = coordsMem + threadIdx.x * xRank;
for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix
shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), m, zLen, coords);
const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank);
sharedMem[threadIdx.x] = 0;
// each thread accumulates a strided subset of the diagonal
for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) {
// the two innermost (matrix) coordinates are set to the diagonal index
coords[zRank] = coords[zRank + 1] = i;
const auto xOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
sharedMem[threadIdx.x] += x[xOffset];
}
__syncthreads();
// aggregate sum
// tree reduction over the block; assumes blockDim.x is a power of two --
// TODO confirm (host launches with MAX_NUM_THREADS / 4)
for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads];
__syncthreads();
}
if (threadIdx.x == 0)
z[zOffset] = *sharedMem;
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for traceCuda<T>; sharedMem must cover the partial-sum
// and coordinate-scratch layout the kernel expects (sized by the caller).
template<typename T>
static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const uint diagLen) {
hipLaunchKernelGGL(( traceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diagLen);
}
///////////////////////////////////////////////////////////////////
// Public entry point for trace: for every innermost matrix of input, sums the
// main-diagonal elements into the corresponding element of output.
void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
PointersManager manager(context, "trace");
// diagonal length of a (possibly non-square) matrix = min of last two dims
const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// per-thread coordinate scratch + per-thread partial sum, padded by 128 bytes
const int sharedMem = threadsPerBlock * (sizeof(Nd4jLong) * input.rankOf() + input.sizeOfT()) + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Backward pass of triu: the gradient is zeroed wherever triu zeroed the
// forward output (row + diag > col), and copied from gradO elsewhere.
// Dynamic shared memory holds blockDim.x * rank Nd4jLong coordinates.
template<typename T>
__global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
__shared__ int rank, areSameOffsets; // xRank = zRank
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
// when shapes/strides match, x can be addressed with z's offsets directly
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
rank = shape::rank(xShapeInfo);
len = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
// per-thread coordinate scratch
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
// zShapeInfo + 1 points at the shape part, + rank + 1 at the strides part
shape::index2coords(rank, zShapeInfo + 1, i, len, coords);
const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank);
if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col
z[zOffset] = 0;
else
z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)];
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for triuBPCuda<T> on the given stream.
template<typename T>
static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
hipLaunchKernelGGL(( triuBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diag);
}
///////////////////////////////////////////////////////////////////
// Public entry point for triu backprop: gradI = gradO masked by the triu
// pattern with the given diagonal. `input` is unused by the kernel itself.
void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// per-thread coordinate scratch, padded by 128 bytes
const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * gradO.rankOf() + 128;
PointersManager manager(context, "triuBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Backward pass of tile: every output (gradI) element is the sum of the
// numOfXOffsets gradO elements that were tiled from it. Offsets into x are
// materialized per element into globMem; memBuff is per-thread shared scratch
// for shape::outerArrayOffsets.
template<typename T>
__global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
__shared__ int xRank, zRank; // xRank >= zRank
__shared__ Nd4jLong numOfXOffsets, zLen, totalThreads, *sharedMem; // xLen >= zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
// NOTE(review): reads the rank of zShapeInfo into a variable named xRank
// (and the host sizes shared memory by gradO's rank) -- confirm this is
// intended; x and z may differ in rank for tile.
xRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
// tile replication factor: how many x elements map onto each z element
numOfXOffsets = shape::length(xShapeInfo) / zLen;
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// per-thread scratch: 2 * xRank Nd4jLongs in shared memory
auto memBuff = sharedMem + threadIdx.x * 2 * xRank;
// per-thread slice of the global offsets buffer
auto xOffsets = globMem + tid * numOfXOffsets;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
const auto zOffset = shape::getIndexOffset(i, zShapeInfo, zLen);
shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);
z[zOffset] = x[xOffsets[0]]; // first offset
for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets
z[zOffset] += x[xOffsets[j]];
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for tileBPCuda<T> on the given stream.
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
hipLaunchKernelGGL(( tileBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, globMem);
}
//////////////////////////////////////////////////////////////////////////
// Public entry point for tile backprop: gradI accumulates gradO over all tile
// replicas described by reps.
void tileBP(nd4j::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
NDArray memBuff('c', gradO.getShapeAsVector(), nd4j::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// 2 * rank Nd4jLongs of scratch per thread, padded by 128 bytes
const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 2 * gradO.rankOf() + 128;
PointersManager manager(context, "tileBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
// NOTE(review): this selector dispatches over FLOAT_TYPES only, unlike the
// sibling helpers which use LIBND4J_TYPES -- confirm intended.
BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// x - input, y - gradO, z - gradI
// Stage-1 kernel of whole-array clipByNorm backprop: computes two global
// reductions over the flat arrays -- sum(x*x) (for the L2 norm) and sum(x*y)
// (input dot gradO) -- and leaves sqrt(sum(x*x)) in reducBuff[0] and
// sum(x*y) in reducBuff[1] for the stage-2 gradient kernel.
// Dynamic shared memory: 2 Z values per thread (norm partial, dot partial).
// vreducBuff must provide 2*gridDim.x Z slots plus, at uint offset 16384, a
// zero-initialized uint counter used to detect the last block to finish
// (single-pass grid reduction via atomicInc + __threadfence).
// Fix vs. original: the last block must reset the shared counter for the next
// launch with `*count = 0;` -- the original `count = 0;` only nulled the local
// pointer (dead statement) and left the counter unreset.
// NOTE(review): threads with tid >= len return before the __syncthreads()
// below; this is divergent-barrier territory when len is not a multiple of
// blockDim.x -- kept as-is, flagged for follow-up.
template<typename X, typename Z>
__global__ static void clipByNormBPWholeArrCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= shape::length(zShapeInfo))
return;
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Z*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto reducBuff = reinterpret_cast<Z*>(vreducBuff);
// block-completion counter lives past the per-block partial sums
uint* count = reinterpret_cast<uint*>(vreducBuff) + 16384;
__shared__ Z* shMem;
__shared__ Nd4jLong len;
__shared__ bool amIinLastBlock;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
shMem = reinterpret_cast<Z*>(shmem);
len = shape::length(zShapeInfo); // xLen = yLen = zLen
}
__syncthreads();
// fill shared memory with array elements
const auto xVal = x[shape::getIndexOffset(tid, xShapeInfo, len)];
const auto yVal = y[shape::getIndexOffset(tid, yShapeInfo, len)];
shMem[2*threadIdx.x] = static_cast<Z>(xVal * xVal); // for norm
shMem[2*threadIdx.x + 1] = static_cast<Z>(xVal * yVal); // for input * gradO
__syncthreads();
// accumulate sum per block (tree reduction; tail guarded by tid bound)
for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads && tid + activeThreads < len) {
shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)];
shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1];
}
__syncthreads();
}
// store accumulated sums in reduction buffer (reducBuff)
if (threadIdx.x == 0) {
reducBuff[2*blockIdx.x] = shMem[0];
reducBuff[2*blockIdx.x + 1] = shMem[1];
// publish partials before incrementing the completion counter
__threadfence();
amIinLastBlock = gridDim.x == 1 || (atomicInc(count, gridDim.x) == gridDim.x - 1);
}
__syncthreads();
// shared memory of last block is used for final summation of values stored in reduction buffer
if (amIinLastBlock) {
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
shMem[2*threadIdx.x] = (i == threadIdx.x ) ? reducBuff[2*i] : reducBuff[2*i] + shMem[2*threadIdx.x];
shMem[2*threadIdx.x + 1] = (i == threadIdx.x ) ? reducBuff[2*i + 1] : reducBuff[2*i + 1] + shMem[2*threadIdx.x + 1];
}
__syncthreads();
// accumulate sum
for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < gridDim.x) {
shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)];
shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1];
}
__syncthreads();
}
if (threadIdx.x == 0) {
reducBuff[0] = math::nd4j_sqrt<Z,Z>(shMem[0]); // the L2 norm
reducBuff[1] = shMem[1]; // sum(input * gradO)
*count = 0; // reset completion counter for the next launch (was `count = 0;`, a dead pointer assignment)
}
}
}
//////////////////////////////////////////////////////////////////////////
// x - input, y - gradO, z - gradI
// Stage-2 kernel of whole-array clipByNorm backprop: consumes the norm and
// dot product left in vreducBuff by clipByNormBPWholeArrCuda and computes the
// per-element gradient. One thread per element; must run after stage 1.
template<typename X, typename Z>
__global__ static void clipByNormBPCalcGradCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const Nd4jLong len = shape::length(zShapeInfo); // xLen = yLen = zLen
if(tid >= len)
return;
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Z*>(vy);
auto z = reinterpret_cast<Z*>(vz);
__shared__ Z norm, sumOfProd;
if (threadIdx.x == 0) {
// broadcast the stage-1 results to the whole block
norm = reinterpret_cast<Z*>(vreducBuff)[0];
sumOfProd = reinterpret_cast<Z*>(vreducBuff)[1];
}
__syncthreads();
const auto yOffset = shape::getIndexOffset(tid, yShapeInfo, len);
const auto zOffset = shape::getIndexOffset(tid, zShapeInfo, len);
if(norm > clipNormVal) {
// clipped: gradI = clipNorm * (gradO/norm - (x . gradO) * x / norm^3)
const auto xOffset = shape::getIndexOffset(tid, xShapeInfo, len);
const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm
const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm)
z[zOffset] = clipNormVal * (factor1 * y[yOffset] - factor2 * sumOfProd * x[xOffset]);
}
else {
// not clipped: gradient passes through unchanged
z[zOffset] = y[yOffset];
}
}
//////////////////////////////////////////////////////////////////////////
// x - input, y - gradO, z - gradI
// TAD variant of clipByNorm backprop: one block per sub-array (TAD); each
// block reduces its TAD's norm and x.gradO dot product in shared memory,
// then computes the gradient for that TAD.
// Dynamic shared memory: 2 Z values per thread.
// NOTE(review): the __syncthreads() calls sit inside a loop whose trip count
// differs between threads when tadLen is not a multiple of blockDim.x (and
// threads with threadIdx.x >= tadLen never reach the barrier at all) --
// divergent-barrier hazard; confirm launch config always avoids this.
template<typename X, typename Z>
__global__ static void clipByNormBPTadsCuda(const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const void* vy, const Nd4jLong* yTadShapeInfo, const Nd4jLong* yTadOffsets, void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const Z clipNormVal) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Z*>(vy);
auto z = reinterpret_cast<Z*>(vz);
__shared__ Z* shMem;
__shared__ Nd4jLong tadLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
shMem = reinterpret_cast<Z*>(shmem);
tadLen = shape::length(zTadShapeInfo); // xTadLen = yTadLen = zTadLen
}
__syncthreads();
// base pointers of this block's sub-array
const auto* xTad = x + xTadOffsets[blockIdx.x];
const auto* yTad = y + yTadOffsets[blockIdx.x];
auto* zTad = z + zTadOffsets[blockIdx.x];
// *** FIRST STAGE - ACCUMULATE REQUIRED SUMS *** //
Z norm = 0;
Z sumOfProd = 0;
for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen);
const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen);
shMem[2*threadIdx.x] = static_cast<Z>(xTad[xOffset] * xTad[xOffset]); // for norm
shMem[2*threadIdx.x + 1] = static_cast<Z>(xTad[xOffset] * yTad[yOffset]); // for input * gradO
__syncthreads();
// accumulate sum per block
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads && i + activeThreads < tadLen) {
shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)];
shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1];
}
__syncthreads();
}
// every thread picks up the chunk totals (identical value in all threads)
norm += shMem[0];
sumOfProd += shMem[1];
}
// *** SECOND STAGE - GRADIENT CALCULATION *** //
norm = math::nd4j_sqrt<Z,Z>(norm);
for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) {
const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen);
const auto zOffset = shape::getIndexOffset(i, zTadShapeInfo, tadLen);
if(norm > clipNormVal) {
const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen);
const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm
const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm)
zTad[zOffset] = clipNormVal * (factor1 * yTad[yOffset] - factor2 * sumOfProd * xTad[xOffset]);
}
else {
zTad[zOffset] = yTad[yOffset];
}
}
}
//////////////////////////////////////////////////////////////////////////
// Dispatches clipByNorm backprop: whole-array path (two chained kernels
// sharing vreducBuff) when no TAD offsets are given, otherwise the one-block-
// per-TAD kernel. The 256 bytes of dynamic shared memory passed to the
// gradient kernel appear unused by it -- kept for safety.
template<typename X, typename Z>
static void clipByNormBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets,
const void* vy, const Nd4jLong* yShapeInfo, const Nd4jLong* yTadOffsets,
void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets,
void* vreducBuff, const double clipNormVal) {
if(xTadOffsets == nullptr) { // means whole array
hipLaunchKernelGGL(( clipByNormBPWholeArrCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal));
hipLaunchKernelGGL(( clipByNormBPCalcGradCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal));
}
else // means tads using
hipLaunchKernelGGL(( clipByNormBPTadsCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, xTadOffsets, vy, yShapeInfo, yTadOffsets, vz, zShapeInfo, zTadOffsets, static_cast<Z>(clipNormVal));
}
BUILD_DOUBLE_TEMPLATE(template void clipByNormBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const Nd4jLong* xTadOffsets, const void *vy, const Nd4jLong *yShapeInfo, const Nd4jLong* yTadOffsets, void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, void* vreducBuff, const double clipNormVal), FLOAT_TYPES, FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Public entry point for clipByNorm backprop. Empty/full `dimensions` selects
// the whole-array path (uses the context's reduction pointer as scratch);
// otherwise TAD packs are built for each sub-array along `dimensions`.
void clipByNormBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
PointersManager manager(context, "clipByNormBP");
const double clipNormVal = clipNorm.e<double>(0);
const auto xType = input.dataType();
const auto zType = gradI.dataType();
const int threadsPerBlock = MAX_NUM_THREADS / 2;
// 2 values per thread, padded by 128 bytes.
// NOTE(review): sized with input.sizeOfT() (X) while the kernels store
// Z-typed pairs -- could under-allocate if sizeof(Z) > sizeof(X); confirm.
const int sharedMem = threadsPerBlock * 2 * input.sizeOfT() + 128;
NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
if(dimensions.empty() || dimensions.size() == input.rankOf()) { // means whole array
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), nullptr, gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), nullptr, gradI.getSpecialBuffer(), gradI.getSpecialShapeInfo(), nullptr, context->getReductionPointer(), clipNormVal), FLOAT_TYPES, FLOAT_TYPES);
}
else { // means tads using
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
auto packY = ConstantTadHelper::getInstance()->tadForDimensions(gradO.getShapeInfo(), dimensions);
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), dimensions);
// one block per TAD
const int blocksPerGrid = packX.numberOfTads();
BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradO.getSpecialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), gradI.getSpecialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), nullptr, clipNormVal), FLOAT_TYPES, FLOAT_TYPES);
}
NDArray::registerSpecialUse({&gradI}, {&input, &gradO});
manager.synchronize();
}
// In-place Fisher-Yates-style shuffle of a vector: each thread owns a disjoint
// set of positions, walked from the tail towards the head with a grid-wide
// stride, swapping position `pos` with a pseudo-random partner in [0, pos).
template <typename T>
static __global__ void swapShuffleKernel(T* input, Nd4jLong* shape, Nd4jLong firstDim, Nd4jLong len, nd4j::graph::RandomGenerator* rng) {

    const auto globalId = blockIdx.x * blockDim.x + threadIdx.x;
    const auto stride   = blockDim.x * gridDim.x;

    for (int pos = firstDim - 1 - globalId; pos > 0; pos -= stride) {

        const int partner = rng->relativeInt(pos) % pos;
        if (partner == pos)
            continue;

        const auto offA = shape::getIndexOffset(pos, shape, len);
        const auto offB = shape::getIndexOffset(partner, shape, len);

        const T valA = input[offA];
        const T valB = input[offB];
        input[offA] = valB;
        input[offB] = valA;
    }
}
// Out-of-place shuffle: writes a permuted copy of `input` into `output` using
// the Fisher-Yates index walk; `indices` starts as the identity permutation.
// NOTE(review): indices[] is read (indices[r], indices[i]) while other
// threads may atomicExch the same slots concurrently -- the result is
// nondeterministic across threads; confirm this best-effort behavior is
// acceptable for shuffle semantics.
template <typename T>
static __global__ void fillShuffleKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong firstDim, Nd4jLong len, int* indices, nd4j::graph::RandomGenerator* rng) {
// PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
auto tid = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
// walk from the tail toward the head; each thread handles a strided subset
for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
int r = rng->relativeInt(i) % i;
output[shape::getIndexOffset(i, outputShape, len)] = input[shape::getIndexOffset(indices[r], inputShape, len)];
if(i != r) {
output[shape::getIndexOffset(r, outputShape, len)] = input[shape::getIndexOffset(indices[i], inputShape, len)];
// output.p(r, input.e<T>(indices[i]));
// math::nd4j_swap<int>(indices[i], indices[r]);
atomicExch(&indices[i], indices[r]);
}
}
}
//////////////////////////////////////////////////////////////////////////
// Shuffles `input` along its first dimension (into `output`, or in place when
// isInplace). Three paths: trivial (length/firstDim == 1), vector-like (GPU
// kernels), and general rank > 1 (host-side sub-array swaps/assigns).
template <typename T>
void randomShuffle_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
// check edge cases first
int temp;
const int firstDim = input.sizeAt(0);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({&output}, {&input});
if(input.lengthOf() == 1 || firstDim == 1) {
// nothing to shuffle; just copy when not in place
if(!isInplace)
output.assign(input);
}
else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) {
// apply Fisher-Yates shuffle
// the RNG state is copied to the device so kernels can draw from it.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked here.
nd4j::graph::RandomGenerator* dRandom = nullptr;
hipMalloc(&dRandom, sizeof(nd4j::graph::RandomGenerator));
hipMemcpy(dRandom, &rng, sizeof(nd4j::graph::RandomGenerator), hipMemcpyHostToDevice);
T* inputBuf = reinterpret_cast<T*>(input.specialBuffer());
if(isInplace) {
hipLaunchKernelGGL(( swapShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), firstDim, input.lengthOf(), dRandom);
}
else {
std::vector<int> indices(firstDim);
std::iota(indices.begin(), indices.end(), 0);
// copy only element 0 up-front (see commented equivalent below);
// the kernel fills the remaining positions
hipMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), hipMemcpyDeviceToDevice);
//output.p<T>(Nd4jLong(0), input.e<T>(0));
PointersManager pointersManager(context, "helper::randomShuffle_");
int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int)));
T* outputBuf = reinterpret_cast<T*>(output.specialBuffer());
hipLaunchKernelGGL(( fillShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, input.lengthOf(), indicesDev, dRandom);
pointersManager.synchronize();
}
// rng.rewindH(firstDim - 1);
hipFree(dRandom);
}
else {
// evaluate sub-arrays list of input array through all dimensions excluding first one
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
auto subArrsListIn = input.allTensorsAlongDimension(dimensions);
// apply Fisher-Yates shuffle
if(isInplace) {
PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold())
for(int i = firstDim - 1; i > 0; --i) {
int r = rng.relativeInt(i) % i;
if(i != r)
subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r));
}
}
else {
// evaluate sub-arrays list of output array through all dimensions excluding first one
auto subArrsListOut = output.allTensorsAlongDimension(dimensions);
std::vector<int> indices(firstDim);
std::iota(indices.begin(), indices.end(), 0);
bool isZeroShuffled = false;
PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
for(int i = firstDim - 1; i > 0; --i) {
int r = rng.relativeInt(i) % i;
subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r]));
if(r == 0)
isZeroShuffled = true;
if(i != r) {
subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i]));
math::nd4j_swap<int>(indices[i], indices[r]);
}
}
// position 0 is never the loop's `i`; copy it unless some swap filled it
if(!isZeroShuffled)
subArrsListOut->at(0)->assign(subArrsListIn->at(0));
delete subArrsListOut;
}
// advance host RNG past the draws consumed above
rng.rewindH(firstDim-1);
delete subArrsListIn;
}
NDArray::registerSpecialUse({&output}, {&input});
}
// Type-dispatching wrapper over randomShuffle_<T>.
void randomShuffle(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// Fills `output` with the identity pattern; delegates entirely to NDArray.
void eye(nd4j::LaunchContext * context, NDArray& output) {
output.setIdentity();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// In-place clip-by-norm over sub-arrays: one block per sub-array; if its
// precomputed L2 norm (norm2Buf[arr]) exceeds clipNorm, every element is
// scaled by clipNorm / norm; otherwise it is left untouched.
template <typename T>
static __global__ void clipByNormInplaceKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
for (int arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
__shared__ T* z;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
len = shape::length(shape);
z = inputBuffer + inputOffsets[arr];
}
__syncthreads();
for (int j = threadIdx.x; j < len; j+= blockDim.x) {
auto xIndex = shape::getIndexOffset(j, shape, len);
if(norm2Buf[arr] > clipNorm)
z[xIndex] *= clipNorm / norm2Buf[arr]; // case with ews = 1 and ordering is 'c'
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Out-of-place clip-by-norm over sub-arrays: one block per sub-array; copies
// input to output, scaling by clipNorm / norm when the sub-array's
// precomputed L2 norm exceeds clipNorm.
template <typename T>
static __global__ void clipByNormKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* outputBuffer, Nd4jLong* outputShape, Nd4jLong* outputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
for (Nd4jLong arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
__shared__ T* x, *z;
__shared__ Nd4jLong lenX, lenZ;
__shared__ T norm2;
if (threadIdx.x == 0) {
lenX = shape::length(shape);
x = inputBuffer + inputOffsets[arr];
z = outputBuffer + outputOffsets[arr];
lenZ = shape::length(outputShape);
// this sub-array's precomputed L2 norm
norm2 = norm2Buf[shape::getIndexOffset(arr, norm2shape, numOfSubArrs)];
//printf("%d: %lf (vs %lf) %lld %lld\n", arr, norm2, clipNorm, lenX, lenZ);
}
__syncthreads();
for (Nd4jLong j = threadIdx.x; j < lenZ; j+= blockDim.x) {
auto xIndex = shape::getIndexOffset(j, shape, lenX);
auto zIndex = shape::getIndexOffset(j, outputShape, lenZ);
if(norm2 > clipNorm) {
z[zIndex] = x[xIndex] * clipNorm / norm2; // case with ews = 1 and ordering is 'c'
} else {
z[zIndex] = x[xIndex];
}
//printf("%lld: %lf %lf\n", j, z[zIndex], x[xIndex]);
}
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////
// Clip-by-norm host dispatcher.
// - whole-array case (norm2 has a single element): handled with host-side
//   NDArray arithmetic;
// - per-TAD case: launches the in-place / out-of-place kernels above.
// `clipNormA` is synced to host to read the scalar threshold.
template<typename T>
static void clipByNorm_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, NDArray const& clipNormA, const bool isInplace) {
    const int rank = input.rankOf();
    auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions);  // one L2 norm per sub-array
    clipNormA.syncToHost();
    //norm2.printBuffer("Norm2");
    T const clipNorm = clipNormA.e<T>(0);
    //clipNormA.printBuffer("ClipNorm");
    auto stream = context->getCudaStream();
    if (isInplace) {
        if(norm2.lengthOf() == 1) {
            // single norm for the whole array - scale on host if it exceeds the threshold
            norm2.syncToHost();
            T norm2Val = norm2.e<T>(0);
            if(norm2Val > clipNorm)
                input *= clipNorm / norm2Val;
        }
        else {
            std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
            const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
            auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
            //auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimsToExclude);
            T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
            T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
            hipLaunchKernelGGL(( clipByNormInplaceKernel<T>), dim3(256), dim3(512), 1024, *stream, numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
        }
    }
    else {
        if(norm2.lengthOf() == 1) {
            norm2.syncToHost();
            T norm2Val = norm2.e<T>(0);
            if(norm2Val > clipNorm)
                output.assign( input * (clipNorm / norm2Val));
            else
                output.assign( input );
        }
        else {
            std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
            const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
            auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
            auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimensions);
            T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
            T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
            T* outputBuffer = reinterpret_cast<T*>(output.specialBuffer());
            hipLaunchKernelGGL(( clipByNormKernel<T>), dim3(256), dim3(512), 1024, *stream, numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), outputBuffer, packZ.specialShapeInfo(), packZ.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
        }
    }
}
// Clips `input` so that its L2 norm (global or per-TAD) does not exceed
// `clipNorm`; dispatches on the output data type (float types only).
void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    auto zType = output.dataType();
    BUILD_SINGLE_SELECTOR(zType, clipByNorm_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
// Clips a list of arrays by their GLOBAL norm:
//   globalNorm = sqrt(sum(l2norm(t)^2 for t in inputs))
// If globalNorm > clipNorm every input is scaled by clipNorm / globalNorm,
// otherwise inputs are copied through unchanged. The computed globalNorm is
// stored as a scalar in outputs[inputs.size()].
template <typename T>
void clipByGlobalNorm_(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
    NDArray globalNorm = NDArrayFactory::create<T>(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list]))

    // BUG FIX: this loop must stay sequential. The original wrapped it in
    // PRAGMA_OMP_PARALLEL_FOR, which raced on `globalNorm +=` and issued
    // stream-ordered NDArray/CUDA operations from multiple host threads.
    for (size_t i = 0; i < inputs.size(); i++) {
        auto input = inputs[i];
        auto l2norm = input->reduceNumber(reduce::Norm2);
        globalNorm += l2norm * l2norm;
    }

    globalNorm.applyTransform(transform::Sqrt, nullptr, nullptr);  // = sqrt(sum of squared norms)
    outputs[inputs.size()]->p(0, globalNorm);
    globalNorm.syncToHost();
    const T factor = clipNorm / globalNorm.e<T>(0);  // only used when clipping is needed

    // Sequential for the same reason as above: each iteration launches
    // stream-ordered device work.
    for (size_t e = 0; e < inputs.size(); e++) {
        auto input = inputs[e];
        auto output = outputs[e];

        if (globalNorm.e<double>(0) <= clipNorm) {
            output->assign(input);
        }
        else {
            auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
            input->applyLambda(lambda, output);
        }
    }
}
// Public entry point for clip-by-global-norm; dispatches on the data type of
// the first output (float types only).
void clipByGlobalNorm(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
    auto zType = outputs[0]->dataType();
    BUILD_SINGLE_SELECTOR(zType, clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Clip by AVERAGED norm: uses norm2 / length instead of the raw L2 norm,
// either over the whole array (dimensions empty) or per sub-array.
template<typename T>
static void clipByAveraged_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    auto cn = clipNorm.e<T>(0);
    if (dimensions.size() == 0) {
        // all-reduce
        T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf();
        if (n2 <= cn) {
            if (!isInplace)
                output.assign(input);
        }
        else {
            const T factor = cn / n2;
            //auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
            //input.applyLambda<T>(lambda, &output);
            output.assign(input * factor);
        }
    }
    else {
        // along dimension
        auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false);
        if (!isInplace)
            output.assign(input);
        // NOTE(review): both TAD sets are taken from `output` (which already
        // holds the input values at this point), so the per-TAD scaling below
        // is effectively in-place on the output.
        auto tads = output.allTensorsAlongDimension(dimensions);
        auto outTads = output.allTensorsAlongDimension(dimensions);
        // TODO: make this CUDA-compliant somehow
        for (int e = 0; e < tads->size(); e++) {
            T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf();  // averaged norm of this sub-array
            const T factor = cn / n2;
            if (n2 > cn) {
                //auto lambda = LAMBDA_T(_x, factor) {return _x * factor;};
                tads->at(e)->applyScalar(scalar::Multiply, factor, outTads->at(e));//applyLambda<T>(lambda, &output);
            }
        }
        // the ResultSets are owned by this function
        delete tads;
        delete outTads;
    }
}
// Public entry point for clip-by-averaged-norm; dispatches on the input data
// type (float types only).
void clipByAveraged(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    auto xType = input.dataType();
    BUILD_SINGLE_SELECTOR(xType, clipByAveraged_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
/*
if (d1 > params[1])
return params[1];
else if (d1 < params[0])
return params[0];
else return d1;
*/
// Clamps every element of `input` into [leftBound, rightBound] and writes the
// result to `output`. Input and output are expected to have equal length.
// Works with any 1D launch configuration via a grid-stride loop.
template <typename T>
static __global__ void clipByValueKernel(void* input, Nd4jLong* inputShape, void* output, Nd4jLong* outputShape, double leftBound, double rightBound) {
    __shared__ T* outputBuf;
    __shared__ T* inputBuf;
    __shared__ Nd4jLong length;
    __shared__ bool linearBuffers;  // true when both buffers have ews == 1 -> direct indexing
    if (threadIdx.x == 0) {
        outputBuf = reinterpret_cast<T *>(output);
        inputBuf = reinterpret_cast<T *>(input);
        length = shape::length(inputShape);
        linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
    }
    __syncthreads();
    // BUG FIX: global thread id is blockIdx.x * blockDim.x + threadIdx.x;
    // the original used gridDim.x, making threads collide on / skip elements.
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;
    for (Nd4jLong e = tid; e < length; e += step) {
        if (linearBuffers) {
            if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
            else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
            else outputBuf[e] = inputBuf[e];
        }
        else {
            auto inputOffset = shape::getIndexOffset(e, inputShape, length);
            auto outputOffset = shape::getIndexOffset(e, outputShape, length);
            if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
            else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
            // BUG FIX: pass-through must read the INPUT offset, not the output offset
            else outputBuf[outputOffset] = inputBuf[inputOffset];
        }
    }
}
// Typed implementation of clip-by-value: syncs the input to device if needed,
// then launches the element-wise clamp kernel on the context's stream.
template <typename T>
static void clipByValue_(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
    auto cudaStream = context->getCudaStream();
    // make sure the device copy of the input is current before launching
    if (!input.isActualOnDeviceSide())
        input.syncToDevice();
    NDArray::prepareSpecialUse({&output}, {&input});
    hipLaunchKernelGGL(( clipByValueKernel<T>), dim3(256), dim3(512), 8192, *cudaStream, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
    NDArray::registerSpecialUse({&output}, {&input});
}
// Public entry point for clip-by-value; dispatches on the input data type
// (float types only).
void clipByValue(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
    auto xType = input.dataType();
    BUILD_SINGLE_SELECTOR(xType, clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES);
}
}
}
| 0d457eb9c9ae771d347a46995f7f7df8c0b4d4e6.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Builds the inverse permutation: for every position i, z[x[i]] = i.
// Grid-stride loop over the permutation elements.
// NOTE(review): x values are used as indices without bounds checking -
// assumes the input really is a permutation of [0, len).
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
    const T* x = reinterpret_cast<const T*>(vx);
    T* z = reinterpret_cast<T*>(vz);
    __shared__ Nd4jLong len, totalThreads;
    if (threadIdx.x == 0) {
        len = shape::length(xShapeInfo);
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (Nd4jLong i = tid; i < len; i += totalThreads) {
        const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len);
        const Nd4jLong index = x[xOffset];
        const auto zOffset = shape::getIndexOffset(index, zShapeInfo, len);
        z[zOffset] = i;
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for invertPermutationCuda (1024 bytes of dynamic shared
// memory are reserved; the kernel only uses static shared variables).
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
                                                  const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
    invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
// Computes the inverse of the permutation stored in `input` into `output`.
void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
    const int threadsPerBlock = MAX_NUM_THREADS;
    const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    PointersManager manager(context, "invertPermutation");
    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});
    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Computes the trace (sum of the main diagonal) of each inner matrix of x.
// One block per output element (one per matrix); threads cooperatively sum the
// diagonal into shared memory, then tree-reduce.
// Dynamic shared memory layout: blockDim.x T values, followed by
// blockDim.x * xRank Nd4jLong coordinates (per-thread scratch).
// NOTE(review): the tree reduction assumes blockDim.x is a power of two.
template<typename T>
__global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) {
    const auto x = reinterpret_cast<const T*>(vx);
    auto z = reinterpret_cast<T*>(vz);
    __shared__ T* sharedMem;
    __shared__ int xRank, zRank; // xRank = zRank + 2
    __shared__ Nd4jLong xLen, zLen, *coordsMem;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<T*>(shmem);
        coordsMem = reinterpret_cast<Nd4jLong*>(shmem + blockDim.x * sizeof(T));
        xRank = shape::rank(xShapeInfo);
        zRank = shape::rank(zShapeInfo);
        xLen = shape::length(xShapeInfo);
        zLen = shape::length(zShapeInfo); // corresponds to number of matrices
    }
    __syncthreads();
    Nd4jLong* coords = coordsMem + threadIdx.x * xRank;  // per-thread coordinate scratch
    for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix
        shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), m, zLen, coords);
        const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank);
        sharedMem[threadIdx.x] = 0;
        // each thread sums a strided slice of the diagonal
        for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) {
            coords[zRank] = coords[zRank + 1] = i;  // diagonal element (i, i) of matrix m
            const auto xOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
            sharedMem[threadIdx.x] += x[xOffset];
        }
        __syncthreads();
        // aggregate sum
        for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
            if (threadIdx.x < activeThreads)
                sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads];
            __syncthreads();
        }
        if (threadIdx.x == 0)
            z[zOffset] = *sharedMem;
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for traceCuda.
template<typename T>
static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                              const void *vx, const Nd4jLong *xShapeInfo,
                              void *vz, const Nd4jLong *zShapeInfo,
                              const uint diagLen) {
    traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen);
}
///////////////////////////////////////////////////////////////////
// Computes per-matrix traces of `input` into `output`.
// diagLen = min(rows, cols) of the innermost matrices.
void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
    PointersManager manager(context, "trace");
    const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2);
    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    // per-thread accumulator + per-thread coordinate scratch (+128 slack)
    const int sharedMem = threadsPerBlock * (sizeof(Nd4jLong) * input.rankOf() + input.sizeOfT()) + 128;
    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});
    manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Backprop for triu: gradI = gradO masked to the upper triangle
// (positions where row + diag <= col); the rest is zeroed.
// Dynamic shared memory: blockDim.x * rank Nd4jLong coordinates.
template<typename T>
__global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
    // x and z have same shapes
    const auto x = reinterpret_cast<const T*>(vx); // gradO
    auto z = reinterpret_cast<T*>(vz); // gradI
    __shared__ int rank, areSameOffsets; // xRank = zRank
    __shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen = zLen
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
        areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
        rank = shape::rank(xShapeInfo);
        len = shape::length(zShapeInfo);
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();
    auto coords = sharedMem + threadIdx.x * rank;  // per-thread coordinate scratch
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (Nd4jLong i = tid; i < len; i += totalThreads) {
        shape::index2coords(rank, zShapeInfo + 1, i, len, coords);
        const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank);
        if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col
            z[zOffset] = 0;
        else
            z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)];
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for triuBPCuda.
template<typename T>
static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
    triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag);
}
///////////////////////////////////////////////////////////////////
// Backprop for the triu op: masks gradO into gradI (see triuBPCuda).
void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * gradO.rankOf() + 128;  // coordinate scratch
    PointersManager manager(context, "triuBP");
    NDArray::prepareSpecialUse({&gradI}, {&gradO});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO});
    manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// Backprop for tile: each gradI (z) element accumulates the gradO (x) elements
// of every tile that was produced from it. globMem provides per-thread global
// scratch for the outer-array offset lists.
template<typename T>
__global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
    // x and z have same shapes
    const auto x = reinterpret_cast<const T*>(vx); // gradO
    auto z = reinterpret_cast<T*>(vz); // gradI
    __shared__ int xRank, zRank; // xRank >= zRank
    __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads, *sharedMem; // xLen >= zLen
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
        // NOTE(review): xRank is read from zShapeInfo although the comment
        // above states xRank >= zRank - confirm this is intentional.
        xRank = shape::rank(zShapeInfo);
        zLen = shape::length(zShapeInfo);
        numOfXOffsets = shape::length(xShapeInfo) / zLen;  // x elements mapping onto one z element
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    auto memBuff = sharedMem + threadIdx.x * 2 * xRank;  // per-thread scratch for outerArrayOffsets
    auto xOffsets = globMem + tid * numOfXOffsets;       // per-thread offset list in global memory
    for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
        const auto zOffset = shape::getIndexOffset(i, zShapeInfo, zLen);
        shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);
        z[zOffset] = x[xOffsets[0]]; // first offset
        for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets
            z[zOffset] += x[xOffsets[j]];
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launcher for tileBPCuda.
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
    tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem);
}
//////////////////////////////////////////////////////////////////////////
// Backprop for the tile op: accumulates gradO into gradI (see tileBPCuda).
// memBuff is device-side scratch shaped like gradO, used by the kernel for
// offset computation.
void tileBP(nd4j::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
    NDArray memBuff('c', gradO.getShapeAsVector(), nd4j::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations
    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 2 * gradO.rankOf() + 128;
    PointersManager manager(context, "tileBP");
    NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});
    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// x - input, y - gradO, z - gradI
// First pass of whole-array clip-by-norm backprop: accumulates
//   sum(x*x)  (for the L2 norm)  and  sum(x*gradO)
// into vreducBuff[0..1] (norm is stored already square-rooted); the gradient
// itself is computed afterwards by clipByNormBPCalcGradCuda.
// Requires one thread per element (no grid-stride loop). The completion
// counter for the cross-block "last block" election lives at
// (uint*)vreducBuff + 16384 and must start at zero.
template<typename X, typename Z>
__global__ static void clipByNormBPWholeArrCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) {
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= shape::length(zShapeInfo))
        return;
    const auto x = reinterpret_cast<const X*>(vx);
    const auto y = reinterpret_cast<const Z*>(vy);
    auto z = reinterpret_cast<Z*>(vz);
    auto reducBuff = reinterpret_cast<Z*>(vreducBuff);
    uint* count = reinterpret_cast<uint*>(vreducBuff) + 16384;  // cross-block completion counter
    __shared__ Z* shMem;
    __shared__ Nd4jLong len;
    __shared__ bool amIinLastBlock;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        shMem = reinterpret_cast<Z*>(shmem);
        len = shape::length(zShapeInfo); // xLen = yLen = zLen
    }
    __syncthreads();
    // fill shared memory with array elements
    const auto xVal = x[shape::getIndexOffset(tid, xShapeInfo, len)];
    const auto yVal = y[shape::getIndexOffset(tid, yShapeInfo, len)];
    shMem[2*threadIdx.x] = static_cast<Z>(xVal * xVal); // for norm
    shMem[2*threadIdx.x + 1] = static_cast<Z>(xVal * yVal); // for input * gradO
    __syncthreads();
    // accumulate sum per block
    for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
        if (threadIdx.x < activeThreads && tid + activeThreads < len) {
            shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)];
            shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1];
        }
        __syncthreads();
    }
    // store accumulated sums in reduction buffer (reducBuff)
    if (threadIdx.x == 0) {
        reducBuff[2*blockIdx.x] = shMem[0];
        reducBuff[2*blockIdx.x + 1] = shMem[1];
        __threadfence();
        amIinLastBlock = gridDim.x == 1 || (atomicInc(count, gridDim.x) == gridDim.x - 1);
    }
    __syncthreads();
    // shared memory of last block is used for final summation of values stored in reduction buffer
    if (amIinLastBlock) {
        for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
            shMem[2*threadIdx.x] = (i == threadIdx.x ) ? reducBuff[2*i] : reducBuff[2*i] + shMem[2*threadIdx.x];
            shMem[2*threadIdx.x + 1] = (i == threadIdx.x ) ? reducBuff[2*i + 1] : reducBuff[2*i + 1] + shMem[2*threadIdx.x + 1];
        }
        __syncthreads();
        // accumulate sum
        for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
            if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < gridDim.x) {
                shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)];
                shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1];
            }
            __syncthreads();
        }
        if (threadIdx.x == 0) {
            reducBuff[0] = math::nd4j_sqrt<Z,Z>(shMem[0]);
            reducBuff[1] = shMem[1];
            // BUG FIX: reset the completion counter IN MEMORY for the next
            // launch; the original `count = 0;` only nulled the local pointer.
            *count = 0;
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// x - input, y - gradO, z - gradI
// Second pass of whole-array clip-by-norm backprop: reads the norm and
// sum(x*gradO) prepared in vreducBuff by clipByNormBPWholeArrCuda and writes
// the gradient. Requires one thread per element (no grid-stride loop).
template<typename X, typename Z>
__global__ static void clipByNormBPCalcGradCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) {
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const Nd4jLong len = shape::length(zShapeInfo); // xLen = yLen = zLen
    if(tid >= len)
        return;
    const auto x = reinterpret_cast<const X*>(vx);
    const auto y = reinterpret_cast<const Z*>(vy);
    auto z = reinterpret_cast<Z*>(vz);
    __shared__ Z norm, sumOfProd;
    if (threadIdx.x == 0) {
        norm = reinterpret_cast<Z*>(vreducBuff)[0];
        sumOfProd = reinterpret_cast<Z*>(vreducBuff)[1];
    }
    __syncthreads();
    const auto yOffset = shape::getIndexOffset(tid, yShapeInfo, len);
    const auto zOffset = shape::getIndexOffset(tid, zShapeInfo, len);
    if(norm > clipNormVal) {
        // derivative of clipNorm * x / norm: two-term quotient-rule expression
        const auto xOffset = shape::getIndexOffset(tid, xShapeInfo, len);
        const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm
        const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm)
        z[zOffset] = clipNormVal * (factor1 * y[yOffset] - factor2 * sumOfProd * x[xOffset]);
    }
    else {
        z[zOffset] = y[yOffset];
    }
}
//////////////////////////////////////////////////////////////////////////
// x - input, y - gradO, z - gradI
// Per-TAD clip-by-norm backprop: one block per sub-array. Stage 1 accumulates
// sum(x*x) and sum(x*gradO) via a shared-memory tree reduction; stage 2 writes
// the gradient for every element. Dynamic shared memory: 2 * blockDim.x Z values.
// NOTE(review): threads with threadIdx.x >= tadLen skip the stage-1 loop and
// therefore miss its __syncthreads() calls (divergent barrier), and there is
// no barrier between reading shMem[0..1] and the next iteration's writes -
// confirm blockDim.x <= tadLen holds for all launch configurations.
template<typename X, typename Z>
__global__ static void clipByNormBPTadsCuda(const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const void* vy, const Nd4jLong* yTadShapeInfo, const Nd4jLong* yTadOffsets, void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const Z clipNormVal) {
    const auto x = reinterpret_cast<const X*>(vx);
    const auto y = reinterpret_cast<const Z*>(vy);
    auto z = reinterpret_cast<Z*>(vz);
    __shared__ Z* shMem;
    __shared__ Nd4jLong tadLen;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        shMem = reinterpret_cast<Z*>(shmem);
        tadLen = shape::length(zTadShapeInfo); // xTadLen = yTadLen = zTadLen
    }
    __syncthreads();
    // this block's sub-arrays
    const auto* xTad = x + xTadOffsets[blockIdx.x];
    const auto* yTad = y + yTadOffsets[blockIdx.x];
    auto* zTad = z + zTadOffsets[blockIdx.x];
    // *** FIRST STAGE - ACCUMULATE REQUIRED SUMS *** //
    Z norm = 0;
    Z sumOfProd = 0;
    for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) {
        const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen);
        const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen);
        shMem[2*threadIdx.x] = static_cast<Z>(xTad[xOffset] * xTad[xOffset]); // for norm
        shMem[2*threadIdx.x + 1] = static_cast<Z>(xTad[xOffset] * yTad[yOffset]); // for input * gradO
        __syncthreads();
        // accumulate sum per block
        for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
            if (threadIdx.x < activeThreads && i + activeThreads < tadLen) {
                shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)];
                shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1];
            }
            __syncthreads();
        }
        norm += shMem[0];
        sumOfProd += shMem[1];
    }
    // *** SECOND STAGE - GRADIENT CALCULATION *** //
    norm = math::nd4j_sqrt<Z,Z>(norm);
    for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) {
        const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen);
        const auto zOffset = shape::getIndexOffset(i, zTadShapeInfo, tadLen);
        if(norm > clipNormVal) {
            const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen);
            const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm
            const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm)
            zTad[zOffset] = clipNormVal * (factor1 * yTad[yOffset] - factor2 * sumOfProd * xTad[xOffset]);
        }
        else {
            zTad[zOffset] = yTad[yOffset];
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// Dispatches clip-by-norm backprop: when xTadOffsets == nullptr the
// whole-array path runs (two kernels sharing vreducBuff as scratch),
// otherwise the per-TAD kernel runs with one block per sub-array.
template<typename X, typename Z>
static void clipByNormBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                     const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets,
                                     const void* vy, const Nd4jLong* yShapeInfo, const Nd4jLong* yTadOffsets,
                                     void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets,
                                     void* vreducBuff, const double clipNormVal) {
    if(xTadOffsets == nullptr) { // means whole array
        clipByNormBPWholeArrCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal));
        clipByNormBPCalcGradCuda<X,Z><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal));
    }
    else // means tads using
        clipByNormBPTadsCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, xTadOffsets, vy, yShapeInfo, yTadOffsets, vz, zShapeInfo, zTadOffsets, static_cast<Z>(clipNormVal));
}
BUILD_DOUBLE_TEMPLATE(template void clipByNormBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const Nd4jLong* xTadOffsets, const void *vy, const Nd4jLong *yShapeInfo, const Nd4jLong* yTadOffsets, void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, void* vreducBuff, const double clipNormVal), FLOAT_TYPES, FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Backprop entry point for clip_by_norm. Whole-array case uses the context's
// reduction buffer as cross-kernel scratch; per-TAD case launches one block
// per sub-array.
void clipByNormBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
    PointersManager manager(context, "clipByNormBP");
    const double clipNormVal = clipNorm.e<double>(0);
    const auto xType = input.dataType();
    const auto zType = gradI.dataType();
    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int sharedMem = threadsPerBlock * 2 * input.sizeOfT() + 128;  // two partial sums per thread
    NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
    if(dimensions.empty() || dimensions.size() == input.rankOf()) { // means whole array
        const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
        BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), nullptr, gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), nullptr, gradI.getSpecialBuffer(), gradI.getSpecialShapeInfo(), nullptr, context->getReductionPointer(), clipNormVal), FLOAT_TYPES, FLOAT_TYPES);
    }
    else { // means tads using
        auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
        auto packY = ConstantTadHelper::getInstance()->tadForDimensions(gradO.getShapeInfo(), dimensions);
        auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), dimensions);
        const int blocksPerGrid = packX.numberOfTads();  // one block per sub-array
        BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradO.getSpecialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), gradI.getSpecialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), nullptr, clipNormVal), FLOAT_TYPES, FLOAT_TYPES);
    }
    NDArray::registerSpecialUse({&gradI}, {&input, &gradO});
    manager.synchronize();
}
// In-place Fisher-Yates-style shuffle over the first dimension of a
// vector-like array; iterations i = firstDim-1 .. 1 are distributed across
// all threads (each thread handles indices spaced by the total thread count).
// NOTE(review): swaps for different i may interleave across threads, so the
// result is not a sequential Fisher-Yates permutation - confirm this
// relaxation is acceptable for the op's contract.
template <typename T>
static __global__ void swapShuffleKernel(T* input, Nd4jLong* shape, Nd4jLong firstDim, Nd4jLong len, nd4j::graph::RandomGenerator* rng) {
    auto tid = blockIdx.x * blockDim.x;
    auto step = blockDim.x * gridDim.x;
    for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
        int r = rng->relativeInt(i) % i;  // partner index in [0, i)
        if (i != r) {
            T e0 = input[shape::getIndexOffset(i, shape, len)];
            T e1 = input[shape::getIndexOffset(r, shape, len)];
            //math::nd4j_swap<T>(input(i), input(r));
            input[shape::getIndexOffset(i, shape, len)] = e1;
            input[shape::getIndexOffset(r, shape, len)] = e0;
        }
    }
}
// Out-of-place device shuffle: writes `input` permuted through the `indices`
// table into `output`, updating the table as it goes (Fisher-Yates walked from
// the tail with a grid-stride loop).
// NOTE(review): indices[] is read and atomically exchanged by concurrent
// threads, so the reads of indices[i]/indices[r] can race with other threads'
// atomicExch -- the resulting permutation is not deterministic; TODO confirm
// this is acceptable here.
template <typename T>
static __global__ void fillShuffleKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong firstDim, Nd4jLong len, int* indices, nd4j::graph::RandomGenerator* rng) {
    // PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
    auto tid = blockIdx.x * blockDim.x;   // first global index of this block
    auto step = blockDim.x * gridDim.x;   // total number of threads in the grid
    for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
        int r = rng->relativeInt(i) % i;  // partner in [0, i)
        output[shape::getIndexOffset(i, outputShape, len)] = input[shape::getIndexOffset(indices[r], inputShape, len)];
        if(i != r) {
            output[shape::getIndexOffset(r, outputShape, len)] = input[shape::getIndexOffset(indices[i], inputShape, len)];
            // output.p(r, input.e<T>(indices[i]));
            // math::nd4j_swap<int>(indices[i], indices[r]);
            atomicExch(&indices[i], indices[r]);
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// Randomly shuffle `input` along its first dimension, writing into `output`
// (or shuffling in place when isInplace == true), Fisher-Yates driven by `rng`.
// Vector-like arrays are shuffled element-wise by a device kernel; higher-rank
// arrays are shuffled host-side as whole sub-arrays along dimension 0.
template <typename T>
void randomShuffle_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
    // check edge cases first
    int temp;
    const int firstDim = input.sizeAt(0);
    auto stream = context->getCudaStream();
    NDArray::prepareSpecialUse({&output}, {&input});
    if(input.lengthOf() == 1 || firstDim == 1) {
        // nothing to shuffle: a single element / single row is its own permutation
        if(!isInplace)
            output.assign(input);
    }
    else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) {
        // apply Fisher-Yates shuffle
        // the generator state must live in device memory for the kernels
        // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked -- TODO confirm intended
        nd4j::graph::RandomGenerator* dRandom = nullptr;
        cudaMalloc(&dRandom, sizeof(nd4j::graph::RandomGenerator));
        cudaMemcpy(dRandom, &rng, sizeof(nd4j::graph::RandomGenerator), cudaMemcpyHostToDevice);
        T* inputBuf = reinterpret_cast<T*>(input.specialBuffer());
        if(isInplace) {
            swapShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), firstDim, input.lengthOf(), dRandom);
        }
        else {
            // identity permutation, permuted on the fly by fillShuffleKernel
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0);
            // copy element 0 up front (sizeof(T) == exactly one element); the kernel
            // loop starts at i = firstDim-1 and only writes slot 0 when it is drawn
            cudaMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), cudaMemcpyDeviceToDevice);
            //output.p<T>(Nd4jLong(0), input.e<T>(0));
            PointersManager pointersManager(context, "helper::randomShuffle_");
            int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int)));
            T* outputBuf = reinterpret_cast<T*>(output.specialBuffer());
            fillShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, input.lengthOf(), indicesDev, dRandom);
            pointersManager.synchronize();
        }
        // NOTE(review): the host generator is not rewound in this branch
        // (rewindH left commented out), unlike the sub-array branch below -- TODO confirm
        // rng.rewindH(firstDim - 1);
        cudaFree(dRandom);
    }
    else {
        // evaluate sub-arrays list of input array through all dimensions excluding first one
        std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
        auto subArrsListIn = input.allTensorsAlongDimension(dimensions);
        // apply Fisher-Yates shuffle
        if(isInplace) {
            PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold())
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;   // partner in [0, i)
                if(i != r)
                    subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r));
            }
        }
        else {
            // evaluate sub-arrays list of output array through all dimensions excluding first one
            auto subArrsListOut = output.allTensorsAlongDimension(dimensions);
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0);  // identity permutation
            bool isZeroShuffled = false;                   // tracks whether slot 0 ever got written
            PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r]));
                if(r == 0)
                    isZeroShuffled = true;
                if(i != r) {
                    subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i]));
                    math::nd4j_swap<int>(indices[i], indices[r]);
                }
            }
            // slot 0 untouched by the loop above: it keeps the original row
            if(!isZeroShuffled)
                subArrsListOut->at(0)->assign(subArrsListIn->at(0));
            delete subArrsListOut;
        }
        rng.rewindH(firstDim-1);   // keep the host generator in sync with draws made above
        delete subArrsListIn;
    }
    NDArray::registerSpecialUse({&output}, {&input});
}
// Type dispatcher: routes to randomShuffle_<T> based on the input's data type.
void randomShuffle(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// Fill `output` with the identity matrix; delegates to NDArray::setIdentity.
// (`context` is accepted for interface uniformity but is unused here.)
void eye(nd4j::LaunchContext * context, NDArray& output) {
    output.setIdentity();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// In-place clip-by-norm over TAD sub-arrays: one block per sub-array
// (grid-stride). A sub-array whose precomputed L2 norm exceeds clipNorm is
// scaled by clipNorm/norm; otherwise it is left untouched.
template <typename T>
static __global__ void clipByNormInplaceKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
    for (int arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
        __shared__ T* z;           // this sub-array's buffer
        __shared__ Nd4jLong len;   // elements per sub-array
        __shared__ T norm2;        // its precomputed L2 norm (loaded once, not per element)
        if (threadIdx.x == 0) {
            len = shape::length(shape);
            z = inputBuffer + inputOffsets[arr];
            norm2 = norm2Buf[arr];
        }
        __syncthreads();
        if (norm2 > clipNorm) {
            for (int j = threadIdx.x; j < len; j += blockDim.x) {
                auto xIndex = shape::getIndexOffset(j, shape, len);
                z[xIndex] *= clipNorm / norm2; // case with ews = 1 and ordering is 'c'
            }
        }
        // FIX: barrier before thread 0 overwrites z/len/norm2 for the next
        // sub-array handled by this block (the out-of-place clipByNormKernel
        // already ends its loop with this barrier; it was missing here)
        __syncthreads();
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Out-of-place clip-by-norm over TAD sub-arrays: one block per sub-array
// (grid-stride). Sub-arrays whose L2 norm exceeds clipNorm are scaled by
// clipNorm/norm; the rest are copied through unchanged.
// NOTE(review): x is indexed with j running over lenZ while the offset is
// computed against the input shape/lenX -- assumes lenX == lenZ; TODO confirm.
template <typename T>
static __global__ void clipByNormKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* outputBuffer, Nd4jLong* outputShape, Nd4jLong* outputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) {
    for (Nd4jLong arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) {
        __shared__ T* x, *z;
        __shared__ Nd4jLong lenX, lenZ;
        __shared__ T norm2;
        if (threadIdx.x == 0) {
            // thread 0 resolves this sub-array's buffers and its precomputed norm
            lenX = shape::length(shape);
            x = inputBuffer + inputOffsets[arr];
            z = outputBuffer + outputOffsets[arr];
            lenZ = shape::length(outputShape);
            norm2 = norm2Buf[shape::getIndexOffset(arr, norm2shape, numOfSubArrs)];
            //printf("%d: %lf (vs %lf) %lld %lld\n", arr, norm2, clipNorm, lenX, lenZ);
        }
        __syncthreads();
        for (Nd4jLong j = threadIdx.x; j < lenZ; j+= blockDim.x) {
            auto xIndex = shape::getIndexOffset(j, shape, lenX);
            auto zIndex = shape::getIndexOffset(j, outputShape, lenZ);
            if(norm2 > clipNorm) {
                z[zIndex] = x[xIndex] * clipNorm / norm2; // case with ews = 1 and ordering is 'c'
            } else {
                z[zIndex] = x[xIndex];   // norm within bound: plain copy
            }
            //printf("%lld: %lf %lf\n", j, z[zIndex], x[xIndex]);
        }
        // barrier before thread 0 overwrites the shared state for the next sub-array
        __syncthreads();
    }
}
//////////////////////////////////////////////////////////////////////////
// Clip `input` by L2 norm: whole-array when the reduced norm is a single
// scalar, otherwise per-TAD along `dimensions`. Scaling happens only when the
// measured norm exceeds clipNorm (taken from element 0 of clipNormA).
template<typename T>
static void clipByNorm_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, NDArray const& clipNormA, const bool isInplace) {
    const int rank = input.rankOf();
    auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions);
    clipNormA.syncToHost();
    //norm2.printBuffer("Norm2");
    T const clipNorm = clipNormA.e<T>(0);
    //clipNormA.printBuffer("ClipNorm");
    auto stream = context->getCudaStream();
    if (isInplace) {
        if(norm2.lengthOf() == 1) {
            // single scalar norm: scale the whole array host-side via the overloaded op
            norm2.syncToHost();
            T norm2Val = norm2.e<T>(0);
            if(norm2Val > clipNorm)
                input *= clipNorm / norm2Val;
        }
        else {
            // per-TAD clipping on the device
            std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
            const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
            auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
            //auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimsToExclude);
            T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
            T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
            clipByNormInplaceKernel<T><<<256, 512, 1024, *stream>>>(numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
        }
    }
    else {
        if(norm2.lengthOf() == 1) {
            // single scalar norm: assign a scaled (or plain) copy
            norm2.syncToHost();
            T norm2Val = norm2.e<T>(0);
            if(norm2Val > clipNorm)
                output.assign( input * (clipNorm / norm2Val));
            else
                output.assign( input );
        }
        else {
            // per-TAD out-of-place clipping on the device
            std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions);
            const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
            auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions);
            auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimensions);
            T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer());
            T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer());
            T* outputBuffer = reinterpret_cast<T*>(output.specialBuffer());
            clipByNormKernel<T><<<256, 512, 1024, *stream>>>(numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), outputBuffer, packZ.specialShapeInfo(), packZ.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm);
        }
    }
}
// Type dispatcher: routes to clipByNorm_<T> based on the output's data type.
void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(output.dataType(), clipByNorm_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
// Clip a list of arrays by their global L2 norm:
// globalNorm = sqrt(sum(l2norm(t)^2 for t in inputs)); if it exceeds clipNorm
// every input is scaled by clipNorm/globalNorm, otherwise copied through.
// outputs[inputs.size()] receives the global norm itself.
template <typename T>
void clipByGlobalNorm_(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
    NDArray globalNorm = NDArrayFactory::create<T>(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list]))
    // FIX: this accumulation must run sequentially -- the previous
    // PRAGMA_OMP_PARALLEL_FOR here was an unsynchronized data race on
    // globalNorm (concurrent += on the same NDArray)
    for (size_t i = 0; i < inputs.size(); i++) {
        auto input = inputs[i];
        auto l2norm = input->reduceNumber(reduce::Norm2);
        globalNorm += l2norm * l2norm;
    }
    globalNorm.applyTransform(transform::Sqrt, nullptr, nullptr);// = nd4j::math::nd4j_sqrt(globalNorm);
    outputs[inputs.size()]->p(0, globalNorm);   // publish the global norm as the extra output
    globalNorm.syncToHost();
    const T factor = clipNorm / globalNorm.e<T>(0);   // only used in the clipping branch below
    PRAGMA_OMP_PARALLEL_FOR
    for (size_t e = 0; e < inputs.size(); e++) {
        // all-reduce
        auto input = inputs[e];
        auto output = outputs[e];
        if (globalNorm.e<double>(0) <= clipNorm) {
            output->assign(input);
        }
        else {
            auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
            input->applyLambda(lambda, output);
        }
    }
}
void clipByGlobalNorm(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Clip by the *averaged* norm (L2 norm divided by element count), either over
// the whole array (no dimensions) or per-TAD along `dimensions`.
template<typename T>
static void clipByAveraged_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    auto cn = clipNorm.e<T>(0);
    if (dimensions.size() == 0) {
        // all-reduce: averaged norm of the entire array
        T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf();
        if (n2 <= cn) {
            if (!isInplace)
                output.assign(input);
        }
        else {
            const T factor = cn / n2;
            //auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
            //input.applyLambda<T>(lambda, &output);
            output.assign(input * factor);
        }
    }
    else {
        // along dimension
        auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false);
        if (!isInplace)
            output.assign(input);
        // FIX: a single TAD list suffices -- the original built two identical
        // lists from `output` (tads and outTads) and scaled between them
        auto tads = output.allTensorsAlongDimension(dimensions);
        // TODO: make this CUDA-compliant somehow
        for (int e = 0; e < tads->size(); e++) {
            T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf();
            if (n2 > cn) {
                // FIX: factor computed only when actually clipping -- the
                // original divided cn / n2 unconditionally (div-by-zero when
                // a sub-array is all zeros)
                const T factor = cn / n2;
                tads->at(e)->applyScalar(scalar::Multiply, factor, tads->at(e));//applyLambda<T>(lambda, &output);
            }
        }
        delete tads;
    }
}
void clipByAveraged(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByAveraged_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
/*
if (d1 > params[1])
return params[1];
else if (d1 < params[0])
return params[0];
else return d1;
*/
// Clamp every element of `input` into [leftBound, rightBound], writing into
// `output`. Uses a fast path when both buffers are linear (ews == 1),
// otherwise resolves offsets through the shape infos.
template <typename T>
static void __global__ clipByValueKernel(void* input, Nd4jLong* inputShape, void* output, Nd4jLong* outputShape, double leftBound, double rightBound) {
    __shared__ T* outputBuf;
    __shared__ T* inputBuf;
    __shared__ Nd4jLong length;
    __shared__ bool linearBuffers;   // true when both buffers can be indexed directly
    if (threadIdx.x == 0) {
        outputBuf = reinterpret_cast<T *>(output);
        inputBuf = reinterpret_cast<T *>(input);
        length = shape::length(inputShape);
        linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
    }
    __syncthreads();
    // FIX: global thread id is blockIdx.x * blockDim.x + threadIdx.x -- the
    // original used gridDim.x, which skipped elements (and double-processed
    // others) whenever blockDim.x != gridDim.x
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;
    for (Nd4jLong e = tid; e < length; e += step) {
        if (linearBuffers) {
            if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
            else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
            else outputBuf[e] = inputBuf[e];
        }
        else {
            auto inputOffset = shape::getIndexOffset(e, inputShape, length);
            auto outputOffset = shape::getIndexOffset(e, outputShape, length);
            if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
            else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
            // FIX: pass-through must read via inputOffset (the original read
            // inputBuf[outputOffset], wrong whenever the shapes' orders differ)
            else outputBuf[outputOffset] = inputBuf[inputOffset];
        }
    }
}
// Host launcher for clipByValueKernel: clamps every element of `input` into
// [leftBound, rightBound] and writes the result to `output`.
// NOTE(review): the kernel launch result is not checked (no cudaGetLastError)
// -- TODO confirm intended.
template <typename T>
static void clipByValue_(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
    auto stream = context->getCudaStream();
    if (!input.isActualOnDeviceSide())
        input.syncToDevice();
    NDArray::prepareSpecialUse({&output}, {&input});
    clipByValueKernel<T><<<256, 512, 8192, *stream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
    NDArray::registerSpecialUse({&output}, {&input});
}
void clipByValue(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES);
}
}
}
|
5c9def0ecaadc822b45f2c4fee55dc6d1f651853.hip | // !!! This is a file automatically generated by hipify!!!
/**
* a-la fgrep utility only in parallel using CUDA
* grep input lines for precompiled patterns
* @author Denis Kokarev
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <stdarg.h>
#include <getopt.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include "transpose.cuh"
#include "die.h"
#include "par.hh"
#include "act.h"
constexpr int THREADS = 256;
constexpr int STRSZ = 1<<14; // must be under int16
constexpr int STREAMS = STRSZ;
struct MATCH {
int16_t pos;
uint16_t sz;
};
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
#define checkCuda(e) { if (e!=hipSuccess) { die("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); } }
// owns input the data buffers for the pipeline
class ReadStage: public PipeHeadExec {
public:
struct TRESULT {
char *buf;
int sz;
};
private:
static constexpr int stages = 4; // need to drag the input over 4 pipe segments
FILE *fin;
TRESULT res[stages];
char *pinned_buf;
private:
virtual void *next() override {
if (!feof(fin)) {
if (batch%stages == 0) // wrap the remaining line around
memcpy(res[0].buf-STRSZ, res[stages-1].buf+STREAMS*STRSZ-STRSZ, STRSZ);
TRESULT &r = res[batch%stages];
r.sz = fread(r.buf, 1, STREAMS*STRSZ, fin);
if (r.sz < 0)
die("Read error");
return &r;
} else {
return nullptr;
}
}
public:
ReadStage(FILE *fin):PipeHeadExec(), fin(fin) {
checkCuda(hipHostMalloc(&pinned_buf, STRSZ*STREAMS*stages+STRSZ));
res[0] = {pinned_buf+STRSZ, 0};
for (int i=1; i<stages; i++)
res[i] = {res[i-1].buf+STRSZ*STREAMS, 0};
}
~ReadStage() {
checkCuda(hipHostFree(pinned_buf));
}
};
class CudaH2DStage: public PipeStageExec {
public:
struct TRESULT {
ReadStage::TRESULT in;
char *d_ibuf; // device original input
hipStream_t stream;
};
private:
static constexpr int stages = 2;
TRESULT res[stages];
virtual void *next(void *arg) override {
TRESULT &r = res[batch%stages];
r.in = *(ReadStage::TRESULT*)arg;
checkCuda(hipMemcpyAsync(r.d_ibuf, r.in.buf, r.in.sz, hipMemcpyHostToDevice, r.stream));
checkCuda(hipStreamSynchronize(r.stream));
return &r;
}
public:
CudaH2DStage(PipeHeadExec &parent):PipeStageExec(parent) {
for (int i=0; i<stages; i++) {
res[i].in = ReadStage::TRESULT {nullptr, 0};
checkCuda(hipMalloc(&res[i].d_ibuf, STREAMS*STRSZ*sizeof(*res[i].d_ibuf)));
checkCuda(hipStreamCreate(&res[i].stream));
}
}
~CudaH2DStage() {
for (int i=0; i<stages; i++) {
res[i].in = ReadStage::TRESULT {nullptr, 0};
checkCuda(hipFree(res[i].d_ibuf));
checkCuda(hipStreamDestroy(res[i].stream));
}
}
};
struct FGREP_STATE {
unsigned node;
int16_t lbeg; // position where last line started
uint16_t match;
};
/**
 * Feed one input character into the automaton, ACT_PAGE_P2 bits at a time
 * (the trie fans out over bit-pages rather than whole bytes).
 * @returns next automata node, where 1 means at root;
 * only when at root is it safe to refresh/reattach the trie.
 */
__device__ inline unsigned cuda_act_next_char(const ACT_NODE *act, unsigned node, char ch) {
	const unsigned pageMask = (1u << ACT_PAGE_P2) - 1u;
	unsigned bits = ch;
	int pages = 8 / ACT_PAGE_P2;
	while (pages-- > 0) {
		node = act[node].next[bits & pageMask];
		bits >>= ACT_PAGE_P2;
	}
	return node;
}
/**
 * Iterate the sequence of matches triggered by the last consumed char by
 * walking the suffix-reference chain from *result_node toward the root.
 * NB: result_node will be spoiled (advanced on every call)
 * @return 1 when match was found, *val will be populated with value
 * @return 0 when no more matches (chain reached ACT_ROOT)
 */
__device__ inline int cuda_act_next_match(const ACT_NODE *act, unsigned *result_node, int *val) {
    // a non-terminal node cannot itself match: jump straight to its suffix link
    if (!act[*result_node].end)
        *result_node = act[*result_node].sufref;
    if (*result_node != ACT_ROOT) {
        *val = act[*result_node].val;             // report this match...
        *result_node = act[*result_node].sufref;  // ...and pre-advance for the next call
        return 1;
    } else {
        return 0;
    }
}
// Cursor over the column-transposed input buffer: characters that were
// consecutive in the original stream sit `stride` elements apart in the
// transposed layout, so sequential reads walk down a column.
struct CHAR_BUF {
    const char * const ibuf;  // base of the transposed buffer
    const int ibufsz;         // bytes readable by this cursor
    int pos;                  // characters consumed so far
    int row;                  // row within the current column (0..STRSZ-1)
    int col;                  // current column
    const int stride;         // distance between consecutive chars (total thread count)
    const char *s;            // pointer to the next character
};
// Transposed-buffer equivalent of *s++: fetch the next character of this
// thread's logical stream and advance the cursor; -1 once the slice runs out.
__device__ inline short ch_next(CHAR_BUF &ch) {
	if (ch.pos >= ch.ibufsz)
		return -1;                  // slice exhausted
	const short c = *ch.s;
	++ch.pos;
	ch.s += ch.stride;
	if (++ch.row == STRSZ) {        // fell off the bottom of the column: wrap to the next one
		ch.row = 0;
		++ch.col;
		ch.s = ch.ibuf + ch.col;
	}
	return c;
}
// Consume characters until a newline, end of data, or `limit` reads.
// Returns '\n' when a newline was consumed; otherwise the last value read
// (-1 at end of data, or the final non-newline char when the limit ran out).
__device__ inline short ch_seek_nl(CHAR_BUF &ch, unsigned limit) {
	short c = -1;
	for (; limit > 0; --limit) {
		c = ch_next(ch);
		if (c < 0 || c == '\n')
			break;
	}
	return c;
}
// Grep kernel: every thread scans one STRSZ-sized column of the transposed
// input for lines matched by the Aho-Corasick trie `act`.
// - thread 0 resumes from the automaton state carried over from the previous
//   batch (states[STREAMS-1]); every other thread first seeks to the start of
//   the next full line within its column.
// - matches are stored column-major into `match` (stride = total threads),
//   per-thread counts into `nmatch`; each thread's final state is saved so the
//   next batch can continue seamlessly.
__global__ void cuda_fgrep(MATCH *match, const char *ibuf, int ibufsz, unsigned *nmatch, const ACT_NODE *act, FGREP_STATE *states) {
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int stride = gridDim.x * blockDim.x; // STREAMS
	MATCH *m = match+col;                // next output slot for this thread
	unsigned nm = 0; // number of matches > STRSZ works as error indicator
	FGREP_STATE state;
	CHAR_BUF ch { ibuf, ibufsz-col*STRSZ, 0, 0, col, stride, ibuf+col };
	//__syncthreads(); // redundant, as the first thread will always run in an earlier block
	short c;
	if (col == 0) {
		state = states[STREAMS-1];
		c = ch_next(ch);
	} else {
		// skip the partial line at the head of this column; no newline within
		// STRSZ chars means there is nothing for this thread to scan
		if ((c=ch_seek_nl(ch, STRSZ)) != '\n')
			c = -1;
		// FIX: always initialize the state -- previously, when a newline WAS
		// found, `state` entered the loop uninitialized and the first
		// `state.match` check below read indeterminate memory (could emit a
		// garbage MATCH). The '\n' branch then re-bases lbeg properly.
		state = FGREP_STATE {ACT_ROOT, 0, 0};
	}
	while (c >= 0) {
		if (c == '\n') {
			if (state.match) {
				// line matched: record [start, length) within this column
				*m = MATCH {state.lbeg, uint16_t(ch.pos-state.lbeg-1)};
				nm++;
				m += stride;
			}
			state = FGREP_STATE {ACT_ROOT, int16_t(ch.pos), 0};
			// stop once we crossed past the line start of the next column
			if (ch.row > 0 && ch.col > col)
				break;
		}
		if (!state.match) {
			// feed the char to the automaton; one match per line is enough
			state.node = cuda_act_next_char(act, state.node, c);
			unsigned result_node = state.node;
			int unused;
			state.match = cuda_act_next_match(act, &result_node, &unused);
		}
		c = ch_next(ch);
	}
	state.lbeg -= STRSZ;   // rebase the line start for the next batch's buffer
	states[col] = state;
	nmatch[col] = nm;
}
// Owns all device-side buffers and runs the transpose -> grep -> transpose
// pipeline for one batch on a caller-supplied stream.
class CudaFgrep {
	static constexpr int MATCH_RATIO = 10; // lowest ave line len to save mem
	char *d_tibuf; // transposed input
	MATCH *d_tobuf; // transposed output
	MATCH *d_obuf; // regular output
	unsigned *d_nmatch;   // per-thread match counts
	FGREP_STATE *d_state; // per-thread automaton carry-over state
	ACT_NODE *d_act;      // device copy of the precompiled trie
public:
	CudaFgrep(const ACT *act) {
		checkCuda(hipMalloc(&d_tibuf, STREAMS*STRSZ*sizeof(*d_tibuf)));
		// NOTE(review): the match buffers hold STRSZ/MATCH_RATIO entries per
		// stream, but the overflow check in operator() only dies when
		// nmx > STRSZ -- counts in (STRSZ/MATCH_RATIO, STRSZ] look like they
		// would overflow these buffers; TODO confirm.
		checkCuda(hipMalloc(&d_tobuf, STREAMS*(STRSZ/MATCH_RATIO)*sizeof(*d_tobuf)));
		checkCuda(hipMalloc(&d_obuf, STREAMS*(STRSZ/MATCH_RATIO)*sizeof(*d_tobuf)));
		checkCuda(hipMalloc(&d_nmatch, STREAMS*sizeof(*d_nmatch)));
		checkCuda(hipMalloc(&d_state, STREAMS*sizeof(*d_state)));
		// seed the carry-over state that thread 0 of the first batch reads
		FGREP_STATE first_state {ACT_ROOT, 0, 0};
		checkCuda(hipMemcpy(&d_state[STREAMS-1], &first_state, sizeof(FGREP_STATE), hipMemcpyHostToDevice));
		checkCuda(hipMalloc(&d_act, act->sz));
		checkCuda(hipMemcpy(d_act, act->nodes, act->sz, hipMemcpyHostToDevice));
	}
	~CudaFgrep() {
		checkCuda(hipFree(d_tibuf));
		checkCuda(hipFree(d_tobuf));
		checkCuda(hipFree(d_obuf));
		checkCuda(hipFree(d_nmatch));
		checkCuda(hipFree(d_state));
		checkCuda(hipFree(d_act));
	}
	// Run one batch: transpose the input so each grep thread reads its column,
	// grep it, then (if anything matched) transpose the matches back and copy
	// them to host `obuf`. `rowsz` returns obuf's row stride (the max per-
	// thread match count rounded up to the transpose tile size).
	void operator()(hipStream_t stream, const char *d_ibuf, int ibuf_sz, MATCH *obuf, unsigned *nmatch, unsigned &rowsz) {
		dim3 dimGrid(STRSZ/TRANSPOSE_TILE_DIM, STREAMS/TRANSPOSE_TILE_DIM, 1);
		dim3 dimBlock(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
		hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, stream, d_tibuf, d_ibuf);
		checkCuda(hipGetLastError());
		hipLaunchKernelGGL(( cuda_fgrep), dim3(STREAMS/THREADS),dim3(THREADS),0,stream, d_tobuf, d_tibuf, ibuf_sz, d_nmatch, d_act, d_state);
		checkCuda(hipGetLastError());
		// counts are needed host-side to size the result transpose/copy
		checkCuda(hipMemcpyAsync(nmatch, d_nmatch, sizeof(*nmatch)*STREAMS, hipMemcpyDeviceToHost, stream));
		checkCuda(hipStreamSynchronize(stream));
		unsigned nmx = rowsz = *std::max_element(nmatch, nmatch+STREAMS);
		if (nmx > 0) {
			if (nmx > STRSZ)
				die("Lines cannot be longer than %d", int(STRSZ));
			rowsz = (nmx+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM*TRANSPOSE_TILE_DIM;
			dim3 dimGrid(STREAMS/TRANSPOSE_TILE_DIM, rowsz/TRANSPOSE_TILE_DIM, 1);
			dim3 dimBlock(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
			hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, stream, d_obuf, d_tobuf);
			checkCuda(hipGetLastError());
			checkCuda(hipMemcpyAsync(obuf, d_obuf, rowsz*STREAMS*sizeof(obuf[0]), hipMemcpyDeviceToHost, stream));
			checkCuda(hipStreamSynchronize(stream));
		}
	}
};
class FgrepStage: public PipeStageExec {
public:
struct TRESULT {
CudaH2DStage::TRESULT in;
unsigned match_row_sz;
MATCH *match;
unsigned *nmatch;
};
private:
static constexpr int stages = 2;
TRESULT res[stages];
CudaFgrep cfgrep;
virtual void *next(void *arg) override {
TRESULT &r = res[batch%stages];
r.in = *(CudaH2DStage::TRESULT*)arg;
cfgrep(r.in.stream, r.in.d_ibuf, r.in.in.sz, r.match, r.nmatch, r.match_row_sz);
return &r;
}
public:
FgrepStage(PipeHeadExec &parent, ACT *act):PipeStageExec(parent),cfgrep(act) {
for (int i=0; i<stages; i++) {
checkCuda(hipHostMalloc(&res[i].match, sizeof(MATCH)*STRSZ*STREAMS));
checkCuda(hipHostMalloc(&res[i].nmatch, sizeof(unsigned)*STREAMS));
}
}
~FgrepStage() {
for (int i=0; i<stages; i++) {
checkCuda(hipHostFree(res[i].match));
checkCuda(hipHostFree(res[i].nmatch));
}
}
};
// Write every matched line of one batch to `fout`, stream by stream. Matches
// for stream k live at match[k*match_row_sz ...], with nmatch[k] valid entries;
// their positions index into that stream's STRSZ-sized slice of `ibuf`.
void prn(FILE *fout, const char *ibuf, const MATCH *match, const unsigned *nmatch, const unsigned match_row_sz) {
	for (int stream = 0; stream < STREAMS; stream++) {
		const char *base = ibuf + STRSZ*stream;          // this stream's input slice
		const MATCH *mm = match + match_row_sz*stream;   // its row of matches
		const unsigned cnt = nmatch[stream];
		for (unsigned i = 0; i < cnt; i++) {
			const MATCH &m = mm[i];
			if (fwrite(base + m.pos, 1, m.sz, fout) != (size_t)m.sz)
				die("Write error");
			if (fputc('\n', fout) != '\n')
				die("Write error");
		}
	}
}
// Print the command-line help text for this utility.
static void usage(char *cmd) {
	static const char *intro[] = {
		"Match the input strings with actcomp precompiled automata,",
		"works similar to fgrep",
		"@author Denis Kokarev",
		"Usage:",
	};
	for (const char *line : intro)
		printf("%s\n", line);
	printf("\t%s patterns.bin <input.txt >filtered.txt\n", cmd);
	printf("patterns.bin - precompiled patterns.txt file, see `actcomp -h`\n");
}
// Entry point: parse options, mmap the precompiled pattern automaton, then
// stream stdin through the read -> H2D -> grep pipeline, printing matches.
int main(int argc, char **argv) {
	int c;
	opterr = 0;   // suppress getopt's own diagnostics; we die() ourselves
	while ((c = getopt(argc, argv, "h")) != -1) {
		switch (c) {
		case 'h':
			usage(argv[0]);
			return(0);
		default:
			die("unknown cmd line argument");
		}
	}
	// exactly one positional argument is required: the patterns file
	if (argc-optind < 1) {
		usage(argv[0]);
		die("run as `%s patterns.bin <file`", argv[0]);
	}
	ACT act;
	int rc = act_attach_mmap(&act, argv[optind]);
	if (rc != 0)
		die("couldn't use specified patterns file %s, act_attach_mmap() error code %d", argv[optind], rc);
	{
		// scope ensures the pipeline stages (and their device buffers) are
		// torn down before the automaton mapping is detached below
		ReadStage read(stdin);
		CudaH2DStage h2d(read);
		FgrepStage fgrep(h2d, &act);
		for (auto it:PipeOutput(fgrep)) {
			FgrepStage::TRESULT *r = (FgrepStage::TRESULT*)it;
			prn(stdout, r->in.in.buf, r->match, r->nmatch, r->match_row_sz);
		}
	}
	rc = act_detach_mmap(&act);
	if (rc != 0)
		die("act_detach_mmap() error code %d", rc);
}
| 5c9def0ecaadc822b45f2c4fee55dc6d1f651853.cu | /**
* a-la fgrep utility only in parallel using CUDA
* grep input lines for precompiled patterns
* @author Denis Kokarev
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <stdarg.h>
#include <getopt.h>
#include <cuda_runtime.h>
#include <assert.h>
#include "transpose.cuh"
#include "die.h"
#include "par.hh"
#include "act.h"
constexpr int THREADS = 256;
constexpr int STRSZ = 1<<14; // must be under int16
constexpr int STREAMS = STRSZ;
struct MATCH {
int16_t pos;
uint16_t sz;
};
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
#define checkCuda(e) { if (e!=cudaSuccess) { die("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); } }
// owns input the data buffers for the pipeline
class ReadStage: public PipeHeadExec {
public:
struct TRESULT {
char *buf;
int sz;
};
private:
static constexpr int stages = 4; // need to drag the input over 4 pipe segments
FILE *fin;
TRESULT res[stages];
char *pinned_buf;
private:
virtual void *next() override {
if (!feof(fin)) {
if (batch%stages == 0) // wrap the remaining line around
memcpy(res[0].buf-STRSZ, res[stages-1].buf+STREAMS*STRSZ-STRSZ, STRSZ);
TRESULT &r = res[batch%stages];
r.sz = fread(r.buf, 1, STREAMS*STRSZ, fin);
if (r.sz < 0)
die("Read error");
return &r;
} else {
return nullptr;
}
}
public:
ReadStage(FILE *fin):PipeHeadExec(), fin(fin) {
checkCuda(cudaMallocHost(&pinned_buf, STRSZ*STREAMS*stages+STRSZ));
res[0] = {pinned_buf+STRSZ, 0};
for (int i=1; i<stages; i++)
res[i] = {res[i-1].buf+STRSZ*STREAMS, 0};
}
~ReadStage() {
checkCuda(cudaFreeHost(pinned_buf));
}
};
class CudaH2DStage: public PipeStageExec {
public:
struct TRESULT {
ReadStage::TRESULT in;
char *d_ibuf; // device original input
cudaStream_t stream;
};
private:
static constexpr int stages = 2;
TRESULT res[stages];
virtual void *next(void *arg) override {
TRESULT &r = res[batch%stages];
r.in = *(ReadStage::TRESULT*)arg;
checkCuda(cudaMemcpyAsync(r.d_ibuf, r.in.buf, r.in.sz, cudaMemcpyHostToDevice, r.stream));
checkCuda(cudaStreamSynchronize(r.stream));
return &r;
}
public:
CudaH2DStage(PipeHeadExec &parent):PipeStageExec(parent) {
for (int i=0; i<stages; i++) {
res[i].in = ReadStage::TRESULT {nullptr, 0};
checkCuda(cudaMalloc(&res[i].d_ibuf, STREAMS*STRSZ*sizeof(*res[i].d_ibuf)));
checkCuda(cudaStreamCreate(&res[i].stream));
}
}
~CudaH2DStage() {
for (int i=0; i<stages; i++) {
res[i].in = ReadStage::TRESULT {nullptr, 0};
checkCuda(cudaFree(res[i].d_ibuf));
checkCuda(cudaStreamDestroy(res[i].stream));
}
}
};
struct FGREP_STATE {
unsigned node;
int16_t lbeg; // position where last line started
uint16_t match;
};
/**
* consume next char
* @returns next automata node, where 1 means at root
* only when at root, it is safe to refresh/reattach the trie
*/
__device__ inline unsigned cuda_act_next_char(const ACT_NODE *act, unsigned node, char ch) {
unsigned bmask = ch;
for (int i=0; i<8/ACT_PAGE_P2; i++,bmask >>= ACT_PAGE_P2)
node = act[node].next[bmask & ((1<<ACT_PAGE_P2)-1)];
return node;
}
/**
* iterate the sequence of all matches triggered by last char
* NB: result_node will be spoiled
* @return 1 when match was found, *val will be populated with value
* @return 0 when no more matches
*/
__device__ inline int cuda_act_next_match(const ACT_NODE *act, unsigned *result_node, int *val) {
if (!act[*result_node].end)
*result_node = act[*result_node].sufref;
if (*result_node != ACT_ROOT) {
*val = act[*result_node].val;
*result_node = act[*result_node].sufref;
return 1;
} else {
return 0;
}
}
struct CHAR_BUF {
const char * const ibuf;
const int ibufsz;
int pos;
int row;
int col;
const int stride;
const char *s;
};
// *s++ in our transposed buffer
__device__ inline short ch_next(CHAR_BUF &ch) {
if (ch.pos < ch.ibufsz) {
short c = *ch.s;
ch.pos++;
ch.row++;
ch.s += ch.stride;
if (ch.row == STRSZ) {
ch.col++;
ch.row = 0;
ch.s = ch.ibuf+ch.col;
}
return c;
} else {
return -1;
}
}
__device__ inline short ch_seek_nl(CHAR_BUF &ch, unsigned limit) {
short c = -1;
while (limit && (c=ch_next(ch))>=0 && (c != '\n'))
limit--;
return c;
}
__global__ void cuda_fgrep(MATCH *match, const char *ibuf, int ibufsz, unsigned *nmatch, const ACT_NODE *act, FGREP_STATE *states) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x; // STREAMS
MATCH *m = match+col;
unsigned nm = 0; // number of matches > STRSZ works as error indicator
FGREP_STATE state;
CHAR_BUF ch { ibuf, ibufsz-col*STRSZ, 0, 0, col, stride, ibuf+col };
//__syncthreads(); // redundant, as the first thread will always run in an earlier block
short c;
if (col == 0) {
state = states[STREAMS-1];
c = ch_next(ch);
} else {
if ((c=ch_seek_nl(ch, STRSZ)) != '\n') {
state = FGREP_STATE {ACT_ROOT, 0, 0};
c = -1;
}
}
while (c >= 0) {
if (c == '\n') {
if (state.match) {
*m = MATCH {state.lbeg, uint16_t(ch.pos-state.lbeg-1)};
nm++;
m += stride;
}
state = FGREP_STATE {ACT_ROOT, int16_t(ch.pos), 0};
if (ch.row > 0 && ch.col > col)
break;
}
if (!state.match) {
state.node = cuda_act_next_char(act, state.node, c);
unsigned result_node = state.node;
int unused;
state.match = cuda_act_next_match(act, &result_node, &unused);
}
c = ch_next(ch);
}
state.lbeg -= STRSZ;
states[col] = state;
nmatch[col] = nm;
}
/*
 * Host-side driver that owns all device buffers for one fgrep pass and
 * launches transpose + cuda_fgrep on a caller-supplied stream.
 * Buffers are allocated once in the constructor and reused across calls.
 */
class CudaFgrep {
	static constexpr int MATCH_RATIO = 10; // lowest ave line len to save mem
	char *d_tibuf; // transposed input
	MATCH *d_tobuf; // transposed output
	MATCH *d_obuf; // regular output
	unsigned *d_nmatch;
	FGREP_STATE *d_state;
	ACT_NODE *d_act;
public:
	// Allocates device buffers and uploads the precompiled automaton.
	// Seeds the last per-column state so column 0 of the first batch
	// starts from the automaton root (see cuda_fgrep's col==0 path).
	CudaFgrep(const ACT *act) {
		checkCuda(cudaMalloc(&d_tibuf, STREAMS*STRSZ*sizeof(*d_tibuf)));
		// match buffers hold at most STRSZ/MATCH_RATIO matches per stream
		checkCuda(cudaMalloc(&d_tobuf, STREAMS*(STRSZ/MATCH_RATIO)*sizeof(*d_tobuf)));
		checkCuda(cudaMalloc(&d_obuf, STREAMS*(STRSZ/MATCH_RATIO)*sizeof(*d_tobuf)));
		checkCuda(cudaMalloc(&d_nmatch, STREAMS*sizeof(*d_nmatch)));
		checkCuda(cudaMalloc(&d_state, STREAMS*sizeof(*d_state)));
		FGREP_STATE first_state {ACT_ROOT, 0, 0};
		checkCuda(cudaMemcpy(&d_state[STREAMS-1], &first_state, sizeof(FGREP_STATE), cudaMemcpyHostToDevice));
		checkCuda(cudaMalloc(&d_act, act->sz));
		checkCuda(cudaMemcpy(d_act, act->nodes, act->sz, cudaMemcpyHostToDevice));
	}
	// Releases every device allocation made by the constructor.
	~CudaFgrep() {
		checkCuda(cudaFree(d_tibuf));
		checkCuda(cudaFree(d_tobuf));
		checkCuda(cudaFree(d_obuf));
		checkCuda(cudaFree(d_nmatch));
		checkCuda(cudaFree(d_state));
		checkCuda(cudaFree(d_act));
	}
	// Runs one batch: transpose input -> fgrep kernel -> copy per-stream
	// match counts back, then (if any matches) transpose the match table
	// and copy it to host.  rowsz returns the host match-row stride,
	// rounded up to TRANSPOSE_TILE_DIM.  Blocks on `stream` before
	// returning, so nmatch/obuf are valid on exit.
	void operator()(cudaStream_t stream, const char *d_ibuf, int ibuf_sz, MATCH *obuf, unsigned *nmatch, unsigned &rowsz) {
		dim3 dimGrid(STRSZ/TRANSPOSE_TILE_DIM, STREAMS/TRANSPOSE_TILE_DIM, 1);
		dim3 dimBlock(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
		transposeNoBankConflicts<<<dimGrid, dimBlock, 0, stream>>>(d_tibuf, d_ibuf);
		checkCuda(cudaGetLastError());
		cuda_fgrep<<<STREAMS/THREADS,THREADS,0,stream>>>(d_tobuf, d_tibuf, ibuf_sz, d_nmatch, d_act, d_state);
		checkCuda(cudaGetLastError());
		checkCuda(cudaMemcpyAsync(nmatch, d_nmatch, sizeof(*nmatch)*STREAMS, cudaMemcpyDeviceToHost, stream));
		checkCuda(cudaStreamSynchronize(stream));
		unsigned nmx = rowsz = *std::max_element(nmatch, nmatch+STREAMS);
		if (nmx > 0) {
			// NOTE(review): d_tobuf/d_obuf hold only STRSZ/MATCH_RATIO
			// matches per stream, but this guard only rejects nmx > STRSZ;
			// a batch of very short lines could exceed the buffer --
			// confirm whether the bound should be STRSZ/MATCH_RATIO.
			if (nmx > STRSZ)
				die("Lines cannot be longer than %d", int(STRSZ));
			rowsz = (nmx+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM*TRANSPOSE_TILE_DIM;
			dim3 dimGrid(STREAMS/TRANSPOSE_TILE_DIM, rowsz/TRANSPOSE_TILE_DIM, 1);
			dim3 dimBlock(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
			transposeNoBankConflicts<<<dimGrid, dimBlock, 0, stream>>>(d_obuf, d_tobuf);
			checkCuda(cudaGetLastError());
			checkCuda(cudaMemcpyAsync(obuf, d_obuf, rowsz*STREAMS*sizeof(obuf[0]), cudaMemcpyDeviceToHost, stream));
			checkCuda(cudaStreamSynchronize(stream));
		}
	}
};
/*
 * Pipeline stage that feeds device input buffers through CudaFgrep.
 * Keeps two pinned-host result slots so one batch can be consumed while
 * the next is being produced.
 */
class FgrepStage: public PipeStageExec {
public:
	struct TRESULT {
		CudaH2DStage::TRESULT in;
		unsigned match_row_sz;
		MATCH *match;
		unsigned *nmatch;
	};
private:
	static constexpr int stages = 2;
	TRESULT res[stages];
	CudaFgrep cfgrep;
	// Consume one upstream H2D result, run fgrep on its stream, and hand
	// the filled slot downstream.  Slots alternate with the batch counter.
	virtual void *next(void *arg) override {
		CudaH2DStage::TRESULT *up = (CudaH2DStage::TRESULT*)arg;
		TRESULT &slot = res[batch%stages];
		slot.in = *up;
		cfgrep(slot.in.stream, slot.in.d_ibuf, slot.in.in.sz, slot.match, slot.nmatch, slot.match_row_sz);
		return &slot;
	}
public:
	// Pinned host memory lets CudaFgrep's async copies run at full speed.
	FgrepStage(PipeHeadExec &parent, ACT *act):PipeStageExec(parent),cfgrep(act) {
		for (TRESULT &t : res) {
			checkCuda(cudaMallocHost(&t.match, sizeof(MATCH)*STRSZ*STREAMS));
			checkCuda(cudaMallocHost(&t.nmatch, sizeof(unsigned)*STREAMS));
		}
	}
	~FgrepStage() {
		for (TRESULT &t : res) {
			checkCuda(cudaFreeHost(t.match));
			checkCuda(cudaFreeHost(t.nmatch));
		}
	}
};
/*
 * Write every matched line of one batch to fout, one line per match.
 * `match` is laid out as STREAMS rows of `match_row_sz` entries; the
 * per-stream counts live in `nmatch`, and each stream's text begins at
 * ibuf + STRSZ*stream.
 */
void prn(FILE *fout, const char *ibuf, const MATCH *match, const unsigned *nmatch, const unsigned match_row_sz) {
	for (int col = 0; col < STREAMS; col++) {
		const char *text = ibuf + STRSZ*col;
		const MATCH *m = match + match_row_sz*col;
		const MATCH *end = m + nmatch[col];
		for (; m < end; m++) {
			// each MATCH is an (offset, length) pair into this stream's text
			if (fwrite(text + m->pos, 1, m->sz, fout) != (size_t)m->sz)
				die("Write error");
			if (fputc('\n', fout) != '\n')
				die("Write error");
		}
	}
}
// Print the help text; `cmd` is argv[0], shown in the usage line.
static void usage(char *cmd) {
	fputs("Match the input strings with actcomp precompiled automata,\n", stdout);
	fputs("works similar to fgrep\n", stdout);
	fputs("@author Denis Kokarev\n", stdout);
	fputs("Usage:\n", stdout);
	printf("\t%s patterns.bin <input.txt >filtered.txt\n", cmd);
	fputs("patterns.bin - precompiled patterns.txt file, see `actcomp -h`\n", stdout);
}
/*
 * Entry point: parse flags, mmap the precompiled automaton, then run the
 * read -> H2D -> fgrep pipeline over stdin, printing matches to stdout.
 */
int main(int argc, char **argv) {
	int c;
	opterr = 0;	// suppress getopt's own diagnostics; we die() ourselves
	while ((c = getopt(argc, argv, "h")) != -1) {
		switch (c) {
		case 'h':
			usage(argv[0]);
			return(0);
		default:
			die("unknown cmd line argument");
		}
	}
	// exactly one positional argument required: the patterns file
	if (argc-optind < 1) {
		usage(argv[0]);
		die("run as `%s patterns.bin <file`", argv[0]);
	}
	ACT act;
	// mmap the precompiled automaton read-only
	int rc = act_attach_mmap(&act, argv[optind]);
	if (rc != 0)
		die("couldn't use specified patterns file %s, act_attach_mmap() error code %d", argv[optind], rc);
	{
		// scope block: pipeline objects must be destroyed (and their CUDA
		// resources freed) before the automaton is unmapped below
		ReadStage read(stdin);
		CudaH2DStage h2d(read);
		FgrepStage fgrep(h2d, &act);
		for (auto it:PipeOutput(fgrep)) {
			FgrepStage::TRESULT *r = (FgrepStage::TRESULT*)it;
			prn(stdout, r->in.in.buf, r->match, r->nmatch, r->match_row_sz);
		}
	}
	rc = act_detach_mmap(&act);
	if (rc != 0)
		die("act_detach_mmap() error code %d", rc);
}
|
002035f1db1bfe4cb6f892d1ee42252e6e903cc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * ******************************************************
 * *********** FILL LJ POTENTIALS TABLE *************
 * ******************************************************
 * Fill one table of sampled Lennard-Jones potentials for a particle with
 * sigma=s, epsilon=e interacting with every other particle type.
 * Thread x selects the sampled distance r = MIn + x*var; thread y selects
 * the other type (index into EPS/SIG).  Expects a 2D launch covering
 * width x height; out-of-range threads return immediately.
 */
__global__ void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG, double e, double s, double var, int width, int height)
{
	/* x determines the assigned value of r */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	/* y determines the type of the other particle (position in EPS/SIG) */
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	/* pair parameters: arithmetic mean of sigmas, geometric mean of epsilons */
	double sig12 = (s + SIG[y])/2;
	double eps12 = sqrt(e * EPS[y]);
	/* sampled distance assigned to this thread */
	double r = MIn + x*var;
	/* compute (sig/r)^6 once and square it for the ^12 term --
	 * avoids two expensive pow() calls per sample */
	double sr = sig12/r;
	double sr6 = sr*sr*sr*sr*sr*sr;
	LJ_POT[y*width +x] = (float)(4.0*eps12*(sr6*sr6 - sr6));
}
/** **************************************************************** **/
/** ************************************************
 * ******** FILL DERIVATIVES TABLE ***********
 * ************************************************
 * Fill one table with the DERIVATIVE dE/dr of the Lennard-Jones potential
 * for a particle with sigma=s, epsilon=e against every other type.
 * Thread x selects the sampled distance r = MIn + x*var; thread y selects
 * the other type (index into EPS/SIG).
 */
__global__ void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG, double e, double s, double var, int width, int height)
{
	/* x determines the assigned value of r */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	/* y determines the type of the other particle (position in EPS/SIG) */
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	/* pair parameters: arithmetic mean of sigmas, geometric mean of epsilons */
	double sig12 = (s + SIG[y])/2;
	double eps12 = sqrt(e * EPS[y]);
	/* sampled distance assigned to this thread */
	double r = MIn + x*var;
	/* dE/dr = 24*eps*(sig^6/r^7 - 2*sig^12/r^13); expressed through
	 * (sig/r)^6 to replace four pow() calls with multiplications */
	double sr = sig12/r;
	double sr6 = sr*sr*sr*sr*sr*sr;
	dLJ_POT[y*width +x] = (float)(24.0*eps12*(sr6 - 2.0*sr6*sr6)/r);
}
/** **************************************************************** **/
/* **************************************************************************
 * ******** DISTANCE BETWEEN CLOSEST IMAGE OF ANY PARTICLE **************
 * *************************************************************************
 * Minimum-image displacement and distance between every particle pair
 * under periodic boundaries of size (box_x, box_y, box_z).
 * Thread (i,j) writes element j*width+i of X/Y/Z/R.
 */
__global__ void close_distances_kernel(double* X, double* Y, double* Z, double* R, double* position_x, double* position_y, double* position_z, double box_x, double box_y, double box_z, int width, int height)
{
	/* i,j determine the two particles to consider */
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
	if(i >= width || j >= height) {return;}
	/* flat output index -- this declaration was commented out, leaving
	 * `pos` undeclared in the stores below (compile error) */
	unsigned int pos = j*width+i;
	double _X = position_x[i] - position_x[j];
	double _Y = position_y[i] - position_y[j];
	double _Z = position_z[i] - position_z[j];
	/* minimum-image convention: wrap each component into [-box/2, box/2] */
	_X = _X - box_x * round(_X/box_x);
	_Y = _Y - box_y * round(_Y/box_y);
	_Z = _Z - box_z * round(_Z/box_z);
	X[pos] = _X;
	Y[pos] = _Y;
	Z[pos] = _Z;
	R[pos] = sqrt( _X*_X + _Y*_Y + _Z*_Z );
}
/** **************************************************************** **/
/* ***************************************************
 * ******** DISTANCE BETWEEN PARTICLES *************
 * *********--- NON Periodic --------***************
 * **************************************************
 * Pairwise displacement and Euclidean distance without periodic wrapping.
 * Thread (x,y) writes element y*width+x of X/Y/Z/R.
 */
__global__ void distances_kernel(double* R, double* X, double* Y, double* Z, double* x1, double* y1, double* z1, int width, int height)
{
	/* particle indices handled by this thread */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	unsigned int idx = y*width + x;
	double dx = x1[x] - x1[y];
	double dy = y1[x] - y1[y];
	double dz = z1[x] - z1[y];
	X[idx] = dx;
	Y[idx] = dy;
	Z[idx] = dz;
	R[idx] = sqrt( dx*dx + dy*dy + dz*dz );
}
/***************************************************************************/
/* ***************************************************
* *********** DERIVATIVES CALCULATION ****************
* **************************************************
* **************************************************
*/
/**********************************************************************
 ******** POTENTIALS-MODE CALCULATION ------ GLOBAL MEMORY **************
 * Force (dE/dr) by central finite difference over a table of sampled LJ
 * potentials held in global memory (LJPot).  Thread (x,y) handles the
 * pair (particle 2 = x, particle 1 = y) and writes dEr[y*width+x].
 */
__global__ void potentialsMode_memory_kernel(float* LJPot,double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, int num_types, int width, int height )
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* locate the table row for this pair of particle types */
		int t_o_p_1 = item_to_type[y] * num_types; // which sub-matrix
		int t_o_p_2 = item_to_type[x] + t_o_p_1;   // which row inside it
		int posInicial = t_o_p_2 * num_samples_r;  // start of that row
		/* sample points one finite-difference step above and below r */
		double superior = erre + (DIF_FINITAS_DELTA*DIST/num_samples_r);
		double inferior = erre - (DIF_FINITAS_DELTA*DIST/num_samples_r);
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN) */
		int indexsup = posInicial + ((superior-MIn)*(num_samples_r/DIST));
		int indexinf = posInicial + ((inferior-MIn)*(num_samples_r/DIST));
		/* clamp both sample indices to the row's valid range */
		if(superior > MAx)
			indexsup = posInicial + num_samples_r - 1;
		if(superior < MIn)
			indexsup = posInicial;
		if(inferior < MIn)
			indexinf = posInicial;
		if(inferior > MAx)
			indexinf = posInicial + num_samples_r - 1;
		double E_r_up = (double) LJPot[indexsup];
		double E_r_dwn = (double) LJPot[indexinf];
		/* central difference over the distance spanned by the two samples */
		double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / num_samples_r;
		result = (E_r_up - E_r_dwn) / (r_dif);
	}
	dEr[y*width+x] = result;
}
/*************************************************************************/
/* ***************************************************
 * ******** POTENTIALS-MODE ----- TEXTURE *********
 * **************************************************
 * Force (dE/dr) by central finite difference over a table of LJ potentials
 * stored in texture memory (texRef).  Thread (x,y) handles the pair
 * (particle 2 = x, particle 1 = y) and writes dEr[y*width+x].
 */
__global__ void potentialsMode_texture_kernel(double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, int num_types, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* texture row for this pair of types (+0.5 targets the texel center) */
		float t_o_p_1 = (float) item_to_type[y] * num_types;
		float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1;
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN) */
		float index_x = (float)((double) (erre - MIn) * (double) num_samples_r / DIST + 0.5);
		/* central difference around index_x */
		double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 );
		double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 );
		double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / num_samples_r;
		result = (E_r_up - E_r_dwn) / (r_dif);
	}
	dEr[y*width+x] = result;
}
/** **************************************************************** **/
/* ************************************************************************
 * ******* DERIVATIVE-MODE CALCULATION ----- GLOBAL MEMORY **************
 * ************************************************************************
 * Force (dE/dr) looked up directly from a precomputed table of LJ-potential
 * DERIVATIVES held in global memory (dLJPot).  Thread (x,y) handles the
 * pair (particle 2 = x, particle 1 = y) and writes dEr[y*width+x].
 */
__global__ void derivativeMode_memory_kernel(float* dLJPot,double* dEr, double* r, double cut,int* item_to_type,
						int num_samples_r, int num_types, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* locate the table row for this pair of particle types */
		int t_o_p_1 = item_to_type[y] * num_types; // which sub-matrix
		int t_o_p_2 = item_to_type[x] + t_o_p_1;   // which row inside it
		int posInicial = t_o_p_2 * num_samples_r;  // start of that row
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN),
		 * then clamp to the row ends or round up to the next sample */
		int posMax = num_samples_r - 2;
		float sesgo = (erre-MIn) * (num_samples_r/DIST);
		if(sesgo > posMax)
			result = dLJPot[posInicial + posMax];
		else if(sesgo < 0)
			result = dLJPot[posInicial];
		else
			result = dLJPot[posInicial + (int)ceil(sesgo)];
	}
	dEr[y*width+x] = result;
}
// *****************************************************************************************
/* ************************************************************************
 * ******* DERIVATIVE-MODE CALCULATION ----- TEXTURE ********************
 * ************************************************************************
 * Force (dE/dr) fetched directly from a table of LJ-potential DERIVATIVES
 * stored in texture memory (texRef) -- no finite difference needed.
 * Thread (x,y) handles the pair (particle 2 = x, particle 1 = y).
 */
__global__ void direct_derivativeMode_E_r(double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, int num_types, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* texture row for this pair of types (+0.5 targets the texel center) */
		float t_o_p_1 = (float) item_to_type[y] * num_types;
		float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1;
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN) */
		float index_x = (float)((double) (erre - MIn) * (double) num_samples_r / DIST + 0.5);
		result = (double) tex2D( texRef, index_x, t_o_p_2 );
	}
	dEr[y*width+x] = result;
}
/* ***************************************************************** **/
/* ************************************************************************
 * ***************** ANALYTIC-MODE CALCULATION **************************
 * ************************************************************************
 * Force (dE/dr) evaluated analytically from the LJ formula -- no lookup
 * table.  Thread (x,y) handles the pair (particle 2 = x, particle 1 = y)
 * and writes dEr[y*width+x].
 */
__global__ void analyticMode_kernel(double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, double* EPS, double* SIG, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* pair parameters: arithmetic mean of sigmas, geometric mean of epsilons */
		int type_i = item_to_type[x];
		int type_j = item_to_type[y];
		double sig12 = (SIG[type_i] + SIG[type_j])/2;
		double eps12 = sqrt(EPS[type_i] * EPS[type_j]);
		/* dE/dr = 24*eps*(sig^6/r^7 - 2*sig^12/r^13), expressed through
		 * (sig/r)^6 to replace four pow() calls with multiplications */
		double sr = sig12/erre;
		double sr6 = sr*sr*sr*sr*sr*sr;
		result = 24.0*eps12*(sr6 - 2.0*sr6*sr6)/erre;
	}
	dEr[y*width+x] = result;
}
/* ***************************************************
* *********** POTENTIALS CALCULATION ****************
* ***************************************************
* ***************************************************
*/
/* ************************************************************************
 * ***************** ANALYTIC-MODE CALCULATION **************************
 * ************************************************************************
 * Potential energy E(r) evaluated analytically from the LJ formula.
 * Thread (x,y) handles the pair (particle 2 = x, particle 1 = y) and
 * writes Er[y*width+x].
 */
__global__ void potential_analytic_kernel(double* Er, double* r, double cut, int* item_to_type, int num_samples_r,
						double* EPS, double* SIG, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	/* read the pair distance once (the original fetched it three times) */
	double erre = r[y*width+x];
	/* self-interaction or beyond the cutoff contributes no energy */
	if(x == y || erre >= cut) {Er[y*width+x] = 0; return;}
	/* pair parameters: arithmetic mean of sigmas, geometric mean of epsilons */
	int type_i = item_to_type[x];
	int type_j = item_to_type[y];
	double sig12 = (SIG[type_i] + SIG[type_j])/2;
	double eps12 = sqrt(EPS[type_i] * EPS[type_j]);
	/* E = 4*eps*((sig/r)^12 - (sig/r)^6), via (sig/r)^6 to avoid pow() */
	double sr = sig12/erre;
	double sr6 = sr*sr*sr*sr*sr*sr;
	Er[y*width+x] = 4.0*eps12*(sr6*sr6 - sr6);
}
/** *********************************************************************************** **/
/* ************************************************************************
 * ***************** POTENTIALS-MODE CALCULATION **************************
 * ************************************************************************
 * Potential energy E(r) fetched from a table of sampled LJ potentials
 * stored in texture memory (texRef).  Thread (x,y) handles the pair
 * (particle 2 = x, particle 1 = y) and writes Er[y*width+x].
 */
__global__ void potentials_texture_kernel(double* Er, double* r, double cut, int* item_to_type,
						int num_samples_r, int num_types, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	/* read the pair distance once (the original fetched it three times) */
	double erre = r[y*width+x];
	/* self-interaction or beyond the cutoff contributes no energy */
	if(x == y || erre >= cut) {Er[y*width+x] = 0; return;}
	/* texture row for this pair of types (+0.5 targets the texel center) */
	float t_o_p_1 = (float) item_to_type[y]; // which sub-matrix
	float t_o_p_2 = (float) item_to_type[x]; // which row inside it
	float row = t_o_p_2 + 0.5 + (t_o_p_1* num_types);
	/* map r to a table column: x = (r - MIN) * N / (MAX - MIN) */
	float index_x = (float)((double) (erre - MIn) * (double) num_samples_r / DIST + 0.5);
	Er[y*width+x] = (double) tex2D( texRef, index_x, row );
}
/** **************************************************************** **/
| 002035f1db1bfe4cb6f892d1ee42252e6e903cc1.cu |
/**
 * ******************************************************
 * *********** FILL LJ POTENTIALS TABLE *************
 * ******************************************************
 * Fill one table of sampled Lennard-Jones potentials for a particle with
 * sigma=s, epsilon=e interacting with every other particle type.
 * Thread x selects the sampled distance r = MIn + x*var; thread y selects
 * the other type (index into EPS/SIG).  Expects a 2D launch covering
 * width x height; out-of-range threads return immediately.
 */
__global__ void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG, double e, double s, double var, int width, int height)
{
	/* x determines the assigned value of r */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	/* y determines the type of the other particle (position in EPS/SIG) */
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	/* pair parameters: arithmetic mean of sigmas, geometric mean of epsilons */
	double sig12 = (s + SIG[y])/2;
	double eps12 = sqrt(e * EPS[y]);
	/* sampled distance assigned to this thread */
	double r = MIn + x*var;
	/* compute (sig/r)^6 once and square it for the ^12 term --
	 * avoids two expensive pow() calls per sample */
	double sr = sig12/r;
	double sr6 = sr*sr*sr*sr*sr*sr;
	LJ_POT[y*width +x] = (float)(4.0*eps12*(sr6*sr6 - sr6));
}
/** **************************************************************** **/
/** ************************************************
 * ******** FILL DERIVATIVES TABLE ***********
 * ************************************************
 * Fill one table with the DERIVATIVE dE/dr of the Lennard-Jones potential
 * for a particle with sigma=s, epsilon=e against every other type.
 * Thread x selects the sampled distance r = MIn + x*var; thread y selects
 * the other type (index into EPS/SIG).
 */
__global__ void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG, double e, double s, double var, int width, int height)
{
	/* x determines the assigned value of r */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	/* y determines the type of the other particle (position in EPS/SIG) */
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	/* pair parameters: arithmetic mean of sigmas, geometric mean of epsilons */
	double sig12 = (s + SIG[y])/2;
	double eps12 = sqrt(e * EPS[y]);
	/* sampled distance assigned to this thread */
	double r = MIn + x*var;
	/* dE/dr = 24*eps*(sig^6/r^7 - 2*sig^12/r^13); expressed through
	 * (sig/r)^6 to replace four pow() calls with multiplications */
	double sr = sig12/r;
	double sr6 = sr*sr*sr*sr*sr*sr;
	dLJ_POT[y*width +x] = (float)(24.0*eps12*(sr6 - 2.0*sr6*sr6)/r);
}
/** **************************************************************** **/
/* **************************************************************************
 * ******** DISTANCE BETWEEN CLOSEST IMAGE OF ANY PARTICLE **************
 * *************************************************************************
 * Minimum-image displacement and distance between every particle pair
 * under periodic boundaries of size (box_x, box_y, box_z).
 * Thread (i,j) writes element j*width+i of X/Y/Z/R.
 */
__global__ void close_distances_kernel(double* X, double* Y, double* Z, double* R, double* position_x, double* position_y, double* position_z, double box_x, double box_y, double box_z, int width, int height)
{
	/* i,j determine the two particles to consider */
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
	if(i >= width || j >= height) {return;}
	/* flat output index -- this declaration was commented out, leaving
	 * `pos` undeclared in the stores below (compile error) */
	unsigned int pos = j*width+i;
	double _X = position_x[i] - position_x[j];
	double _Y = position_y[i] - position_y[j];
	double _Z = position_z[i] - position_z[j];
	/* minimum-image convention: wrap each component into [-box/2, box/2] */
	_X = _X - box_x * round(_X/box_x);
	_Y = _Y - box_y * round(_Y/box_y);
	_Z = _Z - box_z * round(_Z/box_z);
	X[pos] = _X;
	Y[pos] = _Y;
	Z[pos] = _Z;
	R[pos] = sqrt( _X*_X + _Y*_Y + _Z*_Z );
}
/** **************************************************************** **/
/* ***************************************************
 * ******** DISTANCE BETWEEN PARTICLES *************
 * *********--- NON Periodic --------***************
 * **************************************************
 * Pairwise displacement and Euclidean distance without periodic wrapping.
 * Thread (x,y) writes element y*width+x of X/Y/Z/R.
 */
__global__ void distances_kernel(double* R, double* X, double* Y, double* Z, double* x1, double* y1, double* z1, int width, int height)
{
	/* particle indices handled by this thread */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	if(x >= width || y >= height) {return;}
	unsigned int idx = y*width + x;
	double dx = x1[x] - x1[y];
	double dy = y1[x] - y1[y];
	double dz = z1[x] - z1[y];
	X[idx] = dx;
	Y[idx] = dy;
	Z[idx] = dz;
	R[idx] = sqrt( dx*dx + dy*dy + dz*dz );
}
/***************************************************************************/
/* ***************************************************
* *********** DERIVATIVES CALCULATION ****************
* **************************************************
* **************************************************
*/
/**********************************************************************
 ******** POTENTIALS-MODE CALCULATION ------ GLOBAL MEMORY **************
 * Force (dE/dr) by central finite difference over a table of sampled LJ
 * potentials held in global memory (LJPot).  Thread (x,y) handles the
 * pair (particle 2 = x, particle 1 = y) and writes dEr[y*width+x].
 */
__global__ void potentialsMode_memory_kernel(float* LJPot,double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, int num_types, int width, int height )
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* locate the table row for this pair of particle types */
		int t_o_p_1 = item_to_type[y] * num_types; // which sub-matrix
		int t_o_p_2 = item_to_type[x] + t_o_p_1;   // which row inside it
		int posInicial = t_o_p_2 * num_samples_r;  // start of that row
		/* sample points one finite-difference step above and below r */
		double superior = erre + (DIF_FINITAS_DELTA*DIST/num_samples_r);
		double inferior = erre - (DIF_FINITAS_DELTA*DIST/num_samples_r);
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN) */
		int indexsup = posInicial + ((superior-MIn)*(num_samples_r/DIST));
		int indexinf = posInicial + ((inferior-MIn)*(num_samples_r/DIST));
		/* clamp both sample indices to the row's valid range */
		if(superior > MAx)
			indexsup = posInicial + num_samples_r - 1;
		if(superior < MIn)
			indexsup = posInicial;
		if(inferior < MIn)
			indexinf = posInicial;
		if(inferior > MAx)
			indexinf = posInicial + num_samples_r - 1;
		double E_r_up = (double) LJPot[indexsup];
		double E_r_dwn = (double) LJPot[indexinf];
		/* central difference over the distance spanned by the two samples */
		double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / num_samples_r;
		result = (E_r_up - E_r_dwn) / (r_dif);
	}
	dEr[y*width+x] = result;
}
/*************************************************************************/
/* ***************************************************
 * ******** POTENTIALS-MODE ----- TEXTURE *********
 * **************************************************
 * Force (dE/dr) by central finite difference over a table of LJ potentials
 * stored in texture memory (texRef).  Thread (x,y) handles the pair
 * (particle 2 = x, particle 1 = y) and writes dEr[y*width+x].
 */
__global__ void potentialsMode_texture_kernel(double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, int num_types, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* texture row for this pair of types (+0.5 targets the texel center) */
		float t_o_p_1 = (float) item_to_type[y] * num_types;
		float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1;
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN) */
		float index_x = (float)((double) (erre - MIn) * (double) num_samples_r / DIST + 0.5);
		/* central difference around index_x */
		double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 );
		double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 );
		double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / num_samples_r;
		result = (E_r_up - E_r_dwn) / (r_dif);
	}
	dEr[y*width+x] = result;
}
/** **************************************************************** **/
/* ************************************************************************
 * ******* DERIVATIVE-MODE CALCULATION ----- GLOBAL MEMORY **************
 * ************************************************************************
 * Force (dE/dr) looked up directly from a precomputed table of LJ-potential
 * DERIVATIVES held in global memory (dLJPot).  Thread (x,y) handles the
 * pair (particle 2 = x, particle 1 = y) and writes dEr[y*width+x].
 */
__global__ void derivativeMode_memory_kernel(float* dLJPot,double* dEr, double* r, double cut,int* item_to_type,
						int num_samples_r, int num_types, int width, int height)
{
	/* matrix element: x = particle 2, y = particle 1 */
	unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
	/* guard BEFORE any global read -- the original fetched r[y*width+x]
	 * first, an out-of-bounds access for threads past the matrix edge */
	if(x >= width || y >= height) {return;}
	double erre = r[y*width+x];
	double result;
	/* self-interaction or beyond the cutoff contributes no force */
	if(x == y || erre >= cut) {
		result = 0;
	}
	else{
		/* locate the table row for this pair of particle types */
		int t_o_p_1 = item_to_type[y] * num_types; // which sub-matrix
		int t_o_p_2 = item_to_type[x] + t_o_p_1;   // which row inside it
		int posInicial = t_o_p_2 * num_samples_r;  // start of that row
		/* map r to a table column: x = (r - MIN) * N / (MAX - MIN),
		 * then clamp to the row ends or round up to the next sample */
		int posMax = num_samples_r - 2;
		float sesgo = (erre-MIn) * (num_samples_r/DIST);
		if(sesgo > posMax)
			result = dLJPot[posInicial + posMax];
		else if(sesgo < 0)
			result = dLJPot[posInicial];
		else
			result = dLJPot[posInicial + (int)ceil(sesgo)];
	}
	dEr[y*width+x] = result;
}
// *****************************************************************************************
/* ************************************************************************
* ******* DERIVATIVE-MODE CALCULATION ----- TEXTURE ********************
* ************************************************************************
*/
/**********************************************************************
******** DERIVATIVE-MODE CALCULATION ---------- TEXTURE **************/
// THIS KERNEL CALCULATES FORCES(LJ DERIVATIVE) USING A TABLE OF ***DERIVATIVE OF POTENTIALS**** STORED IN DEVICEs TEXTURE MEMORY(DIRECT FETCH)
// Force (Lennard-Jones derivative) lookup kernel: fetches the tabulated
// derivative directly from texture memory (texRef) with a single tex2D fetch.
// One thread per (x, y) particle pair; dEr[y*width+x] gets the fetched
// derivative, or 0 for self-pairs / distances at or past the cutoff.
// NOTE: depends on the file-scope texture reference texRef and the macros
// MIn / DIST describing the table range.
__global__ void direct_derivativeMode_E_r(double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, int num_types, int width, int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    /* Guard before any global read: the original loaded r[y*width+x] first,
     * which is out of bounds for threads past the matrix edge. */
    if (x >= width || y >= height) { return; }
    double erre = r[y*width+x];
    double result;
    if (x == y || erre >= cut) {
        result = 0;  // self-interaction or beyond cutoff
    }
    else {
        /* Texture row for this pair of particle types; the +0.5 centres the
         * fetch on the texel. */
        float t_o_p_1 = (float) item_to_type[y] * num_types;      // selects the sub-matrix
        float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1;  // selects the row
        /* Convert r to a texture subscript: x = (r - MIN) * N / (MAX - MIN).
         * NOTE(review): index_x is not clamped to the table range -- out-of-range
         * r relies on the texture addressing mode; confirm the texRef setup. */
        float index_x = (float)((double) (erre - MIn) * (double) num_samples_r / DIST + 0.5);
        result = (double) tex2D( texRef, index_x, t_o_p_2 );
    }
    dEr[y*width+x] = result;
}
/* ***************************************************************** **/
/* ************************************************************************
* ***************** ANALYTIC-MODE CALCULATION **************************
* ************************************************************************
*/
/***************************************************************
*********** AnalyticMode CALCULATION *************************/
// Analytic Lennard-Jones force kernel: evaluates the derivative
//   dE/dr = 24*eps*( sig^6/r^7 - 2*sig^12/r^13 )
// directly, mixing unlike particle types with arithmetic-mean sigma and
// geometric-mean epsilon. One thread per (x, y) particle pair; writes
// dEr[y*width+x] (0 for self-pairs or distances at/beyond the cutoff).
__global__ void analyticMode_kernel(double* dEr, double* r, double cut, int* item_to_type, int num_samples_r, double* EPS, double* SIG, int width, int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    /* Guard before any global read: the original loaded r[y*width+x] first,
     * an out-of-bounds global access for threads past the matrix edge. */
    if (x >= width || y >= height) { return; }
    double erre = r[y*width+x];
    double result;
    if (x == y || erre >= cut) {
        result = 0;  // self-interaction or beyond cutoff
    }
    else {
        /* Mixing rules for the (type_i, type_j) pair. */
        int type_i = item_to_type[x];
        int type_j = item_to_type[y];
        double sig12 = (double) (SIG[type_i] + SIG[type_j]) / 2;
        double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
        result = (double) 24.0 * eps12 * ( pow(sig12, 6) / pow(erre, 7) - 2 * pow(sig12, 12) / pow(erre, 13) );
    }
    dEr[y*width+x] = result;
}
/* ***************************************************
* *********** POTENTIALS CALCULATION ****************
* ***************************************************
* ***************************************************
*/
/* ************************************************************************
* ***************** ANALYTIC-MODE CALCULATION **************************
* ************************************************************************
*/
// Analytic Lennard-Jones potential kernel:
//   E(r) = 4*eps*((sig/r)^12 - (sig/r)^6)
// Unlike particle types are combined with arithmetic-mean sigma and
// geometric-mean epsilon. One thread per (x, y) element of the width x height
// pair matrix; self-pairs and pairs at/beyond the cutoff store 0.
__global__ void potential_analytic_kernel(double* Er, double* r, double cut, int* item_to_type, int num_samples_r,
                                          double* EPS, double* SIG, int width, int height)
{
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    if (x >= width || y >= height)
        return;
    const int idx = y * width + x;
    if (x == y || r[idx] >= cut) {
        Er[idx] = 0;
        return;
    }
    /* Mixing rules for this pair of particle types. */
    const int typeA = item_to_type[x];
    const int typeB = item_to_type[y];
    const double sigMix = (double) (SIG[typeA] + SIG[typeB]) / 2;
    const double epsMix = (double) sqrt(EPS[typeA] * EPS[typeB]);
    const double ratio = sigMix / r[idx];
    Er[idx] = (double) 4.0 * epsMix * ( pow(ratio, 12) - pow(ratio, 6) );
}
/** *********************************************************************************** **/
/* ************************************************************************
* ***************** POTENTIALS-MODE CALCULATION **************************
* ************************************************************************
*/
// THIS KERNEL CALCULATES **POTENTIAL** VALUE FROM A TABLE IN TEXTURE MEMORY
// Potential lookup kernel: fetches the Lennard-Jones **potential** value for a
// particle pair from the table stored in texture memory (texRef).
// One thread per (x, y) pair; Er[y*width+x] gets the fetched potential, or 0
// for self-pairs / distances at or past the cutoff.
// NOTE: depends on the file-scope texture reference texRef and the macros
// MIn / DIST describing the table range.
__global__ void potentials_texture_kernel(double* Er, double* r, double cut, int* item_to_type,
int num_samples_r, int num_types, int width, int height)
{
/* Element of the matrix computed by this thread */
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
/* Bounds / cutoff guards: threads outside the matrix do nothing; self-pairs
 * and pairs beyond the cutoff store 0 and exit. */
if(x >= width || y >= height) {return;}
if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
/* Potential value for distance r, for the corresponding particle types. */
/** type of particles **/
float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use
float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these
float row = t_o_p_2 + 0.5 + (t_o_p_1* num_types);  // +0.5 centres the fetch on the texel
/** Convert r to a subscript into the Lennard-Jones table **/
/** r = (MAX-MIN) * X / N + MIN **/
/** x = (r-MIN) * N / (MAX-MIN) **/
// NOTE(review): index_x is not clamped to the table range (the clamping
// variant below is commented out) -- out-of-range r relies on the texture
// addressing mode; confirm against the texRef setup.
float index_x = (float)((double) (r[y*width+x] - MIn) * (double) num_samples_r / DIST + 0.5); // convert r to x
/*
double rposta=r[y*width+x];
if(rposta> MAx)
rposta=MAx;
else
if(rposta<MIn)
rposta=MIn;
float index_x = (float)((double) (rposta - MIn) * (double) num_samples_r / DIST + 0.5); // convert r to x
*/
Er[y*width+x] = (double) tex2D( texRef, index_x, row );
}
/** **************************************************************** **/
|
b7e539cecf7429c29bdb8258c5adcbf1337794d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/tensor/split_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
namespace {
#ifdef USE_ROCM
constexpr int kNumElementsPerThread = 2;
constexpr int kNumThreadsPerBlock = 512;
#else
constexpr int kNumElementsPerThread = GridDim::maxElementsPerThread;
constexpr int kNumThreadsPerBlock = GridDim::maxThreadsPerBlock;
#endif
} // namespace
// Scatter kernel for Split when every output has the same extent along the
// split axis. Each thread stages up to kNumElementsPerThread input elements in
// registers (pass 1), then recomputes each element's (output tensor, offset)
// destination via fast_divmod and stores it (pass 2).
// Launch layout: 1-D grid; within a block's chunk, consecutive threads touch
// consecutive elements so global loads stay coalesced.
template <typename T, typename OutputDataArray>
__global__ void _SplitKernelSameSplitDim(const fast_divmod block_size_including_axis_dim_div,
const fast_divmod block_size_inside_axis_dim_div,
const fast_divmod split_dim_size, const int num_outputs, const T* input_data,
OutputDataArray output_data, const CUDA_LONG N) {
// First element index owned by this thread in its block's chunk.
CUDA_LONG start = kNumElementsPerThread * kNumThreadsPerBlock * blockIdx.x + threadIdx.x;
T value[kNumElementsPerThread];
CUDA_LONG id = start;
// Pass 1: coalesced loads into registers (stride = block width).
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
value[i] = input_data[id];
id += kNumThreadsPerBlock;
}
}
id = start;
// Pass 2: decompose each flat index into (outer block, axis index, inner
// offset), map the axis index to an output tensor, and store.
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
int outer_block_index, block_index, offset, output_index, block_offset;
block_size_including_axis_dim_div.divmod(id, outer_block_index, offset);
block_size_inside_axis_dim_div.divmod(offset, block_index, offset);
split_dim_size.divmod(block_index, output_index, block_offset);
CUDA_LONG output_pos =
(outer_block_index * split_dim_size.d_ + block_offset) * block_size_inside_axis_dim_div.d_ + offset;
reinterpret_cast<T*>(output_data[output_index])[output_pos] = value[i];
id += kNumThreadsPerBlock;
}
}
}
// Host-side launcher for _SplitKernelSameSplitDim (all outputs share the same
// split size). Dispatches on the element byte size so one kernel instantiation
// per width covers every element type of that width.
// input_data/output_data are device pointers; work is enqueued on `stream`
// asynchronously. Returns a FAIL status for unsupported element widths.
template <typename OutputDataArray>
Status SplitSameSplitDimImpl(hipStream_t stream, const size_t element_size, const int block_size_including_axis_dim,
                             const int block_size_inside_axis_dim, const int64_t split_size, const int num_outputs,
                             const void* input_data, OutputDataArray output_data, const size_t input_size) {
  CUDA_LONG N = static_cast<CUDA_LONG>(input_size);
  // Each block covers kNumElementsPerThread * kNumThreadsPerBlock elements.
  int blocksPerGrid = CeilDiv(N, kNumElementsPerThread * kNumThreadsPerBlock);
  // Precompute the fast integer-division helpers on the host.
  fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim);
  fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim);
  fast_divmod split_size_div = fast_divmod(static_cast<int>(split_size));
  switch (element_size) {
#define CASE_ELEMENT_TYPE(type)                                                                                 \
  case sizeof(type): {                                                                                          \
    hipLaunchKernelGGL(( _SplitKernelSameSplitDim), dim3(blocksPerGrid), dim3(kNumThreadsPerBlock), 0, stream,  \
        block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_size_div, num_outputs,         \
        reinterpret_cast<const ToCudaType<type>::MappedType*>(input_data), output_data, N);                     \
  } break
    CASE_ELEMENT_TYPE(int8_t);
    CASE_ELEMENT_TYPE(int16_t);
    CASE_ELEMENT_TYPE(int32_t);
    CASE_ELEMENT_TYPE(int64_t);
#undef CASE_ELEMENT_TYPE
    default:
      // Bug fix: the message previously said "Slice"; this is the Split operator.
      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Split operator");
  }
  return Status::OK();
}
// Explicit instantiations for the two output-pointer container forms used by
// the Split op: a raw device array of pointers, and a by-value TArray.
template Status SplitSameSplitDimImpl<void**>(hipStream_t stream, const size_t element_size,
                                              const int block_size_including_axis_dim,
                                              const int block_size_inside_axis_dim, const int64_t split_size,
                                              const int num_outputs, const void* input_data, void** output_data,
                                              const size_t input_size);
template Status SplitSameSplitDimImpl<TArray<void*, 32>>(hipStream_t stream, const size_t element_size,
                                                         const int block_size_including_axis_dim,
                                                         const int block_size_inside_axis_dim, const int64_t split_size,
                                                         const int num_outputs, const void* input_data,
                                                         TArray<void*, 32> output_data, const size_t input_size);
// Scatter kernel for Split with per-output split sizes.
// split_sizes[i] is output i's extent along the split axis;
// split_sizes_range[i - 1] is used below as the first axis index owned by
// output i (i.e. running totals of split_sizes), and
// axis_dimension_input_output_mapping maps an axis index directly to the
// output tensor that owns it.
// Same two-pass register-staging structure as _SplitKernelSameSplitDim.
template <typename T>
__global__ void _SplitKernel(const fast_divmod block_size_including_axis_dim_div,
const fast_divmod block_size_inside_axis_dim_div, const int64_t* split_sizes,
const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping,
const int num_outputs, const T* input_data, void** output_data, const CUDA_LONG N) {
CUDA_LONG start = kNumElementsPerThread * kNumThreadsPerBlock * blockIdx.x + threadIdx.x;
T value[kNumElementsPerThread];
CUDA_LONG id = start;
// Pass 1: coalesced loads into registers.
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
value[i] = input_data[id];
id += kNumThreadsPerBlock;
}
}
id = start;
// Pass 2: decompose each flat index, look up the owning output, and store.
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
int outer_block_index, block_index, offset;
block_size_including_axis_dim_div.divmod(id, outer_block_index, offset);
block_size_inside_axis_dim_div.divmod(offset, block_index, offset);
int output_index = axis_dimension_input_output_mapping[block_index];
// Offset of this axis index inside its owning output.
int64_t range_left = (output_index == 0) ? 0 : split_sizes_range[output_index - 1];
int block_offset = block_index - static_cast<int>(range_left);
CUDA_LONG output_pos =
(outer_block_index * split_sizes[output_index] + block_offset) * block_size_inside_axis_dim_div.d_ + offset;
reinterpret_cast<T*>(output_data[output_index])[output_pos] = value[i];
id += kNumThreadsPerBlock;
}
}
}
// Host-side launcher for _SplitKernel (outputs may have different extents
// along the split axis). Dispatches on the element byte size.
// split_sizes / split_sizes_range / axis_dimension_input_output_mapping are
// dereferenced on the device, so they must be device pointers. Work is
// enqueued on `stream` asynchronously.
// Returns a FAIL status for unsupported element widths.
Status SplitImpl(hipStream_t stream, const size_t element_size, const int block_size_including_axis_dim,
                 const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range,
                 const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data,
                 void** output_data, const size_t input_size) {
  CUDA_LONG N = static_cast<CUDA_LONG>(input_size);
  // Each block covers kNumElementsPerThread * kNumThreadsPerBlock elements.
  int blocksPerGrid = CeilDiv(N, kNumElementsPerThread * kNumThreadsPerBlock);
  fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim);
  fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim);
  switch (element_size) {
#define CASE_ELEMENT_TYPE(type)                                                                             \
  case sizeof(type): {                                                                                      \
    hipLaunchKernelGGL(( _SplitKernel), dim3(blocksPerGrid), dim3(kNumThreadsPerBlock), 0, stream,          \
        block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range,  \
        axis_dimension_input_output_mapping, num_outputs,                                                   \
        reinterpret_cast<const ToCudaType<type>::MappedType*>(input_data), output_data, N);                 \
  } break
    CASE_ELEMENT_TYPE(int8_t);
    CASE_ELEMENT_TYPE(int16_t);
    CASE_ELEMENT_TYPE(int32_t);
    CASE_ELEMENT_TYPE(int64_t);
#undef CASE_ELEMENT_TYPE
    default:
      // Bug fix: the message previously said "Slice"; this is the Split operator.
      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Split operator");
  }
  return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
| b7e539cecf7429c29bdb8258c5adcbf1337794d1.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/tensor/split_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
namespace {
#ifdef USE_ROCM
constexpr int kNumElementsPerThread = 2;
constexpr int kNumThreadsPerBlock = 512;
#else
constexpr int kNumElementsPerThread = GridDim::maxElementsPerThread;
constexpr int kNumThreadsPerBlock = GridDim::maxThreadsPerBlock;
#endif
} // namespace
// Scatter kernel for Split when every output has the same extent along the
// split axis. Each thread stages up to kNumElementsPerThread input elements in
// registers (pass 1), then recomputes each element's (output tensor, offset)
// destination via fast_divmod and stores it (pass 2).
// Launch layout: 1-D grid; within a block's chunk, consecutive threads touch
// consecutive elements so global loads stay coalesced.
template <typename T, typename OutputDataArray>
__global__ void _SplitKernelSameSplitDim(const fast_divmod block_size_including_axis_dim_div,
const fast_divmod block_size_inside_axis_dim_div,
const fast_divmod split_dim_size, const int num_outputs, const T* input_data,
OutputDataArray output_data, const CUDA_LONG N) {
// First element index owned by this thread in its block's chunk.
CUDA_LONG start = kNumElementsPerThread * kNumThreadsPerBlock * blockIdx.x + threadIdx.x;
T value[kNumElementsPerThread];
CUDA_LONG id = start;
// Pass 1: coalesced loads into registers (stride = block width).
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
value[i] = input_data[id];
id += kNumThreadsPerBlock;
}
}
id = start;
// Pass 2: decompose each flat index into (outer block, axis index, inner
// offset), map the axis index to an output tensor, and store.
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
int outer_block_index, block_index, offset, output_index, block_offset;
block_size_including_axis_dim_div.divmod(id, outer_block_index, offset);
block_size_inside_axis_dim_div.divmod(offset, block_index, offset);
split_dim_size.divmod(block_index, output_index, block_offset);
CUDA_LONG output_pos =
(outer_block_index * split_dim_size.d_ + block_offset) * block_size_inside_axis_dim_div.d_ + offset;
reinterpret_cast<T*>(output_data[output_index])[output_pos] = value[i];
id += kNumThreadsPerBlock;
}
}
}
// Host-side launcher for _SplitKernelSameSplitDim (all outputs share the same
// split size). Dispatches on the element byte size so one kernel instantiation
// per width covers every element type of that width.
// input_data/output_data are device pointers; work is enqueued on `stream`
// asynchronously. Returns a FAIL status for unsupported element widths.
template <typename OutputDataArray>
Status SplitSameSplitDimImpl(cudaStream_t stream, const size_t element_size, const int block_size_including_axis_dim,
                             const int block_size_inside_axis_dim, const int64_t split_size, const int num_outputs,
                             const void* input_data, OutputDataArray output_data, const size_t input_size) {
  CUDA_LONG N = static_cast<CUDA_LONG>(input_size);
  // Each block covers kNumElementsPerThread * kNumThreadsPerBlock elements.
  int blocksPerGrid = CeilDiv(N, kNumElementsPerThread * kNumThreadsPerBlock);
  // Precompute the fast integer-division helpers on the host.
  fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim);
  fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim);
  fast_divmod split_size_div = fast_divmod(static_cast<int>(split_size));
  switch (element_size) {
#define CASE_ELEMENT_TYPE(type)                                                                           \
  case sizeof(type): {                                                                                    \
    _SplitKernelSameSplitDim<<<blocksPerGrid, kNumThreadsPerBlock, 0, stream>>>(                          \
        block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_size_div, num_outputs,   \
        reinterpret_cast<const ToCudaType<type>::MappedType*>(input_data), output_data, N);               \
  } break
    CASE_ELEMENT_TYPE(int8_t);
    CASE_ELEMENT_TYPE(int16_t);
    CASE_ELEMENT_TYPE(int32_t);
    CASE_ELEMENT_TYPE(int64_t);
#undef CASE_ELEMENT_TYPE
    default:
      // Bug fix: the message previously said "Slice"; this is the Split operator.
      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Split operator");
  }
  return Status::OK();
}
// Explicit instantiations for the two output-pointer container forms used by
// the Split op: a raw device array of pointers, and a by-value TArray.
template Status SplitSameSplitDimImpl<void**>(cudaStream_t stream, const size_t element_size,
                                              const int block_size_including_axis_dim,
                                              const int block_size_inside_axis_dim, const int64_t split_size,
                                              const int num_outputs, const void* input_data, void** output_data,
                                              const size_t input_size);
template Status SplitSameSplitDimImpl<TArray<void*, 32>>(cudaStream_t stream, const size_t element_size,
                                                         const int block_size_including_axis_dim,
                                                         const int block_size_inside_axis_dim, const int64_t split_size,
                                                         const int num_outputs, const void* input_data,
                                                         TArray<void*, 32> output_data, const size_t input_size);
// Scatter kernel for Split with per-output split sizes.
// split_sizes[i] is output i's extent along the split axis;
// split_sizes_range[i - 1] is used below as the first axis index owned by
// output i (i.e. running totals of split_sizes), and
// axis_dimension_input_output_mapping maps an axis index directly to the
// output tensor that owns it.
// Same two-pass register-staging structure as _SplitKernelSameSplitDim.
template <typename T>
__global__ void _SplitKernel(const fast_divmod block_size_including_axis_dim_div,
const fast_divmod block_size_inside_axis_dim_div, const int64_t* split_sizes,
const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping,
const int num_outputs, const T* input_data, void** output_data, const CUDA_LONG N) {
CUDA_LONG start = kNumElementsPerThread * kNumThreadsPerBlock * blockIdx.x + threadIdx.x;
T value[kNumElementsPerThread];
CUDA_LONG id = start;
// Pass 1: coalesced loads into registers.
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
value[i] = input_data[id];
id += kNumThreadsPerBlock;
}
}
id = start;
// Pass 2: decompose each flat index, look up the owning output, and store.
#pragma unroll
for (int i = 0; i < kNumElementsPerThread; ++i) {
if (id < N) {
int outer_block_index, block_index, offset;
block_size_including_axis_dim_div.divmod(id, outer_block_index, offset);
block_size_inside_axis_dim_div.divmod(offset, block_index, offset);
int output_index = axis_dimension_input_output_mapping[block_index];
// Offset of this axis index inside its owning output.
int64_t range_left = (output_index == 0) ? 0 : split_sizes_range[output_index - 1];
int block_offset = block_index - static_cast<int>(range_left);
CUDA_LONG output_pos =
(outer_block_index * split_sizes[output_index] + block_offset) * block_size_inside_axis_dim_div.d_ + offset;
reinterpret_cast<T*>(output_data[output_index])[output_pos] = value[i];
id += kNumThreadsPerBlock;
}
}
}
// Host-side launcher for _SplitKernel (outputs may have different extents
// along the split axis). Dispatches on the element byte size.
// split_sizes / split_sizes_range / axis_dimension_input_output_mapping are
// dereferenced on the device, so they must be device pointers. Work is
// enqueued on `stream` asynchronously.
// Returns a FAIL status for unsupported element widths.
Status SplitImpl(cudaStream_t stream, const size_t element_size, const int block_size_including_axis_dim,
                 const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range,
                 const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data,
                 void** output_data, const size_t input_size) {
  CUDA_LONG N = static_cast<CUDA_LONG>(input_size);
  // Each block covers kNumElementsPerThread * kNumThreadsPerBlock elements.
  int blocksPerGrid = CeilDiv(N, kNumElementsPerThread * kNumThreadsPerBlock);
  fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim);
  fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim);
  switch (element_size) {
#define CASE_ELEMENT_TYPE(type)                                                                        \
  case sizeof(type): {                                                                                 \
    _SplitKernel<<<blocksPerGrid, kNumThreadsPerBlock, 0, stream>>>(                                   \
        block_size_including_axis_dim_div, block_size_inside_axis_dim_div, split_sizes, split_sizes_range, \
        axis_dimension_input_output_mapping, num_outputs,                                              \
        reinterpret_cast<const ToCudaType<type>::MappedType*>(input_data), output_data, N);            \
  } break
    CASE_ELEMENT_TYPE(int8_t);
    CASE_ELEMENT_TYPE(int16_t);
    CASE_ELEMENT_TYPE(int32_t);
    CASE_ELEMENT_TYPE(int64_t);
#undef CASE_ELEMENT_TYPE
    default:
      // Bug fix: the message previously said "Slice"; this is the Split operator.
      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Split operator");
  }
  return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
|
3596d0ee2788ef9d537826ba255b29319b8eb72b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Per-game arg-max over valid moves: writes, for each game, the board
// coordinate with the highest probability among the valid moves, skipping the
// pass move at index 0.
// Launch layout: one block per game (gm = blockIdx.x) with a single thread per
// block -- the scan over valid moves is sequential.
// COUNT_VALID is a project macro; it presumably brings n_valid_mvs and
// valid_mv_inds into scope for this game -- TODO confirm against its definition.
__global__ void max_prob_to_coord_valid_mvs_kernel(float * prob_map, int * to_coord,
char * board, char * valid_mv_map_internal){
int gm = blockIdx.x;
int gm_offset = gm*MAP_SZ;
float * prob_map_cur = &prob_map[gm_offset];
COUNT_VALID
// determine max prob
float max_prob = -999;  // sentinel below any real probability
int max_map_loc = -1;   // stays -1 when no valid non-pass move exists
for(int mv_ind = 1; mv_ind < n_valid_mvs; mv_ind++){ // skip pass move
int map_loc = valid_mv_inds[mv_ind];
CHK_VALID_MAP_COORD(map_loc)
DASSERT(board[gm*MAP_SZ + map_loc] == 0)  // a valid move must target an empty point
if(prob_map_cur[map_loc] <= max_prob)
continue;
max_map_loc = map_loc;
max_prob = prob_map_cur[map_loc];
}
to_coord[gm] = max_map_loc;
}
// Host wrapper: launches the per-game arg-max kernel for all BATCH_SZ games
// (one single-thread block per game).
// board and valid_mv_map_internal are file-scope device buffers; REQ_INIT,
// CHECK_CUDA_ERR and VERIFY_BUFFER_INTEGRITY are project macros (CHECK_CUDA_ERR
// presumably uses the local `err` variable -- TODO confirm).
void max_prob_to_coord_valid_mvs_launcher(float * prob_map, int * to_coord){
hipError_t err;
REQ_INIT
hipLaunchKernelGGL(( max_prob_to_coord_valid_mvs_kernel) , dim3(BATCH_SZ), dim3(1) , 0, 0, prob_map, to_coord, board,
valid_mv_map_internal); CHECK_CUDA_ERR
VERIFY_BUFFER_INTEGRITY
}
| 3596d0ee2788ef9d537826ba255b29319b8eb72b.cu | __global__ void max_prob_to_coord_valid_mvs_kernel(float * prob_map, int * to_coord,
char * board, char * valid_mv_map_internal){
// Per-game arg-max over valid moves: writes, for each game, the board
// coordinate with the highest probability among the valid moves, skipping the
// pass move at index 0.
// Launch layout: one block per game (gm = blockIdx.x) with a single thread per
// block -- the scan over valid moves is sequential.
// COUNT_VALID is a project macro; it presumably brings n_valid_mvs and
// valid_mv_inds into scope for this game -- TODO confirm against its definition.
int gm = blockIdx.x;
int gm_offset = gm*MAP_SZ;
float * prob_map_cur = &prob_map[gm_offset];
COUNT_VALID
// determine max prob
float max_prob = -999;  // sentinel below any real probability
int max_map_loc = -1;   // stays -1 when no valid non-pass move exists
for(int mv_ind = 1; mv_ind < n_valid_mvs; mv_ind++){ // skip pass move
int map_loc = valid_mv_inds[mv_ind];
CHK_VALID_MAP_COORD(map_loc)
DASSERT(board[gm*MAP_SZ + map_loc] == 0)  // a valid move must target an empty point
if(prob_map_cur[map_loc] <= max_prob)
continue;
max_map_loc = map_loc;
max_prob = prob_map_cur[map_loc];
}
to_coord[gm] = max_map_loc;
}
// Host wrapper: launches the per-game arg-max kernel for all BATCH_SZ games
// (one single-thread block per game).
// board and valid_mv_map_internal are file-scope device buffers; REQ_INIT,
// CHECK_CUDA_ERR and VERIFY_BUFFER_INTEGRITY are project macros (CHECK_CUDA_ERR
// presumably uses the local `err` variable -- TODO confirm).
void max_prob_to_coord_valid_mvs_launcher(float * prob_map, int * to_coord){
cudaError_t err;
REQ_INIT
max_prob_to_coord_valid_mvs_kernel <<< BATCH_SZ, 1 >>> (prob_map, to_coord, board,
valid_mv_map_internal); CHECK_CUDA_ERR
VERIFY_BUFFER_INTEGRITY
}
|
28507960a223f61d8b73ec96bbfe4346a45d062a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// 1-D vertical (column) convolution of a single-channel 2-D image.
// img: input image, n_x columns by n_y rows, row-major.
// kernel: 2k+1 filter taps; out[y][x] = sum over i in [-k, k] of
//   kernel[i+k] * img[clamp(y+i)][x], with clamp-to-edge borders.
// One thread per output pixel; out-of-image threads exit early.
// NOTE(review): the loop index and row offset are `short`, so k and n_y are
// assumed to fit in 16 bits -- confirm for large images.
__global__ void kernel_image2D1C_ConvolveColumn(float* img, int n_x, int n_y, short k, float *kernel, float* out)
{
// Find index of current thread
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_x>=n_x) return;
if (idx_y>=n_y) return;
float sum=0;
for (short i=-k;i<=k;i++)
{
short y=idx_y+i;
// clamp-to-edge border handling
if (y<0) y=0;
if (y>=n_y) y=n_y-1;
sum+=kernel[i+k]*img[y*n_x+idx_x];
}
out[idx_y*n_x+idx_x]=sum;
} | 28507960a223f61d8b73ec96bbfe4346a45d062a.cu | #include "includes.h"
// 1-D vertical (column) convolution of a single-channel 2-D image.
// img: input image, n_x columns by n_y rows, row-major.
// kernel: 2k+1 filter taps; out[y][x] = sum over i in [-k, k] of
//   kernel[i+k] * img[clamp(y+i)][x], with clamp-to-edge borders.
// One thread per output pixel; out-of-image threads exit early.
// NOTE(review): the loop index and row offset are `short`, so k and n_y are
// assumed to fit in 16 bits -- confirm for large images.
__global__ void kernel_image2D1C_ConvolveColumn(float* img, int n_x, int n_y, short k, float *kernel, float* out)
{
// Find index of current thread
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_x>=n_x) return;
if (idx_y>=n_y) return;
float sum=0;
for (short i=-k;i<=k;i++)
{
short y=idx_y+i;
// clamp-to-edge border handling
if (y<0) y=0;
if (y>=n_y) y=n_y-1;
sum+=kernel[i+k]*img[y*n_x+idx_x];
}
out[idx_y*n_x+idx_x]=sum;
} |
a3dc15f0dbb24e26150af47a802ba9dd0f21394f.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
// Functor computing |a - val|, saturated back to the element type T.
// val is held in the work type S (float for the sub-double instantiations
// below, double for the double one).
template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
{
S val;
explicit AbsDiffScalar(S val_) : val(val_) {}
__device__ __forceinline__ T operator ()(T a) const
{
abs_func<S> f;
return saturate_cast<T>(f(a - val));
}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Transform-traits specialization: selects the generic transform's tuning
// (via ArithmFuncTraits) from the element size of T.
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Element-wise |src1 - val| -> dst over a 2-D image, executed on `stream`
// through the generic cudev::transform with no mask.
template <typename T, typename S>
void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
AbsDiffScalar<T, S> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for every supported OpenCV depth.
template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
| a3dc15f0dbb24e26150af47a802ba9dd0f21394f.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
// Functor computing |a - val|, saturated back to the element type T.
// val is held in the work type S (float for the sub-double instantiations
// below, double for the double one).
template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
{
S val;
explicit AbsDiffScalar(S val_) : val(val_) {}
__device__ __forceinline__ T operator ()(T a) const
{
abs_func<S> f;
return saturate_cast<T>(f(a - val));
}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Transform-traits specialization: selects the generic transform's tuning
// (via ArithmFuncTraits) from the element size of T.
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Element-wise |src1 - val| -> dst over a 2-D image, executed on `stream`
// through the generic cudev::transform with no mask.
template <typename T, typename S>
void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
AbsDiffScalar<T, S> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for every supported OpenCV depth.
template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
1aec944bc2569ddd7a3b5261ffdff49bbc301c57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Model of heat diffusion - a 2D rectangular environment has its left edge with
a fixed non-zero heat; the other 3 edges have fixed zero heat. The middle of
the environment starts with zero heat, but as the model advances, the heat
of each cell in the middle gets set to the average of its 4 neighbors. The
model advances until the amount of overall heat change from one time step to
the next is sufficiently small.
*/
/*******************************************************************************
IMPORT LIBRARIES
******************************************************************************/
#include <stdio.h>
#include "params.h"
/*******************************************************************************
DEFINE MACROS
******************************************************************************/
// Define the number of CUDA threads in each CUDA warp (group of threads that
// execute instructions in lock-step)
#define THREADS_PER_WARP 32
// Define the maximum number of CUDA warps in each CUDA block
#define MAX_WARPS_PER_BLOCK 16
// Define the number of CUDA threads in each CUDA block
#define THREADS_PER_BLOCK ((THREADS_PER_WARP) * (MAX_WARPS_PER_BLOCK))
// Define the number of CUDA blocks in each CUDA grid
#define BLOCKS_PER_GRID 1
/*******************************************************************************
DECLARE GLOBAL VARIABLES
******************************************************************************/
extern int CellCount; // Total number of cells in the environment
extern int CellCountWithoutEdges; // Total number of cells in the environment,
// not counting the edges
extern int CellFloatByteCount; // Total number of bytes if there are enough
// floats for each cell
extern int CellFloatByteCountWithoutEdges; // Total number of bytes if there are
// enough floats for each cell, not
// counting the edges
extern int CellCharByteCount; // Total number of bytes if there are enough chars
// for each cell
extern float * HostHeats; // Array of heat values for each cell (host memory)
float * DeviceHeats; // Array of heat values for each cell (device memory)
extern float * HostNewHeats; // Array of heat values for each cell in the next
// time step (host memory)
float * DeviceNewHeats; // Array of heat values for each cell in the next time
extern float * HostDiffs; // Array of differences between the heat values for
// each cell in the current and next time steps (host
// memory)
float * DeviceDiffs; // Array of differences between the heat values for each
extern char * OutputStr; // String to output at each time step
extern bool IsStillRunning; // Used to keep track of whether the model should
// continue into the next time step
extern int TimeIdx; // The current time step
/*******************************************************************************
DECLARE FUNCTIONS
******************************************************************************/
void TryCuda(hipError_t const err);
__global__ void AverageNeighborHeats(float const * const DeviceHeats,
float * const DeviceNewHeats,
float * const DeviceDiffs,
int const CellCountWithoutEdges);
__global__ void AdvanceHeats(float * const DeviceHeats,
float const * const DeviceNewHeats,
int const CellCountWithoutEdges);
/*******************************************************************************
DEFINE FUNCTIONS
******************************************************************************/
// Define a function to check whether a CUDA call was successful
// Abort the process with a diagnostic on stderr when a HIP call has failed;
// a hipSuccess status is a no-op.
void TryCuda(hipError_t const err)
{
    if (err == hipSuccess)
    {
        return; // nothing to report
    }
    fprintf(stderr, "CUDA Error: %s\n", hipGetErrorString(err));
    exit(EXIT_FAILURE);
}
extern "C"
{
// Allocate the three device-side arrays used by the model. Sizes come from
// host-side globals: CellFloatByteCount covers every cell (edges included),
// the *WithoutEdges size covers interior cells only. Aborts via TryCuda on
// allocation failure.
void InitDeviceMemory()
{
TryCuda(hipMalloc((void**)&DeviceHeats, CellFloatByteCount));
TryCuda(hipMalloc((void**)&DeviceNewHeats,
CellFloatByteCountWithoutEdges));
TryCuda(hipMalloc((void**)&DeviceDiffs, CellFloatByteCountWithoutEdges));
}
}
extern "C"
{
// Copy the freshly computed interior heats to the device, launch the kernel
// that writes them back into the full (edge-included) heat array, then copy
// the full array back to the host. All HIP calls are now error-checked via
// TryCuda, for consistency with AverageNeighborHeatsOnDevice (the original
// left both memcpys and the launch unchecked).
void AdvanceHeatsOnDevice()
{
TryCuda(hipMemcpy(DeviceNewHeats, HostNewHeats, CellFloatByteCountWithoutEdges,
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( AdvanceHeats), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, DeviceHeats,
DeviceNewHeats,
CellCountWithoutEdges);
TryCuda(hipGetLastError()); // catch launch-configuration errors
TryCuda(hipMemcpy(HostHeats, DeviceHeats, CellFloatByteCount,
hipMemcpyDeviceToHost));
}
}
extern "C"
{
// Copy the current heats to the device, launch the averaging kernel that
// fills DeviceNewHeats and DeviceDiffs for every interior cell, then copy
// both result arrays back to the host. Adds a hipGetLastError check so a
// failed kernel launch is reported instead of silently corrupting results.
void AverageNeighborHeatsOnDevice()
{
TryCuda(hipMemcpy(DeviceHeats, HostHeats, CellFloatByteCount,
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( AverageNeighborHeats), dim3(BLOCKS_PER_GRID),
dim3(THREADS_PER_BLOCK), 0, 0, DeviceHeats, DeviceNewHeats,
DeviceDiffs,
CellCountWithoutEdges);
TryCuda(hipGetLastError()); // catch launch-configuration errors
TryCuda(hipMemcpy(HostNewHeats, DeviceNewHeats,
CellFloatByteCountWithoutEdges, hipMemcpyDeviceToHost));
TryCuda(hipMemcpy(HostDiffs, DeviceDiffs,
CellFloatByteCountWithoutEdges, hipMemcpyDeviceToHost));
}
}
// Preconditions: Heats has not been updated at TimeIdx and cellIdx
// Postconditions: NewHeats has been updated at TimeIdx and cellIdx
// Diffs has been updated at TimeIdx and cellIdx
// One thread per interior cell: the new heat is the average of the four
// neighbors in the full (edge-included) grid, and the signed change from the
// current heat is recorded in DeviceDiffs. NEW_TO_OLD (params.h) maps an
// interior-cell index to its index in the edge-included array.
__global__ void AverageNeighborHeats(float const * const DeviceHeats,
float * const DeviceNewHeats,
float * const DeviceDiffs,
int const CellCountWithoutEdges)
{
int const cellIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (cellIdx < CellCountWithoutEdges)
{
// 0.25f keeps the arithmetic in single precision to match the float data
// (the former 0.25 literal promoted every average to double).
DeviceNewHeats[cellIdx] = 0.25f * (DeviceHeats[NEW_TO_OLD(cellIdx) -
COLUMN_COUNT] +
DeviceHeats[NEW_TO_OLD(cellIdx) - 1] +
DeviceHeats[NEW_TO_OLD(cellIdx) + 1] +
DeviceHeats[NEW_TO_OLD(cellIdx) +
COLUMN_COUNT]);
DeviceDiffs[cellIdx] = DeviceNewHeats[cellIdx] -
DeviceHeats[NEW_TO_OLD(cellIdx)];
}
}
// Preconditions: NewHeats has been updated at TimeIdx
// Postconditions: Heats has been updated at TimeIdx
// One thread per interior cell: publish the freshly averaged heat back into
// the full, edge-included heat array (NEW_TO_OLD maps the index).
__global__ void AdvanceHeats(float * const DeviceHeats,
float const * const DeviceNewHeats,
int const CellCountWithoutEdges)
{
int const cellIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (cellIdx >= CellCountWithoutEdges)
return; // grid tail: no cell for this thread
DeviceHeats[NEW_TO_OLD(cellIdx)] = DeviceNewHeats[cellIdx];
}
extern "C"
{
// Release the three device arrays, in reverse order of allocation.
// Aborts via TryCuda if any free fails.
void FinalizeDeviceMemory()
{
// Free device memory
TryCuda(hipFree(DeviceDiffs));
TryCuda(hipFree(DeviceNewHeats));
TryCuda(hipFree(DeviceHeats));
}
}
| 1aec944bc2569ddd7a3b5261ffdff49bbc301c57.cu | /* Model of heat diffusion - a 2D rectangular environment has its left edge with
a fixed non-zero heat; the other 3 edges have fixed zero heat. The middle of
the environment starts with zero heat, but as the model advances, the heat
of each cell in the middle gets set to the average of its 4 neighbors. The
model advances until the amount of overall heat change from one time step to
the next is sufficiently small.
*/
/*******************************************************************************
IMPORT LIBRARIES
******************************************************************************/
#include <stdio.h>
#include "params.h"
/*******************************************************************************
DEFINE MACROS
******************************************************************************/
// Define the number of CUDA threads in each CUDA warp (group of threads that
// execute instructions in lock-step)
#define THREADS_PER_WARP 32
// Define the maximum number of CUDA warps in each CUDA block
#define MAX_WARPS_PER_BLOCK 16
// Define the number of CUDA threads in each CUDA block
#define THREADS_PER_BLOCK ((THREADS_PER_WARP) * (MAX_WARPS_PER_BLOCK))
// Define the number of CUDA blocks in each CUDA grid
#define BLOCKS_PER_GRID 1
/*******************************************************************************
DECLARE GLOBAL VARIABLES
******************************************************************************/
extern int CellCount; // Total number of cells in the environment
extern int CellCountWithoutEdges; // Total number of cells in the environment,
// not counting the edges
extern int CellFloatByteCount; // Total number of bytes if there are enough
// floats for each cell
extern int CellFloatByteCountWithoutEdges; // Total number of bytes if there are
// enough floats for each cell, not
// counting the edges
extern int CellCharByteCount; // Total number of bytes if there are enough chars
// for each cell
extern float * HostHeats; // Array of heat values for each cell (host memory)
float * DeviceHeats; // Array of heat values for each cell (device memory)
extern float * HostNewHeats; // Array of heat values for each cell in the next
// time step (host memory)
float * DeviceNewHeats; // Array of heat values for each cell in the next time
extern float * HostDiffs; // Array of differences between the heat values for
// each cell in the current and next time steps (host
// memory)
float * DeviceDiffs; // Array of differences between the heat values for each
extern char * OutputStr; // String to output at each time step
extern bool IsStillRunning; // Used to keep track of whether the model should
// continue into the next time step
extern int TimeIdx; // The current time step
/*******************************************************************************
DECLARE FUNCTIONS
******************************************************************************/
void TryCuda(cudaError_t const err);
__global__ void AverageNeighborHeats(float const * const DeviceHeats,
float * const DeviceNewHeats,
float * const DeviceDiffs,
int const CellCountWithoutEdges);
__global__ void AdvanceHeats(float * const DeviceHeats,
float const * const DeviceNewHeats,
int const CellCountWithoutEdges);
/*******************************************************************************
DEFINE FUNCTIONS
******************************************************************************/
// Define a function to check whether a CUDA call was successful
// Abort the process with a diagnostic on stderr when a CUDA call has failed;
// a cudaSuccess status is a no-op.
void TryCuda(cudaError_t const err)
{
    if (err == cudaSuccess)
    {
        return; // nothing to report
    }
    fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
extern "C"
{
// Allocate the three device-side arrays used by the model. Sizes come from
// host-side globals: CellFloatByteCount covers every cell (edges included),
// the *WithoutEdges size covers interior cells only. Aborts via TryCuda on
// allocation failure.
void InitDeviceMemory()
{
TryCuda(cudaMalloc((void**)&DeviceHeats, CellFloatByteCount));
TryCuda(cudaMalloc((void**)&DeviceNewHeats,
CellFloatByteCountWithoutEdges));
TryCuda(cudaMalloc((void**)&DeviceDiffs, CellFloatByteCountWithoutEdges));
}
}
extern "C"
{
// Copy the freshly computed interior heats to the device, launch the kernel
// that writes them back into the full (edge-included) heat array, then copy
// the full array back to the host. All CUDA calls are now error-checked via
// TryCuda, for consistency with AverageNeighborHeatsOnDevice (the original
// left both memcpys and the launch unchecked).
void AdvanceHeatsOnDevice()
{
TryCuda(cudaMemcpy(DeviceNewHeats, HostNewHeats, CellFloatByteCountWithoutEdges,
cudaMemcpyHostToDevice));
AdvanceHeats<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(DeviceHeats,
DeviceNewHeats,
CellCountWithoutEdges);
TryCuda(cudaGetLastError()); // catch launch-configuration errors
TryCuda(cudaMemcpy(HostHeats, DeviceHeats, CellFloatByteCount,
cudaMemcpyDeviceToHost));
}
}
extern "C"
{
// Copy the current heats to the device, launch the averaging kernel that
// fills DeviceNewHeats and DeviceDiffs for every interior cell, then copy
// both result arrays back to the host. Adds a cudaGetLastError check so a
// failed kernel launch is reported instead of silently corrupting results.
void AverageNeighborHeatsOnDevice()
{
TryCuda(cudaMemcpy(DeviceHeats, HostHeats, CellFloatByteCount,
cudaMemcpyHostToDevice));
AverageNeighborHeats<<<BLOCKS_PER_GRID,
THREADS_PER_BLOCK>>>(DeviceHeats, DeviceNewHeats,
DeviceDiffs,
CellCountWithoutEdges);
TryCuda(cudaGetLastError()); // catch launch-configuration errors
TryCuda(cudaMemcpy(HostNewHeats, DeviceNewHeats,
CellFloatByteCountWithoutEdges, cudaMemcpyDeviceToHost));
TryCuda(cudaMemcpy(HostDiffs, DeviceDiffs,
CellFloatByteCountWithoutEdges, cudaMemcpyDeviceToHost));
}
}
// Preconditions: Heats has not been updated at TimeIdx and cellIdx
// Postconditions: NewHeats has been updated at TimeIdx and cellIdx
// Diffs has been updated at TimeIdx and cellIdx
// One thread per interior cell: the new heat is the average of the four
// neighbors in the full (edge-included) grid, and the signed change from the
// current heat is recorded in DeviceDiffs. NEW_TO_OLD (params.h) maps an
// interior-cell index to its index in the edge-included array.
__global__ void AverageNeighborHeats(float const * const DeviceHeats,
float * const DeviceNewHeats,
float * const DeviceDiffs,
int const CellCountWithoutEdges)
{
int const cellIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (cellIdx < CellCountWithoutEdges)
{
// 0.25f keeps the arithmetic in single precision to match the float data
// (the former 0.25 literal promoted every average to double).
DeviceNewHeats[cellIdx] = 0.25f * (DeviceHeats[NEW_TO_OLD(cellIdx) -
COLUMN_COUNT] +
DeviceHeats[NEW_TO_OLD(cellIdx) - 1] +
DeviceHeats[NEW_TO_OLD(cellIdx) + 1] +
DeviceHeats[NEW_TO_OLD(cellIdx) +
COLUMN_COUNT]);
DeviceDiffs[cellIdx] = DeviceNewHeats[cellIdx] -
DeviceHeats[NEW_TO_OLD(cellIdx)];
}
}
// Preconditions: NewHeats has been updated at TimeIdx
// Postconditions: Heats has been updated at TimeIdx
// One thread per interior cell: publish the freshly averaged heat back into
// the full, edge-included heat array (NEW_TO_OLD maps the index).
__global__ void AdvanceHeats(float * const DeviceHeats,
float const * const DeviceNewHeats,
int const CellCountWithoutEdges)
{
int const cellIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (cellIdx >= CellCountWithoutEdges)
return; // grid tail: no cell for this thread
DeviceHeats[NEW_TO_OLD(cellIdx)] = DeviceNewHeats[cellIdx];
}
extern "C"
{
// Release the three device arrays, in reverse order of allocation.
// Aborts via TryCuda if any free fails.
void FinalizeDeviceMemory()
{
// Free device memory
TryCuda(cudaFree(DeviceDiffs));
TryCuda(cudaFree(DeviceNewHeats));
TryCuda(cudaFree(DeviceHeats));
}
}
|
bf555ea934737a4bcc98165eeb8916c99e0283c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BDIM 32
hipDevice_t device;
hipCtx_t context;
hipModule_t module;
hipFunction_t function;
#define module_file "kernel.cubin"
#define kernel_name "arr_kernel"
// Initialize the driver-level state used by main(): select device 0, create
// a context on it, load the precompiled module from module_file, and look up
// the kernel entry point named kernel_name. Exits the process on any failure,
// destroying the context first where one exists.
void initCUDA()
{
int deviceCount = 0;
hipError_t err = hipInit(0);
if (err == hipSuccess)
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "Error: no devices supporting CUDA\n");
exit(-1);
}
// get first CUDA device
hipDeviceGet(&device, 0);
char name[100];
hipDeviceGetName(name, 100, device);
printf("> Using device 0: %s\n", name);
err = hipCtxCreate(&context, 0, device);
if (err != hipSuccess) {
fprintf(stderr, "* Error initializing the CUDA context.\n");
hipCtxDestroy(context);
exit(-1);
}
// Load the offline-compiled code object (cubin) containing the kernel.
err = hipModuleLoad(&module, module_file);
if (err != hipSuccess) {
fprintf(stderr, "* Error loading the module %s\n", module_file);
const char * str;
hipGetErrorString(err, &str);
fprintf(stderr, "%s\n", str);
hipCtxDestroy(context);
exit(-1);
}
// Resolve the kernel entry point inside the loaded module.
err = hipModuleGetFunction(&function, module, kernel_name);
if (err != hipSuccess) {
fprintf(stderr, "* Error getting kernel function %s\n", kernel_name);
const char * str;
hipGetErrorString(err, &str);
fprintf(stderr, "%s\n", str);
hipCtxDestroy(context);
exit(-1);
}
}
int main() {
// Buffers are sized for BDIM*16 ints, but only the first BDIM elements are
// initialized and the kernel is launched with BDIM threads in one block.
int size = BDIM * 16 * sizeof(int);
int *in = (int *)malloc(size);
int *out = (int *)malloc(size);
int *in_dev, *out_dev;
initCUDA();
hipMalloc(&in_dev, size);
hipMalloc(&out_dev, size);
for (int i = 0; i < BDIM; ++i)
in[i] = i;
hipMemcpy(in_dev, in, size, hipMemcpyHostToDevice);
// Driver-API style argument passing: an array of addresses of the args.
void * args[2] = {&in_dev, &out_dev};
hipModuleLaunchKernel(function,
1, 1, 1,
BDIM, 1, 1,
0, 0, args, 0);
// Test
// The blocking memcpy below also synchronizes with the kernel launch.
hipMemcpy(out, out_dev, size, hipMemcpyDeviceToHost);
printf("%d\n",out[0]);
// NOTE(review): host/device buffers and the context are never released;
// tolerable for a demo since the process exits here.
return 0;
}
| bf555ea934737a4bcc98165eeb8916c99e0283c6.cu | #include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#define BDIM 32
CUdevice device;
CUcontext context;
CUmodule module;
CUfunction function;
#define module_file "kernel.cubin"
#define kernel_name "arr_kernel"
// Initialize the CUDA driver API state used by main(): select device 0,
// create a context on it, load the precompiled module from module_file, and
// look up the kernel entry point named kernel_name. Exits the process on any
// failure, destroying the context first where one exists.
void initCUDA()
{
int deviceCount = 0;
CUresult err = cuInit(0);
if (err == CUDA_SUCCESS)
cuDeviceGetCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "Error: no devices supporting CUDA\n");
exit(-1);
}
// get first CUDA device
cuDeviceGet(&device, 0);
char name[100];
cuDeviceGetName(name, 100, device);
printf("> Using device 0: %s\n", name);
err = cuCtxCreate(&context, 0, device);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error initializing the CUDA context.\n");
cuCtxDestroy(context);
exit(-1);
}
// Load the offline-compiled cubin containing the kernel.
err = cuModuleLoad(&module, module_file);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error loading the module %s\n", module_file);
const char * str;
cuGetErrorString(err, &str);
fprintf(stderr, "%s\n", str);
cuCtxDestroy(context);
exit(-1);
}
// Resolve the kernel entry point inside the loaded module.
err = cuModuleGetFunction(&function, module, kernel_name);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error getting kernel function %s\n", kernel_name);
const char * str;
cuGetErrorString(err, &str);
fprintf(stderr, "%s\n", str);
cuCtxDestroy(context);
exit(-1);
}
}
int main() {
// Buffers are sized for BDIM*16 ints, but only the first BDIM elements are
// initialized and the kernel is launched with BDIM threads in one block.
int size = BDIM * 16 * sizeof(int);
int *in = (int *)malloc(size);
int *out = (int *)malloc(size);
int *in_dev, *out_dev;
initCUDA();
// NOTE(review): mixes runtime API (cudaMalloc/cudaMemcpy) with the driver
// API context created in initCUDA; relies on runtime/driver interop —
// confirm this holds for the targeted CUDA version.
cudaMalloc(&in_dev, size);
cudaMalloc(&out_dev, size);
for (int i = 0; i < BDIM; ++i)
in[i] = i;
cudaMemcpy(in_dev, in, size, cudaMemcpyHostToDevice);
// Driver-API style argument passing: an array of addresses of the args.
void * args[2] = {&in_dev, &out_dev};
cuLaunchKernel(function,
1, 1, 1,
BDIM, 1, 1,
0, 0, args, 0);
// Test
// The blocking memcpy below also synchronizes with the kernel launch.
cudaMemcpy(out, out_dev, size, cudaMemcpyDeviceToHost);
printf("%d\n",out[0]);
// NOTE(review): host/device buffers and the context are never released;
// tolerable for a demo since the process exits here.
return 0;
}
|
bf7c64b9917085b9dd25cdc72ccfc4a50dd372ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sphericalVertexModel.cuh"
/*!
\addtogroup modelKernels
@{
*/
// One thread per particle: displace pos[idx] by scale * disp[idx], with the
// on-sphere projection semantics delegated to sphericalDomain::move.
__global__ void gpu_move_particles_on_sphere_kernel(dVec *pos,
dVec *disp,
sphericalDomain sphere,
scalar scale,
int N
)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
sphere.move(pos[idx],scale * disp[idx]);
};
// Host wrapper: launch gpu_move_particles_on_sphere_kernel over all N
// particles, using 512 threads per block (32 for small N). The launch is
// checked via HANDLE_ERROR; returns true on success.
bool gpu_move_particles_on_sphere(dVec *pos,
dVec *disp,
sphericalDomain &sphere,
scalar scale,
int N
)
{
unsigned int block_size = 512;
if (N < 512) block_size = 32;
unsigned int nblocks = N/block_size + 1;
hipLaunchKernelGGL(( gpu_move_particles_on_sphere_kernel), dim3(nblocks),dim3(block_size), 0, 0, pos,disp,sphere,scale,N);
HANDLE_ERROR(hipGetLastError());
// Fix: "return hipSuccess" (enum value 0) converted to bool false, so this
// wrapper always reported failure; report success truthfully.
return true;
}
// Per-cell geometry pass, one thread per cell:
//  (1) the cell position is the (sphere-projected) sum of its vertex
//      positions;
//  (2) a walk around the vertex ring accumulates the geodesic perimeter and
//      the angle sum used for the spherical-polygon area;
//  (3) each vertex's (last, current, next) neighbor positions are cached in
//      the per-(vertex,cell) arrays consumed by the force kernel, indexed by
//      the slot where vertexCellNeighbors records this cell.
__global__ void gpu_spherical_vertex_model_geometry_kernel(dVec *vertexPos,
dVec *cellPos,
int *cellNeighbors,
int *vertexCellNeighbors,
unsigned int *vertexCellNumberOfNeighbors,
dVec *currentVertexAroundCell,
dVec *lastVertexAroundCell,
dVec *nextVertexAroundCell,
unsigned int *cellNumberOfNeighbors,
scalar2 *areaPerimeter,
Index2D cellNeighborIndex,
Index2D neighborIndex,
sphericalDomain sphere,
int nCells
)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nCells)
return;
int neighs = cellNumberOfNeighbors[idx];
// Cell position: sum of vertex positions, mapped back onto the sphere.
dVec cPos(0.0);
for (int nn = 0; nn < neighs;++nn)
cPos = cPos + vertexPos[cellNeighbors[cellNeighborIndex(nn,idx)]];
sphere.putInBoxReal(cPos);
cellPos[idx] = cPos;
//if(idx ==0)
// printf("%f %f %f\n", cellPos[idx][0],cellPos[idx][1],cellPos[idx][2]);
// Seed the ring walk with the last two vertices so that on entry to the
// loop (vlast, vcur) are valid and vnext is read from slot nn.
int lastVertexIdx = cellNeighbors[cellNeighborIndex(neighs-2,idx)];
int curVertexIdx = cellNeighbors[cellNeighborIndex(neighs-1,idx)];
int nextVertexIdx;
dVec lastVertexPos = vertexPos[lastVertexIdx];
dVec curVertexPos = vertexPos[curVertexIdx];
dVec nextVertexPos;
scalar perimeter = 0.;
scalar area = 0.;
scalar tempVal;
for (int nn = 0; nn < neighs; ++nn)
{
int cni = cellNeighborIndex(nn,idx);
// Find the slot of this (vertex, cell) pair in the vertex's neighbor
// list; the cached positions below are stored at that slot.
int vNeighs = vertexCellNumberOfNeighbors[curVertexIdx];
int forceSetIdx = -1;
for (int vn = 0; vn < vNeighs; ++vn)
{
int newIdx = neighborIndex(vn,curVertexIdx);
if(vertexCellNeighbors[newIdx] == idx)
forceSetIdx = newIdx;
}
nextVertexIdx = cellNeighbors[cni];
nextVertexPos = vertexPos[nextVertexIdx];
sphere.geodesicDistance(lastVertexPos,curVertexPos,tempVal);
perimeter += tempVal;
sphere.includedAngle(lastVertexPos,curVertexPos,nextVertexPos,tempVal);
area += tempVal;
lastVertexAroundCell[forceSetIdx] = lastVertexPos;
currentVertexAroundCell[forceSetIdx] = curVertexPos;
nextVertexAroundCell[forceSetIdx] = nextVertexPos;
lastVertexPos = curVertexPos;
curVertexIdx = nextVertexIdx;
curVertexPos = nextVertexPos;
}
// Spherical polygon area by angular excess: angle sum minus (n-2)*pi,
// wrapped into [0, pi) and scaled by R^2.
area = (area-(neighs-2)*PI);
int extraAngularArea = floor(area/(1.0*PI));
if(extraAngularArea > 0)
area -= extraAngularArea*PI;
area *= (sphere.radius*sphere.radius);
areaPerimeter[idx].x = area;
areaPerimeter[idx].y = perimeter;
};
// Host wrapper: launch the per-cell geometry kernel over all nCells cells,
// using 512 threads per block (32 for small systems). The launch is checked
// via HANDLE_ERROR; returns true on success.
bool gpu_spherical_vertex_model_geometry(dVec *vertexPos,
dVec *cellPos,
int *cellNeighbors,
int *vertexCellNeighbors,
unsigned int *vertexCellNumberOfNeighbors,
dVec *currentVertexAroundCell,
dVec *lastVertexAroundCell,
dVec *nextVertexAroundCell,
unsigned int *cellNumberOfNeighbors,
scalar2 *areaPerimeter,
Index2D cellNeighborIndex,
Index2D neighborIndex,
sphericalDomain &sphere,
int nCells
)
{
unsigned int block_size = 512;
if (nCells < 512) block_size = 32;
unsigned int nblocks = nCells/block_size + 1;
hipLaunchKernelGGL(( gpu_spherical_vertex_model_geometry_kernel), dim3(nblocks),dim3(block_size), 0, 0,
vertexPos,cellPos,cellNeighbors,vertexCellNeighbors,vertexCellNumberOfNeighbors,
currentVertexAroundCell,lastVertexAroundCell,nextVertexAroundCell,
cellNumberOfNeighbors,areaPerimeter,cellNeighborIndex,neighborIndex,sphere,
nCells);
HANDLE_ERROR(hipGetLastError());
// Fix: "return hipSuccess" (enum value 0) converted to bool false, so this
// wrapper always reported failure; report success truthfully.
return true;
};
// Per-vertex force pass, one thread per vertex. For each cell adjacent to the
// vertex, accumulates the negative gradient of a quadratic energy in the
// cell's area and perimeter deviations from their preferred values:
// perimeter terms come from geodesic-distance gradients to the previous and
// next ring vertices, area terms (weighted by Kr) from spherical-triangle
// area gradients against the cell center. Gradients are assembled in the
// local (thetaHat, phiHat) tangent basis at the vertex.
__global__ void gpu_quadratic_spherical_cellular_force_kernel(dVec *cellPos,
dVec *vertexPos,
dVec *forces,
int *vertexCellNeighbors,
unsigned int *vertexCellNeighborNumber,
dVec *currentVertexAroundCell,
dVec *lastVertexAroundCell,
dVec *nextVertexAroundCell,
unsigned int *cellNumberOfNeighbors,
scalar2 *areaPerimeter,
scalar2 *areaPerimeterPreference,
Index2D neighborIndex,
scalar Kr,
sphericalDomain sphere,
int N)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
dVec vLast,vCur,vNext,cPos,tempVar;
dVec f(0.0);
int vNeighs = vertexCellNeighborNumber[idx];
for (int cc = 0; cc < vNeighs; ++cc)
{
dVec fSet(0.0);
int vni = neighborIndex(cc,idx);
int cellIndex = vertexCellNeighbors[vni];
cPos = cellPos[cellIndex];
// Ring positions cached by the geometry kernel at this (vertex,cell) slot.
vLast = lastVertexAroundCell[vni];
vCur = currentVertexAroundCell[vni];
vNext =nextVertexAroundCell[vni];
scalar areaDifference = areaPerimeter[cellIndex].x - areaPerimeterPreference[cellIndex].x;
scalar perimeterDifference = areaPerimeter[cellIndex].y - areaPerimeterPreference[cellIndex].y;
dVec thetaHat, phiHat;
sphere.cartesianSphericalBasisChange(vCur,thetaHat,phiHat);
// d/dv of (perimeter - p0)^2: edges to the previous and next vertex.
sphere.gradientGeodesicDistance(vCur,vLast,tempVar,thetaHat,phiHat);
fSet -= 2.0*perimeterDifference*tempVar;
sphere.gradientGeodesicDistance(vCur,vNext,tempVar,thetaHat,phiHat);
fSet -= 2.0*perimeterDifference*tempVar;
// d/dv of Kr*(area - a0)^2: the two triangles sharing this vertex.
sphere.gradientTriangleArea(vCur,vLast,cPos,tempVar,thetaHat,phiHat);
fSet -= 2.0*Kr*areaDifference*tempVar;
sphere.gradientTriangleArea(vCur,cPos,vNext,tempVar,thetaHat,phiHat);
fSet -= 2.0*Kr*areaDifference*tempVar;
// Skip NaN contributions — presumably guards degenerate geometry in the
// gradient routines; TODO confirm the source of the NaNs.
if(!isnan(fSet[0]))
f += fSet;
};
forces[idx] = f;
};
// Host wrapper: launch the per-vertex quadratic force kernel over all N
// vertices, using 512 threads per block (32 for small systems). The launch
// is checked via HANDLE_ERROR; returns true on success.
bool gpu_quadratic_spherical_cellular_force(dVec *cellPos,
dVec *vertexPos,
dVec *forces,
int *vertexCellNeighbors,
unsigned int *vertexCellNeighborNumber,
dVec *currentVertexAroundCell,
dVec *lastVertexAroundCell,
dVec *nextVertexAroundCell,
unsigned int *cellNumberOfNeighbors,
scalar2 *areaPerimeter,
scalar2 *areaPerimeterPreference,
Index2D neighborIndex,
scalar Kr,
sphericalDomain &sphere,
int N)
{
unsigned int block_size = 512;
if (N < 512) block_size = 32;
unsigned int nblocks = N/block_size + 1;
hipLaunchKernelGGL(( gpu_quadratic_spherical_cellular_force_kernel), dim3(nblocks),dim3(block_size), 0, 0, cellPos,vertexPos,forces,
vertexCellNeighbors,vertexCellNeighborNumber,currentVertexAroundCell,lastVertexAroundCell,nextVertexAroundCell,
cellNumberOfNeighbors,areaPerimeter,areaPerimeterPreference,neighborIndex,Kr,sphere,N);
HANDLE_ERROR(hipGetLastError());
// Fix: "return hipSuccess" (enum value 0) converted to bool false, so this
// wrapper always reported failure; report success truthfully.
return true;
};
// T1 edge test, one thread per directed edge slot (vertex1 = idx/3, three
// neighbor slots per vertex). Only the vertex1 < vertex2 orientation does the
// test, so each undirected edge is examined once. An edge is flagged for a
// T1 flip when its geodesic length drops below T1THRESHOLD; if any cell
// adjacent to either endpoint is already at vertexMax vertices, d_grow[0] is
// set so the host can enlarge the cell-vertex lists first.
__global__ void vm_simple_T1_test_kernel(dVec *d_vertexPositions,
int *d_vertexNeighbors,
int *d_vertexEdgeFlips,
int *d_vertexCellNeighbors,
unsigned int *d_cellVertexNum,
int *d_cellVertices,
sphericalDomain sphere,
scalar T1THRESHOLD,
int NvTimes3,
int vertexMax,
int *d_grow,
Index2D cellNeighborIndex)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= NvTimes3)
return;
int vertex1 = idx/3;
int vertex2 = d_vertexNeighbors[idx];
scalar arcLength;
if(vertex1 < vertex2)
{
sphere.geodesicDistance( d_vertexPositions[vertex1], d_vertexPositions[vertex2],arcLength);
if(arcLength < T1THRESHOLD)
{
d_vertexEdgeFlips[idx]=1;
//test the number of neighbors of the cells connected to v1 and v2 to see if the
//cell list should grow. This is kind of slow, and I wish I could optimize it away,
//or at least not test for it during every time step. The latter seems pretty doable.
//But this is boring, so we'll revisit if optimizations require it
if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex1]] == vertexMax)
d_grow[0] = 1;
if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex1+1]] == vertexMax)
d_grow[0] = 1;
if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex1+2]] == vertexMax)
d_grow[0] = 1;
if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex2]] == vertexMax)
d_grow[0] = 1;
if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex2+1]] == vertexMax)
d_grow[0] = 1;
if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex2+2]] == vertexMax)
d_grow[0] = 1;
}
else
d_vertexEdgeFlips[idx]=0;
}
else
d_vertexEdgeFlips[idx] = 0;
};
//!Test every edge for a potential T1 event; see if vertexMax needs to increase
// Host wrapper for vm_simple_T1_test_kernel: one thread per (vertex, slot)
// pair, i.e. 3*Nvertices threads, 512 per block (32 for small systems). The
// launch is checked via HANDLE_ERROR; returns true on success.
bool gpu_vm_test_edges_for_T1(dVec *d_vertexPositions,
int *d_vertexNeighbors,
int *d_vertexEdgeFlips,
int *d_vertexCellNeighbors,
unsigned int *d_cellVertexNum,
int *d_cellVertices,
sphericalDomain &sphere,
scalar T1THRESHOLD,
int Nvertices,
int vertexMax,
int *d_grow,
Index2D &cellNeighborIndex)
{
unsigned int blockSize = 512;
int nV3 = Nvertices*3;
if (nV3 < 512) blockSize = 32;
unsigned int nBlocks = nV3/blockSize + 1;
hipLaunchKernelGGL(( vm_simple_T1_test_kernel), dim3(nBlocks),dim3(blockSize), 0, 0, d_vertexPositions,d_vertexNeighbors,
d_vertexEdgeFlips,d_vertexCellNeighbors,
d_cellVertexNum,d_cellVertices,
sphere,T1THRESHOLD,
nV3,vertexMax,d_grow,cellNeighborIndex);
HANDLE_ERROR(hipGetLastError());
// Fix: "return hipSuccess" (enum value 0) converted to bool false, so this
// wrapper always reported failure; report success truthfully.
return true;
};
/*!
There will be severe topology mismatches if a cell is involved in more than one T1 transition
simultaneously (due to incoherent updates of the cellVertices structure). So, go through the
current list of edges that are marked to take part in a T1 transition and select one edge per
cell to be flipped on this trip through the functions.
*/
// One thread per cell. For each vertex this cell "owns" (it only acts when it
// has the lowest index among the three cells at the vertex), scan the three
// edge slots for pending flips. The first eligible flip tries to reserve all
// four involved cells via atomicExch on d_cellEdgeFlips; if every reservation
// succeeds, the edge is moved from d_vertexEdgeFlips to
// d_vertexEdgeFlipsCurrent and its four-cell set is recorded in d_cellSets.
// d_finishedFlippingEdges[0] signals that at least one flip was found this
// pass; [1] signals that more remain and the host must iterate again.
__global__ void vm_one_T1_per_cell_per_vertex_kernel(
int* __restrict__ d_vertexEdgeFlips,
int* __restrict__ d_vertexEdgeFlipsCurrent,
const int* __restrict__ d_vertexNeighbors,
const int* __restrict__ d_vertexCellNeighbors,
const unsigned int* __restrict__ d_cellVertexNum,
const int * __restrict__ d_cellVertices,
int *d_finishedFlippingEdges,
int *d_cellEdgeFlips,
int4 *d_cellSets,
Index2D cellNeighborIndex,
int Ncells)
{
unsigned int cell = blockDim.x * blockIdx.x + threadIdx.x;
if (cell >= Ncells)
return;
//look through every vertex of the cell
int cneigh = d_cellVertexNum[cell];
int vertex;
bool flipFound = false;
bool moreFlipsFound = false;
for (int cc = 0; cc < cneigh; ++cc)
{
vertex = d_cellVertices[cellNeighborIndex(cc,cell)];
//what are the other cells attached to this vertex? For correctness, only one cell should
//own each vertex here. For simplicity, only the lowest-indexed cell gets to do any work.
int c1,c2,c3,c4;
c1 = d_vertexCellNeighbors[3*vertex];
c2 = d_vertexCellNeighbors[3*vertex+1];
c3 = d_vertexCellNeighbors[3*vertex+2];
if(c1 < cell || c2 < cell || c3 < cell)
continue;
for (int idx = 3*vertex; idx < 3*vertex+3; ++idx)
{
if(d_vertexEdgeFlips[idx] == 1)
{
int vertex2 = d_vertexNeighbors[idx];
// The fourth cell is the one adjacent to vertex2 but not to vertex1.
int ctest;
for (int ff = 0; ff < 3; ++ff)
{
ctest = d_vertexCellNeighbors[3*vertex2+ff];
if(ctest != c1 && ctest != c2 && ctest != c3)
c4=ctest;
}
if (flipFound)
{
moreFlipsFound = true;
break;
}
//check if the cells have been reserved; if not reserve them
// atomicExch returns the previous flag: 0 for all four means this
// thread won every reservation and may stage the flip.
int cc1 = atomicExch(&(d_cellEdgeFlips[c1]),1);
int cc2 = atomicExch(&(d_cellEdgeFlips[c2]),1);
int cc3 = atomicExch(&(d_cellEdgeFlips[c3]),1);
int cc4 = atomicExch(&(d_cellEdgeFlips[c4]),1);
flipFound = true;
if(cc1 ==0 && cc2 ==0 &&cc3==0&&cc4==0)
{
// printf("(%i,%i,%i,%i)\t(%i,%i)\n",c1,c2,c3,c4,vertex,vertex2);
atomicExch(&d_vertexEdgeFlipsCurrent[idx],1);
atomicExch(&d_vertexEdgeFlips[idx],0);
int4 cs;cs.x=c1;cs.y=c2;cs.z=c3;cs.w=c4;
d_cellSets[idx] = cs;
};
}
};
};
if (flipFound)
{
d_finishedFlippingEdges[0] = 1;
if(moreFlipsFound)
d_finishedFlippingEdges[1] = 1;
};
};
// Host wrapper for vm_one_T1_per_cell_per_vertex_kernel: select at most one
// T1 flip per cell per pass (see the kernel's comment block), one thread per
// cell, 512 per block (32 for small systems). The launch is checked via
// HANDLE_ERROR; returns true on success.
bool gpu_vm_parse_multiple_flips(
int *d_vertexEdgeFlips,
int *d_vertexEdgeFlipsCurrent,
int *d_vertexNeighbors,
int *d_vertexCellNeighbors,
unsigned int *d_cellVertexNum,
int *d_cellVertices,
int *d_finishedFlippingEdges,
int *d_edgeFlips,
int4 *d_cellSets,
Index2D &cellNeighborIndex,
int Ncells)
{
unsigned int block_size = 512;
/*The issue is that if a cell is involved in two edge flips done by different threads, the resulting
data structure for what vertices belong to cells and what cells border which vertex will be
inconsistently updated.
The strategy will be to take the d_vertexEdgeFlips list, put at most one T1 per cell per vertex into the
d_vertexEdgeFlipsCurrent list (erasing it from the d_vertexEdgeFlips list), and swap the edges specified
by the "current" list. If d_vertexEdgeFlips is empty, we will set d_finishedFlippingEdges[0] to 1,
and if any cell has multiple edges to flip, we set d_finishedFlippingEdges[1] to 1. As long
as the zeroth entry is 1, the flip edges kernel is called; as long as the first entry is 1 the cpp code will continue calling this gpu_avm_flip_edges function.
*/
//first select a few edges to flip...
if(Ncells <512) block_size = 32;
unsigned int nblocks = Ncells/block_size + 1;
hipLaunchKernelGGL(( vm_one_T1_per_cell_per_vertex_kernel), dim3(nblocks),dim3(block_size), 0, 0,
d_vertexEdgeFlips,
d_vertexEdgeFlipsCurrent,
d_vertexNeighbors,
d_vertexCellNeighbors,
d_cellVertexNum,
d_cellVertices,
d_finishedFlippingEdges,
d_edgeFlips,
d_cellSets,
cellNeighborIndex,
Ncells);
HANDLE_ERROR(hipGetLastError());
// Fix: "return hipSuccess" (enum value 0) converted to bool false, so this
// wrapper always reported failure; report success truthfully.
return true;
}
/*!
Flip any edge labeled for re-wiring in the vertexEdgeFlipsCurrent list
*/
__global__ void vm_flip_edges_kernel(int *d_vertexEdgeFlipsCurrent,
dVec *d_vertexPositions,
int *d_vertexNeighbors,
int *d_vertexCellNeighbors,
unsigned int *d_cellVertexNum,
int *d_cellVertices,
int *d_cellEdgeFlips,
int4 *d_cellSets,
sphericalDomain sphere,
Index2D cellNeighborIndex,
int NvTimes3
)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
//return if the index is out of bounds or if the edge isn't marked for flipping
if (idx >= NvTimes3 || d_vertexEdgeFlipsCurrent[idx] == 0)
return;
//identify the vertices and reset the flag
int vertex1 = idx/3;
int vertex2 = d_vertexNeighbors[idx];
d_vertexEdgeFlipsCurrent[idx] = 0;
//first, identify the cell and vertex set involved...
int4 cellSet;cellSet.x=-1;cellSet.y=-1;cellSet.z=-1;cellSet.w=-1;
//int4 vertexSet;
int2 vertexSet;//vertexSet.x = "b", vertexSet.y = "a"
/*
The following is fairly terrible GPU code, and should be considered for refactoring
*/
int4 cells = d_cellSets[idx];
int cell1,cell2,cell3;
int vlast, vcur, vnext, cneigh;
cell1 = cells.x;
cell2 = cells.y;
cell3 = cells.z;
cellSet.w = cells.w;
//classify cell1
cneigh = d_cellVertexNum[cell1];
vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cell1) ];
vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cell1) ];
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cell1)];
if(vcur == vertex1) break;
vlast = vcur;
vcur = vnext;
};
if(vlast == vertex2)
cellSet.x = cell1;
else if(vnext == vertex2)
cellSet.z = cell1;
else
{
cellSet.y = cell1;
};
//classify cell2
cneigh = d_cellVertexNum[cell2];
vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cell2) ];
vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cell2) ];
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cell2)];
if(vcur == vertex1) break;
vlast = vcur;
vcur = vnext;
};
if(vlast == vertex2)
cellSet.x = cell2;
else if(vnext == vertex2)
cellSet.z = cell2;
else
{
cellSet.y = cell2;
};
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cell1)];
}
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cell2)];
}
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cell3)];
}
//classify cell3
cneigh = d_cellVertexNum[cell3];
vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cell3) ];
vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cell3) ];
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cell3)];
if(vcur == vertex1) break;
vlast = vcur;
vcur = vnext;
};
if(vlast == vertex2)
cellSet.x = cell3;
else if(vnext == vertex2)
cellSet.z = cell3;
else
{
cellSet.y = cell3;
};
//get the vertexSet by examining cells j and l
cneigh = d_cellVertexNum[cellSet.y];
vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cellSet.y) ];
vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cellSet.y) ];
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cellSet.y)];
if(vcur == vertex1) break;
vlast = vcur;
vcur = vnext;
};
vertexSet.x=vnext;
cneigh = d_cellVertexNum[cellSet.w];
vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cellSet.w) ];
vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cellSet.w) ];
for (int cn = 0; cn < cneigh; ++cn)
{
vnext = d_cellVertices[cellNeighborIndex(cn,cellSet.w)];
if(vcur == vertex2) break;
vlast = vcur;
vcur = vnext;
};
vertexSet.y=vnext;
d_cellEdgeFlips[cells.x] = 0;
d_cellEdgeFlips[cells.y] = 0;
d_cellEdgeFlips[cells.z] = 0;
d_cellEdgeFlips[cells.w] = 0;
//forbid a T1 transition that would shrink a triangular cell
if (d_cellVertexNum[cellSet.x] ==3 || d_cellVertexNum[cellSet.z] ==3)
return;
if(cellSet.x <0 || cellSet.y < 0 || cellSet.z <0 || cellSet.w <0)
return;
//okay, we're ready to go. First, rotate the vertices in the edge and set them at twice their original distance
dVec v1 = d_vertexPositions[vertex1];
dVec v2 = d_vertexPositions[vertex2];
dVec midpoint = 0.5*(v1+v2);
sphere.putInBoxVirtual(midpoint);
//chose the angle of rotation based on whether the edges are currently crossed...
dVec vC = d_vertexPositions[vertexSet.y];//vSet.y is vSet.z
scalar determinant = vC[0]*(v1[1]*v2[2]-v1[2]*v2[1])
+vC[1]*(v1[2]*v2[0]-v1[0]*v2[2])
+vC[2]*(v1[0]*v2[1]-v1[1]*v2[0]);
determinant = determinant > 0 ? 1. : -1. ;
rodriguesRotation(v1,midpoint,-0.5*determinant*PI);
rodriguesRotation(v2,midpoint,-0.5*determinant*PI);
dVec diff = 0.5*(v1-v2);
v1 = v1 + diff;
v2 = v2 - diff;
sphere.putInBoxReal(v1);
sphere.putInBoxReal(v2);
d_vertexPositions[vertex1] = v1;
d_vertexPositions[vertex2] = v2;
//now, re-wire the cells and vertices
//start with the vertex-vertex and vertex-cell neighbors
for (int vert = 0; vert < 3; ++vert)
{
//vertex-cell neighbors
if(d_vertexCellNeighbors[3*vertex1+vert] == cellSet.z)
d_vertexCellNeighbors[3*vertex1+vert] = cellSet.w;
if(d_vertexCellNeighbors[3*vertex2+vert] == cellSet.x)
d_vertexCellNeighbors[3*vertex2+vert] = cellSet.y;
//vertex-vertex neighbors
if(d_vertexNeighbors[3*vertex1+vert] == vertexSet.x)
d_vertexNeighbors[3*vertex1+vert] = vertexSet.y;
if(d_vertexNeighbors[3*vertex2+vert] == vertexSet.y)
d_vertexNeighbors[3*vertex2+vert] = vertexSet.x;
if(d_vertexNeighbors[3*vertexSet.x+vert] == vertex1)
d_vertexNeighbors[3*vertexSet.x+vert] = vertex2;
if(d_vertexNeighbors[3*vertexSet.y+vert] == vertex2)
d_vertexNeighbors[3*vertexSet.y+vert] = vertex1;
};
//now rewire the cells...
//cell i loses v2 as a neighbor
cneigh = d_cellVertexNum[cellSet.x];
int cidx = 0;
for (int cc = 0; cc < cneigh-1; ++cc)
{
if(d_cellVertices[cellNeighborIndex(cc,cellSet.x)] == vertex2)
cidx +=1;
d_cellVertices[cellNeighborIndex(cc,cellSet.x)] = d_cellVertices[cellNeighborIndex(cidx,cellSet.x)];
cidx +=1;
};
d_cellVertexNum[cellSet.x] -= 1;
//cell j gains v2 in between v1 and b, so step through list backwards and insert
cneigh = d_cellVertexNum[cellSet.y];
bool found0 = false;
for (int cc = cneigh-1; cc >= 0; --cc)
{
int cellIndex = d_cellVertices[cellNeighborIndex(cc,cellSet.y)];
if(!found0)
d_cellVertices[cellNeighborIndex(cc+1,cellSet.y)] = cellIndex;
if(cellIndex == vertexSet.x)
{
found0 = true;
d_cellVertices[cellNeighborIndex(cc,cellSet.y)] = vertex2;
}
}
d_cellVertexNum[cellSet.y] += 1;
//cell k loses v1 as a neighbor
cneigh = d_cellVertexNum[cellSet.z];
cidx = 0;
for (int cc = 0; cc < cneigh-1; ++cc)
{
if(d_cellVertices[cellNeighborIndex(cc,cellSet.z)] == vertex1)
cidx +=1;
d_cellVertices[cellNeighborIndex(cc,cellSet.z)] = d_cellVertices[cellNeighborIndex(cidx,cellSet.z)];
cidx +=1;
};
d_cellVertexNum[cellSet.z] -= 1;
//cell l gains v1 in between v2 and a...copy the logic of cell j
cneigh = d_cellVertexNum[cellSet.w];
bool found = false;
for (int cc = cneigh-1; cc >= 0; --cc)
{
int cellIndex = d_cellVertices[cellNeighborIndex(cc,cellSet.w)];
if(!found)
d_cellVertices[cellNeighborIndex(cc+1,cellSet.w)] = cellIndex;
if(cellIndex == vertexSet.y)
{
found = true;
d_cellVertices[cellNeighborIndex(cc,cellSet.w)] = vertex1;
}
}
d_cellVertexNum[cellSet.w] += 1;
}
/*!
Host wrapper: launch vm_flip_edges_kernel with one thread per
(vertex, neighbor-slot) pair, i.e. 3*Nvertices work items.
Launch errors are screened by HANDLE_ERROR(hipGetLastError()); the
function then returns hipSuccess converted to bool (file-wide convention).
*/
bool gpu_vm_flip_edges(
                  int *d_vertexEdgeFlipsCurrent,
                  dVec *d_vertexPositions,
                  int *d_vertexNeighbors,
                  int *d_vertexCellNeighbors,
                  unsigned int *d_cellVertexNum,
                  int *d_cellVertices,
                  int *d_cellEdgeFlips,
                  int4 *d_cellSets,
                  sphericalDomain &sphere,
                  Index2D &cellNeighborIndex,
                  int Nvertices,
                  int Ncells)
    {
    //one work item per vertex-neighbor slot
    const int workItems = 3*Nvertices;
    unsigned int threadsPerBlock = (workItems < 128) ? 32 : 128;
    unsigned int gridSize = workItems/threadsPerBlock + 1;
    hipLaunchKernelGGL(( vm_flip_edges_kernel), dim3(gridSize),dim3(threadsPerBlock), 0, 0, 
                d_vertexEdgeFlipsCurrent,d_vertexPositions,d_vertexNeighbors,
                d_vertexCellNeighbors,d_cellVertexNum,d_cellVertices,d_cellEdgeFlips,d_cellSets,
                sphere,
                cellNeighborIndex,workItems);
    HANDLE_ERROR(hipGetLastError());
    return hipSuccess;
    };
/** @} */ //end of group declaration
| bf7c64b9917085b9dd25cdc72ccfc4a50dd372ad.cu | #include "sphericalVertexModel.cuh"
/*!
\addtogroup modelKernels
@{
*/
/*!
Kernel: one thread per particle; thread tid moves particle tid along the
sphere by the scaled displacement scale*disp[tid] via sphere.move.
Threads with tid >= N do nothing.
*/
__global__ void gpu_move_particles_on_sphere_kernel(dVec *pos,
                      dVec *disp,
                      sphericalDomain sphere,
                      scalar scale,
                      int N
                      )
    {
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N)
        sphere.move(pos[tid], scale * disp[tid]);
    };
/*!
Host wrapper for gpu_move_particles_on_sphere_kernel: one thread per particle.
HANDLE_ERROR(cudaGetLastError()) screens for launch errors; returns
cudaSuccess converted to bool (file-wide convention).
*/
bool gpu_move_particles_on_sphere(dVec *pos,
                      dVec *disp,
                      sphericalDomain &sphere,
                      scalar scale,
                      int N
                      )
    {
    unsigned int threadsPerBlock = (N < 512) ? 32 : 512;
    unsigned int gridSize = N/threadsPerBlock + 1;
    gpu_move_particles_on_sphere_kernel<<<gridSize,threadsPerBlock>>>(pos,disp,sphere,scale,N);
    HANDLE_ERROR(cudaGetLastError());
    return cudaSuccess;
    }
/*!
Per-cell geometry kernel: one thread per cell. For cell idx this kernel
  (1) sums the positions of the cell's vertices, applies sphere.putInBoxReal
      to the sum, and stores the result as cellPos[idx] (presumably putInBoxReal
      projects the sum back onto the sphere -- TODO confirm against sphericalDomain),
  (2) accumulates the perimeter as the sum of geodesic edge lengths,
  (3) accumulates the area via the spherical excess: (sum of included angles)
      minus (n-2)*PI, scaled by radius^2,
  (4) caches the (last, current, next) vertex positions of each cell corner in
      the per-(vertex,cell) "force set" slot, located by matching
      vertexCellNeighbors against this cell.
cellNeighborIndex maps (slot,cell) into the cell->vertex tables; neighborIndex
maps (slot,vertex) into the vertex->cell tables.
*/
__global__ void gpu_spherical_vertex_model_geometry_kernel(dVec *vertexPos,
                      dVec *cellPos,
                      int *cellNeighbors,
                      int *vertexCellNeighbors,
                      unsigned int *vertexCellNumberOfNeighbors,
                      dVec *currentVertexAroundCell,
                      dVec *lastVertexAroundCell,
                      dVec *nextVertexAroundCell,
                      unsigned int *cellNumberOfNeighbors,
                      scalar2 *areaPerimeter,
                      Index2D cellNeighborIndex,
                      Index2D neighborIndex,
                      sphericalDomain sphere,
                      int nCells
                      )
    {
    // one thread per cell; guard the grid tail
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= nCells)
        return;
    int neighs = cellNumberOfNeighbors[idx];
    //sum all vertex positions of the cell, then map the sum back into the domain
    dVec cPos(0.0);
    for (int nn = 0; nn < neighs;++nn)
        cPos = cPos + vertexPos[cellNeighbors[cellNeighborIndex(nn,idx)]];
    sphere.putInBoxReal(cPos);
    cellPos[idx] = cPos;
    //if(idx ==0)
    //    printf("%f %f %f\n", cellPos[idx][0],cellPos[idx][1],cellPos[idx][2]);
    //walk the vertex ring with a (last, current, next) sliding window, starting
    //so that the first iteration's "current" is the last vertex in the list
    int lastVertexIdx = cellNeighbors[cellNeighborIndex(neighs-2,idx)];
    int curVertexIdx = cellNeighbors[cellNeighborIndex(neighs-1,idx)];
    int nextVertexIdx;
    dVec lastVertexPos = vertexPos[lastVertexIdx];
    dVec curVertexPos = vertexPos[curVertexIdx];
    dVec nextVertexPos;
    scalar perimeter = 0.;
    scalar area = 0.;
    scalar tempVal;
    for (int nn = 0; nn < neighs; ++nn)
        {
        int cni = cellNeighborIndex(nn,idx);
        //find the (vertex,cell) force-set slot belonging to this cell; assumes a
        //matching slot always exists (otherwise forceSetIdx stays -1 and the
        //writes below would be out of bounds -- TODO confirm invariant)
        int vNeighs = vertexCellNumberOfNeighbors[curVertexIdx];
        int forceSetIdx = -1;
        for (int vn = 0; vn < vNeighs; ++vn)
            {
            int newIdx = neighborIndex(vn,curVertexIdx);
            if(vertexCellNeighbors[newIdx] == idx)
                forceSetIdx = newIdx;
            }
        nextVertexIdx = cellNeighbors[cni];
        nextVertexPos = vertexPos[nextVertexIdx];
        //edge length (last->current) and interior angle at the current vertex
        sphere.geodesicDistance(lastVertexPos,curVertexPos,tempVal);
        perimeter += tempVal;
        sphere.includedAngle(lastVertexPos,curVertexPos,nextVertexPos,tempVal);
        area += tempVal;
        //cache the window for the force kernel
        lastVertexAroundCell[forceSetIdx] = lastVertexPos;
        currentVertexAroundCell[forceSetIdx] = curVertexPos;
        nextVertexAroundCell[forceSetIdx] = nextVertexPos;
        //advance the sliding window
        lastVertexPos = curVertexPos;
        curVertexIdx = nextVertexIdx;
        curVertexPos = nextVertexPos;
        }
    //spherical excess; fold the excess back when it overshoots by multiples of
    //PI (presumably guarding includedAngle branch/winding artifacts -- TODO confirm)
    area = (area-(neighs-2)*PI);
    int extraAngularArea = floor(area/(1.0*PI));
    if(extraAngularArea > 0)
        area -= extraAngularArea*PI;
    area *= (sphere.radius*sphere.radius);
    areaPerimeter[idx].x = area;
    areaPerimeter[idx].y = perimeter;
    };
/*!
Host wrapper for gpu_spherical_vertex_model_geometry_kernel: one thread per
cell. Screens launch errors via HANDLE_ERROR(cudaGetLastError()) and returns
cudaSuccess converted to bool (file-wide convention).
*/
bool gpu_spherical_vertex_model_geometry(dVec *vertexPos,
                      dVec *cellPos,
                      int *cellNeighbors,
                      int *vertexCellNeighbors,
                      unsigned int *vertexCellNumberOfNeighbors,
                      dVec *currentVertexAroundCell,
                      dVec *lastVertexAroundCell,
                      dVec *nextVertexAroundCell,
                      unsigned int *cellNumberOfNeighbors,
                      scalar2 *areaPerimeter,
                      Index2D cellNeighborIndex,
                      Index2D neighborIndex,
                      sphericalDomain &sphere,
                      int nCells
                      )
    {
    unsigned int threadsPerBlock = (nCells < 512) ? 32 : 512;
    unsigned int gridSize = nCells/threadsPerBlock + 1;
    gpu_spherical_vertex_model_geometry_kernel<<<gridSize,threadsPerBlock>>>
            (vertexPos,cellPos,cellNeighbors,vertexCellNeighbors,vertexCellNumberOfNeighbors,
             currentVertexAroundCell,lastVertexAroundCell,nextVertexAroundCell,
             cellNumberOfNeighbors,areaPerimeter,cellNeighborIndex,neighborIndex,sphere,
             nCells);
    HANDLE_ERROR(cudaGetLastError());
    return cudaSuccess;
    };
/*!
Per-vertex force kernel: one thread per vertex. For each cell adjacent to the
vertex, accumulates the (negative) gradient of a quadratic energy in the cell's
area and perimeter deviations: perimeter terms contribute
-2*(P-P0)*gradGeodesic for both incident edges, and area terms contribute
-2*Kr*(A-A0)*gradTriangleArea for the two triangles formed with the cell
centroid. The (last,current,next) vertex positions per (vertex,cell) slot are
those cached by the geometry kernel.
*/
__global__ void gpu_quadratic_spherical_cellular_force_kernel(dVec *cellPos,
                      dVec *vertexPos,
                      dVec *forces,
                      int *vertexCellNeighbors,
                      unsigned int *vertexCellNeighborNumber,
                      dVec *currentVertexAroundCell,
                      dVec *lastVertexAroundCell,
                      dVec *nextVertexAroundCell,
                      unsigned int *cellNumberOfNeighbors,
                      scalar2 *areaPerimeter,
                      scalar2 *areaPerimeterPreference,
                      Index2D neighborIndex,
                      scalar Kr,
                      sphericalDomain sphere,
                      int N)
    {
    // one thread per vertex; guard the grid tail
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    dVec vLast,vCur,vNext,cPos,tempVar;
    dVec f(0.0);
    int vNeighs = vertexCellNeighborNumber[idx];
    //accumulate the contribution of each adjacent cell separately in fSet
    for (int cc = 0; cc < vNeighs; ++cc)
        {
        dVec fSet(0.0);
        int vni = neighborIndex(cc,idx);
        int cellIndex = vertexCellNeighbors[vni];
        cPos = cellPos[cellIndex];
        //cached sliding window around this vertex within this cell
        vLast = lastVertexAroundCell[vni];
        vCur = currentVertexAroundCell[vni];
        vNext =nextVertexAroundCell[vni];
        scalar areaDifference = areaPerimeter[cellIndex].x - areaPerimeterPreference[cellIndex].x;
        scalar perimeterDifference = areaPerimeter[cellIndex].y - areaPerimeterPreference[cellIndex].y;
        //local spherical basis at the current vertex, shared by all gradients below
        dVec thetaHat, phiHat;
        sphere.cartesianSphericalBasisChange(vCur,thetaHat,phiHat);
        //perimeter gradient: both edges incident on this vertex
        sphere.gradientGeodesicDistance(vCur,vLast,tempVar,thetaHat,phiHat);
        fSet -= 2.0*perimeterDifference*tempVar;
        sphere.gradientGeodesicDistance(vCur,vNext,tempVar,thetaHat,phiHat);
        fSet -= 2.0*perimeterDifference*tempVar;
        //area gradient: the two centroid triangles touching this vertex
        sphere.gradientTriangleArea(vCur,vLast,cPos,tempVar,thetaHat,phiHat);
        fSet -= 2.0*Kr*areaDifference*tempVar;
        sphere.gradientTriangleArea(vCur,cPos,vNext,tempVar,thetaHat,phiHat);
        fSet -= 2.0*Kr*areaDifference*tempVar;
        //NOTE(review): a cell contribution containing NaN is silently dropped --
        //presumably guarding degenerate geometry; confirm this masking is intended
        if(!isnan(fSet[0]))
            f += fSet;
        };
    forces[idx] = f;
    };
/*!
Host wrapper for gpu_quadratic_spherical_cellular_force_kernel: one thread per
vertex. Screens launch errors via HANDLE_ERROR(cudaGetLastError()) and returns
cudaSuccess converted to bool (file-wide convention).
*/
bool gpu_quadratic_spherical_cellular_force(dVec *cellPos,
                      dVec *vertexPos,
                      dVec *forces,
                      int *vertexCellNeighbors,
                      unsigned int *vertexCellNeighborNumber,
                      dVec *currentVertexAroundCell,
                      dVec *lastVertexAroundCell,
                      dVec *nextVertexAroundCell,
                      unsigned int *cellNumberOfNeighbors,
                      scalar2 *areaPerimeter,
                      scalar2 *areaPerimeterPreference,
                      Index2D neighborIndex,
                      scalar Kr,
                      sphericalDomain &sphere,
                      int N)
    {
    unsigned int threadsPerBlock = (N < 512) ? 32 : 512;
    unsigned int gridSize = N/threadsPerBlock + 1;
    gpu_quadratic_spherical_cellular_force_kernel<<<gridSize,threadsPerBlock>>>(cellPos,vertexPos,forces,
            vertexCellNeighbors,vertexCellNeighborNumber,currentVertexAroundCell,lastVertexAroundCell,nextVertexAroundCell,
            cellNumberOfNeighbors,areaPerimeter,areaPerimeterPreference,neighborIndex,Kr,sphere,N);
    HANDLE_ERROR(cudaGetLastError());
    return cudaSuccess;
    };
/*!
Flag edges shorter than T1THRESHOLD for a T1 transition. One thread per
(vertex, neighbor-slot) pair (idx in [0, 3*Nvertices)); only the thread whose
vertex1 < vertex2 tests the edge, so each edge is examined exactly once and
every flag slot is explicitly written each call. When an edge is flagged, the
kernel also checks whether any of the six cells adjacent to its endpoints is
already at vertexMax vertices, and if so sets d_grow[0]=1 so the host can
resize the cell-vertex lists before flipping.
*/
__global__ void vm_simple_T1_test_kernel(dVec *d_vertexPositions,
                      int *d_vertexNeighbors,
                      int *d_vertexEdgeFlips,
                      int *d_vertexCellNeighbors,
                      unsigned int *d_cellVertexNum,
                      int *d_cellVertices,
                      sphericalDomain sphere,
                      scalar T1THRESHOLD,
                      int NvTimes3,
                      int vertexMax,
                      int *d_grow,
                      Index2D cellNeighborIndex)
    {
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= NvTimes3)
        return;
    //each idx encodes (vertex1 = idx/3, neighbor slot = idx%3)
    int vertex1 = idx/3;
    int vertex2 = d_vertexNeighbors[idx];
    scalar arcLength;
    //only the lower-indexed endpoint owns the edge test
    if(vertex1 < vertex2)
        {
        sphere.geodesicDistance( d_vertexPositions[vertex1], d_vertexPositions[vertex2],arcLength);
        if(arcLength < T1THRESHOLD)
            {
            d_vertexEdgeFlips[idx]=1;
            //test the number of neighbors of the cells connected to v1 and v2 to see if the
            //cell list should grow. This is kind of slow, and I wish I could optimize it away,
            //or at least not test for it during every time step. The latter seems pretty doable.
            //But this is boring, so we'll revisit if optimizations require it
            if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex1]] == vertexMax)
                d_grow[0] = 1;
            if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex1+1]] == vertexMax)
                d_grow[0] = 1;
            if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex1+2]] == vertexMax)
                d_grow[0] = 1;
            if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex2]] == vertexMax)
                d_grow[0] = 1;
            if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex2+1]] == vertexMax)
                d_grow[0] = 1;
            if(d_cellVertexNum[d_vertexCellNeighbors[3*vertex2+2]] == vertexMax)
                d_grow[0] = 1;
            }
        else
            d_vertexEdgeFlips[idx]=0;
        }
    else
        d_vertexEdgeFlips[idx] = 0;
    };
//!Host wrapper: test every edge for a potential T1 event, one thread per
//!(vertex, neighbor-slot) pair; also reports (via d_grow) whether vertexMax must grow
bool gpu_vm_test_edges_for_T1(dVec *d_vertexPositions,
                      int *d_vertexNeighbors,
                      int *d_vertexEdgeFlips,
                      int *d_vertexCellNeighbors,
                      unsigned int *d_cellVertexNum,
                      int *d_cellVertices,
                      sphericalDomain &sphere,
                      scalar T1THRESHOLD,
                      int Nvertices,
                      int vertexMax,
                      int *d_grow,
                      Index2D &cellNeighborIndex)
    {
    const int workItems = 3*Nvertices;
    unsigned int threadsPerBlock = (workItems < 512) ? 32 : 512;
    unsigned int gridSize = workItems/threadsPerBlock + 1;
    vm_simple_T1_test_kernel<<<gridSize,threadsPerBlock>>>(d_vertexPositions,d_vertexNeighbors,
            d_vertexEdgeFlips,d_vertexCellNeighbors,
            d_cellVertexNum,d_cellVertices,
            sphere,T1THRESHOLD,
            workItems,vertexMax,d_grow,cellNeighborIndex);
    HANDLE_ERROR(cudaGetLastError());
    return cudaSuccess;
    };
/*!
There will be severe topology mismatches if a cell is involved in more than one T1 transition
simultaneously (due to incoherent updates of the cellVertices structure). So, go through the
current list of edges that are marked to take part in a T1 transition and select one edge per
cell to be flipped on this trip through the functions.
*/
/*!
Select at most one T1 flip per cell per pass. One thread per cell; each thread
scans its cell's vertices and, for each vertex owned by this cell (i.e. all
three adjacent cells have index >= this cell, so exactly one thread processes
the vertex), promotes at most one flagged edge from d_vertexEdgeFlips to
d_vertexEdgeFlipsCurrent. Promotion requires atomically reserving all four
involved cells in d_cellEdgeFlips; the winning (vertex,slot) index also records
the four cells in d_cellSets for the flip kernel.
Outputs: d_finishedFlippingEdges[0]=1 if any flip candidate was found by this
cell; d_finishedFlippingEdges[1]=1 if further candidates remain for a later pass.
*/
__global__ void vm_one_T1_per_cell_per_vertex_kernel(
                      int* __restrict__ d_vertexEdgeFlips,
                      int* __restrict__ d_vertexEdgeFlipsCurrent,
                      const int* __restrict__ d_vertexNeighbors,
                      const int* __restrict__ d_vertexCellNeighbors,
                      const unsigned int* __restrict__ d_cellVertexNum,
                      const int * __restrict__ d_cellVertices,
                      int *d_finishedFlippingEdges,
                      int *d_cellEdgeFlips,
                      int4 *d_cellSets,
                      Index2D cellNeighborIndex,
                      int Ncells)
    {
    unsigned int cell = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell >= Ncells)
        return;
    //look through every vertex of the cell
    int cneigh = d_cellVertexNum[cell];
    int vertex;
    bool flipFound = false;
    bool moreFlipsFound = false;
    for (int cc = 0; cc < cneigh; ++cc)
        {
        vertex = d_cellVertices[cellNeighborIndex(cc,cell)];
        //what are the other cells attached to this vertex? For correctness, only one cell should
        //own each vertex here. For simplicity, only the lowest-indexed cell gets to do any work.
        int c1,c2,c3,c4;
        c1 = d_vertexCellNeighbors[3*vertex];
        c2 = d_vertexCellNeighbors[3*vertex+1];
        c3 = d_vertexCellNeighbors[3*vertex+2];
        if(c1 < cell || c2 < cell || c3 < cell)
            continue;
        //examine the three neighbor slots of the owned vertex
        for (int idx = 3*vertex; idx < 3*vertex+3; ++idx)
            {
            if(d_vertexEdgeFlips[idx] == 1)
                {
                int vertex2 = d_vertexNeighbors[idx];
                //the fourth cell is the neighbor of vertex2 not shared with vertex
                //NOTE(review): c4 stays uninitialized if all of vertex2's cells are
                //in {c1,c2,c3} -- presumably topology forbids that; confirm
                int ctest;
                for (int ff = 0; ff < 3; ++ff)
                    {
                    ctest = d_vertexCellNeighbors[3*vertex2+ff];
                    if(ctest != c1 && ctest != c2 && ctest != c3)
                        c4=ctest;
                    }
                //a second candidate in this cell is deferred to a later pass
                if (flipFound)
                    {
                    moreFlipsFound = true;
                    break;
                    }
                //check if the cells have been reserved; if not reserve them
                //NOTE(review): on a failed reservation (some ccX already 1) the flags
                //set here are not rolled back; they appear to be cleared only by
                //vm_flip_edges_kernel for the winning set -- verify no stale
                //reservations persist across passes
                int cc1 = atomicExch(&(d_cellEdgeFlips[c1]),1);
                int cc2 = atomicExch(&(d_cellEdgeFlips[c2]),1);
                int cc3 = atomicExch(&(d_cellEdgeFlips[c3]),1);
                int cc4 = atomicExch(&(d_cellEdgeFlips[c4]),1);
                flipFound = true;
                if(cc1 ==0 && cc2 ==0 &&cc3==0&&cc4==0)
                    {
                   // printf("(%i,%i,%i,%i)\t(%i,%i)\n",c1,c2,c3,c4,vertex,vertex2);
                    atomicExch(&d_vertexEdgeFlipsCurrent[idx],1);
                    atomicExch(&d_vertexEdgeFlips[idx],0);
                    int4 cs;cs.x=c1;cs.y=c2;cs.z=c3;cs.w=c4;
                    d_cellSets[idx] = cs;
                    };
                }
            };
        };
    if (flipFound)
        {
        d_finishedFlippingEdges[0] = 1;
        if(moreFlipsFound)
            d_finishedFlippingEdges[1] = 1;
        };
    };
/*!
Host wrapper: choose at most one T1 flip per cell per pass.
Simultaneous flips touching the same cell would update the cell-vertex tables
incoherently, so vm_one_T1_per_cell_per_vertex_kernel moves at most one edge
per cell from d_vertexEdgeFlips into d_vertexEdgeFlipsCurrent (reserving the
four involved cells atomically). d_finishedFlippingEdges[0]==1 signals that a
flip pass is needed; [1]==1 signals that the host must call this function
again for the deferred flips.
*/
bool gpu_vm_parse_multiple_flips(
                    int *d_vertexEdgeFlips,
                    int *d_vertexEdgeFlipsCurrent,
                    int *d_vertexNeighbors,
                    int *d_vertexCellNeighbors,
                    unsigned int *d_cellVertexNum,
                    int *d_cellVertices,
                    int *d_finishedFlippingEdges,
                    int *d_edgeFlips,
                    int4 *d_cellSets,
                    Index2D &cellNeighborIndex,
                    int Ncells)
    {
    //one thread per cell selects that cell's single flip candidate
    unsigned int threadsPerBlock = (Ncells < 512) ? 32 : 512;
    unsigned int gridSize = Ncells/threadsPerBlock + 1;
    vm_one_T1_per_cell_per_vertex_kernel<<<gridSize,threadsPerBlock>>>(
                                                      d_vertexEdgeFlips,
                                                      d_vertexEdgeFlipsCurrent,
                                                      d_vertexNeighbors,
                                                      d_vertexCellNeighbors,
                                                      d_cellVertexNum,
                                                      d_cellVertices,
                                                      d_finishedFlippingEdges,
                                                      d_edgeFlips,
                                                      d_cellSets,
                                                      cellNeighborIndex,
                                                      Ncells);
    HANDLE_ERROR(cudaGetLastError());
    return cudaSuccess;
    }
/*!
Flip any edge labeled for re-wiring in the vertexEdgeFlipsCurrent list
*/
/*!
Execute a T1 (edge-flip) transition on every edge marked in
d_vertexEdgeFlipsCurrent. One thread per (vertex, neighbor-slot) pair; the
parsing kernel guarantees the four cells touching a marked edge are reserved
exclusively for this thread, so the rewiring below is race-free.
Steps: classify the four surrounding cells relative to the edge, find the two
"outer" vertices a and b, rotate the edge 90 degrees about its midpoint (and
double its length), then rewire vertex-vertex, vertex-cell, and cell-vertex
tables.
Fixes relative to the previous version: (1) removed three dead loops that
re-read vertex lists into vnext and discarded the result -- the last of them
indexed cell3's list with cell2's vertex count; (2) the cellSet validity check
now precedes every cellSet-based array access, so a failed classification can
no longer index d_cellVertexNum/d_cellVertices with -1; (3) the three identical
classification passes are deduplicated into one loop.
*/
__global__ void vm_flip_edges_kernel(int *d_vertexEdgeFlipsCurrent,
                      dVec *d_vertexPositions,
                      int *d_vertexNeighbors,
                      int *d_vertexCellNeighbors,
                      unsigned int *d_cellVertexNum,
                      int *d_cellVertices,
                      int *d_cellEdgeFlips,
                      int4 *d_cellSets,
                      sphericalDomain sphere,
                      Index2D cellNeighborIndex,
                      int NvTimes3
                      )
    {
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    //return if the index is out of bounds or if the edge isn't marked for flipping
    if (idx >= NvTimes3 || d_vertexEdgeFlipsCurrent[idx] == 0)
        return;
    //identify the vertices and reset the flag
    int vertex1 = idx/3;
    int vertex2 = d_vertexNeighbors[idx];
    d_vertexEdgeFlipsCurrent[idx] = 0;
    //cellSet.x/y/z = cells "i","j","k" around vertex1; cellSet.w = cell "l" (the
    //cell of vertex2 not adjacent to vertex1, recorded by the parsing kernel)
    int4 cellSet;cellSet.x=-1;cellSet.y=-1;cellSet.z=-1;cellSet.w=-1;
    int2 vertexSet;//vertexSet.x = "b", vertexSet.y = "a"
    int4 cells = d_cellSets[idx];
    int vlast, vcur, vnext, cneigh;
    cellSet.w = cells.w;
    //classify each of vertex1's three cells: walk the cell's vertex ring until
    //vcur == vertex1; whether vertex2 precedes (vlast) or follows (vnext)
    //vertex1 in the ring determines the cell's role relative to the edge
    int candidateCells[3];
    candidateCells[0] = cells.x;
    candidateCells[1] = cells.y;
    candidateCells[2] = cells.z;
    for (int cellIdx = 0; cellIdx < 3; ++cellIdx)
        {
        int curCell = candidateCells[cellIdx];
        cneigh = d_cellVertexNum[curCell];
        vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,curCell) ];
        vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,curCell) ];
        for (int cn = 0; cn < cneigh; ++cn)
            {
            vnext = d_cellVertices[cellNeighborIndex(cn,curCell)];
            if(vcur == vertex1) break;
            vlast = vcur;
            vcur = vnext;
            };
        if(vlast == vertex2)
            cellSet.x = curCell;
        else if(vnext == vertex2)
            cellSet.z = curCell;
        else
            cellSet.y = curCell;
        };
    //release the reservations taken by the parsing kernel
    d_cellEdgeFlips[cells.x] = 0;
    d_cellEdgeFlips[cells.y] = 0;
    d_cellEdgeFlips[cells.z] = 0;
    d_cellEdgeFlips[cells.w] = 0;
    //abort if the classification failed (checked before any cellSet-based indexing)
    if(cellSet.x <0 || cellSet.y < 0 || cellSet.z <0 || cellSet.w <0)
        return;
    //forbid a T1 transition that would shrink a triangular cell
    if (d_cellVertexNum[cellSet.x] ==3 || d_cellVertexNum[cellSet.z] ==3)
        return;
    //get the vertexSet by examining cells j and l: "b" follows vertex1 in cell j,
    //"a" follows vertex2 in cell l
    cneigh = d_cellVertexNum[cellSet.y];
    vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cellSet.y) ];
    vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cellSet.y) ];
    for (int cn = 0; cn < cneigh; ++cn)
        {
        vnext = d_cellVertices[cellNeighborIndex(cn,cellSet.y)];
        if(vcur == vertex1) break;
        vlast = vcur;
        vcur = vnext;
        };
    vertexSet.x=vnext;
    cneigh = d_cellVertexNum[cellSet.w];
    vlast = d_cellVertices[ cellNeighborIndex(cneigh-2,cellSet.w) ];
    vcur = d_cellVertices[ cellNeighborIndex(cneigh-1,cellSet.w) ];
    for (int cn = 0; cn < cneigh; ++cn)
        {
        vnext = d_cellVertices[cellNeighborIndex(cn,cellSet.w)];
        if(vcur == vertex2) break;
        vlast = vcur;
        vcur = vnext;
        };
    vertexSet.y=vnext;
    //okay, we're ready to go. First, rotate the vertices in the edge and set them at twice their original distance
    dVec v1 = d_vertexPositions[vertex1];
    dVec v2 = d_vertexPositions[vertex2];
    dVec midpoint = 0.5*(v1+v2);
    sphere.putInBoxVirtual(midpoint);
    //chose the angle of rotation based on whether the edges are currently crossed...
    dVec vC = d_vertexPositions[vertexSet.y];//vSet.y is vSet.z
    scalar determinant =  vC[0]*(v1[1]*v2[2]-v1[2]*v2[1])
                         +vC[1]*(v1[2]*v2[0]-v1[0]*v2[2])
                         +vC[2]*(v1[0]*v2[1]-v1[1]*v2[0]);
    determinant = determinant > 0 ? 1. : -1. ;
    rodriguesRotation(v1,midpoint,-0.5*determinant*PI);
    rodriguesRotation(v2,midpoint,-0.5*determinant*PI);
    dVec diff = 0.5*(v1-v2);
    v1 = v1 + diff;
    v2 = v2 - diff;
    sphere.putInBoxReal(v1);
    sphere.putInBoxReal(v2);
    d_vertexPositions[vertex1] = v1;
    d_vertexPositions[vertex2] = v2;
    //now, re-wire the cells and vertices
    //start with the vertex-vertex and vertex-cell neighbors
    for (int vert = 0; vert < 3; ++vert)
        {
        //vertex-cell neighbors
        if(d_vertexCellNeighbors[3*vertex1+vert] == cellSet.z)
            d_vertexCellNeighbors[3*vertex1+vert] = cellSet.w;
        if(d_vertexCellNeighbors[3*vertex2+vert] == cellSet.x)
            d_vertexCellNeighbors[3*vertex2+vert] = cellSet.y;
        //vertex-vertex neighbors
        if(d_vertexNeighbors[3*vertex1+vert] == vertexSet.x)
            d_vertexNeighbors[3*vertex1+vert] = vertexSet.y;
        if(d_vertexNeighbors[3*vertex2+vert] == vertexSet.y)
            d_vertexNeighbors[3*vertex2+vert] = vertexSet.x;
        if(d_vertexNeighbors[3*vertexSet.x+vert] == vertex1)
            d_vertexNeighbors[3*vertexSet.x+vert] = vertex2;
        if(d_vertexNeighbors[3*vertexSet.y+vert] == vertex2)
            d_vertexNeighbors[3*vertexSet.y+vert] = vertex1;
        };
    //now rewire the cells...
    //cell i loses v2 as a neighbor: compact the list over the removed entry
    cneigh = d_cellVertexNum[cellSet.x];
    int cidx = 0;
    for (int cc = 0; cc < cneigh-1; ++cc)
        {
        if(d_cellVertices[cellNeighborIndex(cc,cellSet.x)] == vertex2)
            cidx +=1;
        d_cellVertices[cellNeighborIndex(cc,cellSet.x)] = d_cellVertices[cellNeighborIndex(cidx,cellSet.x)];
        cidx +=1;
        };
    d_cellVertexNum[cellSet.x] -= 1;
    //cell j gains v2 in between v1 and b, so step through list backwards and insert
    cneigh = d_cellVertexNum[cellSet.y];
    bool found0 = false;
    for (int cc = cneigh-1; cc >= 0; --cc)
        {
        int cellIndex = d_cellVertices[cellNeighborIndex(cc,cellSet.y)];
        if(!found0)
            d_cellVertices[cellNeighborIndex(cc+1,cellSet.y)] = cellIndex;
        if(cellIndex == vertexSet.x)
            {
            found0 = true;
            d_cellVertices[cellNeighborIndex(cc,cellSet.y)] = vertex2;
            }
        }
    d_cellVertexNum[cellSet.y] += 1;
    //cell k loses v1 as a neighbor
    cneigh = d_cellVertexNum[cellSet.z];
    cidx = 0;
    for (int cc = 0; cc < cneigh-1; ++cc)
        {
        if(d_cellVertices[cellNeighborIndex(cc,cellSet.z)] == vertex1)
            cidx +=1;
        d_cellVertices[cellNeighborIndex(cc,cellSet.z)] = d_cellVertices[cellNeighborIndex(cidx,cellSet.z)];
        cidx +=1;
        };
    d_cellVertexNum[cellSet.z] -= 1;
    //cell l gains v1 in between v2 and a...copy the logic of cell j
    cneigh = d_cellVertexNum[cellSet.w];
    bool found = false;
    for (int cc = cneigh-1; cc >= 0; --cc)
        {
        int cellIndex = d_cellVertices[cellNeighborIndex(cc,cellSet.w)];
        if(!found)
            d_cellVertices[cellNeighborIndex(cc+1,cellSet.w)] = cellIndex;
        if(cellIndex == vertexSet.y)
            {
            found = true;
            d_cellVertices[cellNeighborIndex(cc,cellSet.w)] = vertex1;
            }
        }
    d_cellVertexNum[cellSet.w] += 1;
    }
/*!
Host wrapper: launch vm_flip_edges_kernel with one thread per
(vertex, neighbor-slot) pair, i.e. 3*Nvertices work items. Screens launch
errors via HANDLE_ERROR(cudaGetLastError()) and returns cudaSuccess converted
to bool (file-wide convention).
*/
bool gpu_vm_flip_edges(
                    int *d_vertexEdgeFlipsCurrent,
                    dVec *d_vertexPositions,
                    int *d_vertexNeighbors,
                    int *d_vertexCellNeighbors,
                    unsigned int *d_cellVertexNum,
                    int *d_cellVertices,
                    int *d_cellEdgeFlips,
                    int4 *d_cellSets,
                    sphericalDomain &sphere,
                    Index2D &cellNeighborIndex,
                    int Nvertices,
                    int Ncells)
    {
    const int workItems = 3*Nvertices;
    unsigned int threadsPerBlock = (workItems < 128) ? 32 : 128;
    unsigned int gridSize = workItems/threadsPerBlock + 1;
    vm_flip_edges_kernel<<<gridSize,threadsPerBlock>>>(
                  d_vertexEdgeFlipsCurrent,d_vertexPositions,d_vertexNeighbors,
                  d_vertexCellNeighbors,d_cellVertexNum,d_cellVertices,d_cellEdgeFlips,d_cellSets,
                  sphere,
                  cellNeighborIndex,workItems);
    HANDLE_ERROR(cudaGetLastError());
    return cudaSuccess;
    };
|
41b1d6bc45f0ebd9e4b3f33a54a0da674d342133.hip | // !!! This is a file automatically generated by hipify!!!
/*
Name: Matthew Matze
Date: 9/28/2016
Class: csc4310
Location: ~/csc3210/deviceq
General Summary of Program
The program is set up to show the various device properties to the screen
To Compile:
nvcc device_query.cu -o device_query
To Execute:
device_query
*/
#include<stdio.h>
void printDevProp(hipDeviceProp_t devProp);
/*
The function prints out some of the properties of the hipDeviceProp_t struct
Parameters:The struct to with which the device info shall be taken from
Postcondition: The specified parameters have been outputed to the screen
*/
int main(void){
   //query each device present on the system and report its properties
   int deviceCount = 0;
   hipGetDeviceCount(&deviceCount);
   for(int dev = 0; dev < deviceCount; ++dev){
      hipDeviceProp_t props;
      hipGetDeviceProperties(&props, dev);
      printDevProp(props);
   }
}
/*
 Print a subset of the fields of a hipDeviceProp_t struct to stdout.
 Parameters: devProp -- the device-property struct to report
 Postcondition: the selected fields have been written to the screen.
 Note: the size_t-typed fields (totalGlobalMem, sharedMemPerBlock, memPitch,
 totalConstMem, textureAlignment) must be printed with %zu; the previous %d
 was a varargs type mismatch (undefined behavior) and misprinted values of
 2 GB or more on 64-bit platforms.
*/
void printDevProp(hipDeviceProp_t devProp){
   printf("Device name: %s\n", devProp.name);
   printf("Major: %d\n",devProp.major);
   printf("Minor: %d\n",devProp.minor);
   printf("TotalGlobalMem(Bytes): %zu\n",devProp.totalGlobalMem);
   printf("SharedMemPerBlock: %zu\n",devProp.sharedMemPerBlock);
   printf("RegsPerBlock: %d\n",devProp.regsPerBlock);
   printf("WarpSize: %d\n",devProp.warpSize);
   printf("MemPitch: %zu\n",devProp.memPitch);
   printf("MaxThreadsPerBlock: %d\n",devProp.maxThreadsPerBlock);
   printf("MaxThreadsPerMultiProcessor: %d\n",devProp.maxThreadsPerMultiProcessor);
   for(int i=0;i<3;i++){
      printf("MaxThreadsDim[%d]: %d\n", i ,devProp.maxThreadsDim[i]);
      printf("MaxGridSize[%d]: %d\n", i, devProp.maxGridSize[i]);
   }
   printf("ClockRate: %d\n",devProp.clockRate);
   printf("TotalConstMem: %zu\n",devProp.totalConstMem);
   printf("TextureAlignment: %zu\n",devProp.textureAlignment);
   printf("DeviceOverlap: %d\n",devProp.deviceOverlap);
   printf("MultiProcessorCount: %d\n",devProp.multiProcessorCount);
   printf("KernelExecTimeoutEnabled: %d\n",devProp.kernelExecTimeoutEnabled);
}
| 41b1d6bc45f0ebd9e4b3f33a54a0da674d342133.cu | /*
Name: Matthew Matze
Date: 9/28/2016
Class: csc4310
Location: ~/csc3210/deviceq
General Summary of Program
The program is set up to show the various device properties to the screen
To Compile:
nvcc device_query.cu -o device_query
To Execute:
device_query
*/
#include<stdio.h>
void printDevProp(cudaDeviceProp devProp);
/*
The function prints out some of the properties of the cudaDeviceProp struct
Parameters:The struct to with which the device info shall be taken from
Postcondition: The specified parameters have been outputed to the screen
*/
int main(void){
   //query each device present on the system and report its properties
   int deviceCount = 0;
   cudaGetDeviceCount(&deviceCount);
   for(int dev = 0; dev < deviceCount; ++dev){
      cudaDeviceProp props;
      cudaGetDeviceProperties(&props, dev);
      printDevProp(props);
   }
}
/*
 Print a subset of the fields of a cudaDeviceProp struct to stdout.
 Parameters: devProp -- the device-property struct to report
 Postcondition: the selected fields have been written to the screen.
 Note: the size_t-typed fields (totalGlobalMem, sharedMemPerBlock, memPitch,
 totalConstMem, textureAlignment) must be printed with %zu; the previous %d
 was a varargs type mismatch (undefined behavior) and misprinted values of
 2 GB or more on 64-bit platforms.
*/
void printDevProp(cudaDeviceProp devProp){
   printf("Device name: %s\n", devProp.name);
   printf("Major: %d\n",devProp.major);
   printf("Minor: %d\n",devProp.minor);
   printf("TotalGlobalMem(Bytes): %zu\n",devProp.totalGlobalMem);
   printf("SharedMemPerBlock: %zu\n",devProp.sharedMemPerBlock);
   printf("RegsPerBlock: %d\n",devProp.regsPerBlock);
   printf("WarpSize: %d\n",devProp.warpSize);
   printf("MemPitch: %zu\n",devProp.memPitch);
   printf("MaxThreadsPerBlock: %d\n",devProp.maxThreadsPerBlock);
   printf("MaxThreadsPerMultiProcessor: %d\n",devProp.maxThreadsPerMultiProcessor);
   for(int i=0;i<3;i++){
      printf("MaxThreadsDim[%d]: %d\n", i ,devProp.maxThreadsDim[i]);
      printf("MaxGridSize[%d]: %d\n", i, devProp.maxGridSize[i]);
   }
   printf("ClockRate: %d\n",devProp.clockRate);
   printf("TotalConstMem: %zu\n",devProp.totalConstMem);
   printf("TextureAlignment: %zu\n",devProp.textureAlignment);
   printf("DeviceOverlap: %d\n",devProp.deviceOverlap);
   printf("MultiProcessorCount: %d\n",devProp.multiProcessorCount);
   printf("KernelExecTimeoutEnabled: %d\n",devProp.kernelExecTimeoutEnabled);
}
|
8e4ee30e53f7eabdaa5c63ca6dfda5c26b6fff82.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| 8e4ee30e53f7eabdaa5c63ca6dfda5c26b6fff82.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
17a6ca4a18316afdfc8aa52583009816e71958e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <iostream>
#define TILE_WIDTH 16
__constant__ int xdims_k[4];
__constant__ int wdims_k[4];
__constant__ int ydims_k[4];
__constant__ int xdims_f[2];
__constant__ int wdims_f[2];
/*__global__ void conv_forward_kernel_basic(float *X, float *W, float *Y){
int n, m, h, w;
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
n = blockIdx.x;
m = blockIdx.y;
h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
int C = wdims_k[2]; // in_channel
int P = wdims_k[0]; // filter_h
int Q = wdims_k[1]; // filter_w
if(h < ydims_k[1] && w < ydims_k[2]){
float acc = 0;
for(int c = 0; c < C; c++){
for(int p = 0; p < P; p++){
for(int q = 0; q < Q; q++)
acc += X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (h + p) * xdims_k[2] * xdims_k[3] + (w + q) * xdims_k[3] + c]
* W[p * wdims_k[1] * wdims_k[2] * wdims_k[3] + q * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
}
}
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = (acc < 0) ? 0 : acc;
}
}
void conv_forward_host_basic(const float *X, const int xdims[4], const float *W,
const int wdims[4], float *Y, const int ydims[4]){
float *X_device;
float *W_device;
float *Y_device;
int X_size = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(float);
int W_size = wdims[0] * wdims[1] * wdims[2] * wdims[3] * sizeof(float);
int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
int d_size = 4 * sizeof(int);
hipMalloc((void**) &X_device, X_size);
hipMalloc((void**) &W_device, W_size);
hipMalloc((void**) &Y_device, Y_size);
hipMemcpy(X_device, X, X_size, hipMemcpyHostToDevice);
hipMemcpy(W_device, W, W_size, hipMemcpyHostToDevice);
hipMemcpy(Y_device, Y, Y_size, hipMemcpyHostToDevice);
hipMemcpyToSymbol(xdims_k, xdims, d_size);
hipMemcpyToSymbol(wdims_k, wdims, d_size);
hipMemcpyToSymbol(ydims_k, ydims, d_size);
// std::cout << X_size/sizeof(float) << ", " << W_size/sizeof(float) << ", " << Y_size/sizeof(float) << std::endl;
int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(ydims[0], ydims[3], Z);
conv_forward_kernel_basic<<<gridDim, blockDim>>>(X_device, W_device, Y_device);
hipDeviceSynchronize();
hipMemcpy(Y, Y_device, Y_size, hipMemcpyDeviceToHost);
hipFree(X_device);
hipFree(W_device);
hipFree(Y_device);
}*/
/*__global__ void conv_forward_kernel_tiled(half *X, half *W, half *Y){
int C = xdims_k[3]; // in_channel
int P = wdims_k[0]; // filter_h
int Q = wdims_k[1]; // filter_w
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + Q - 1;
int X_tile_height = TILE_WIDTH + P - 1;
extern __shared__ half shmem[];
half *X_shared = &shmem[0];
half *W_shared = &shmem[X_tile_width * X_tile_height];
n = blockIdx.x;
m = blockIdx.y;
h0 = threadIdx.y;
w0 = threadIdx.x;
h_base = (blockIdx.z / W_grid) * TILE_WIDTH;
w_base = (blockIdx.z % W_grid) * TILE_WIDTH;
h = h_base + h0;
w = w_base + w0;
float acc = 0;
for(int c = 0; c < C; c++){ // sum over input channels
if((h0 < P) && (w0 < Q)) // load weight
W_shared[h0 * Q + w0] = W[h0 * Q * wdims_k[2] * wdims_k[3] + w0 * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
__syncthreads();
for(int i = h; i < h_base + X_tile_height; i += TILE_WIDTH){ // load tiles
for(int j = w; j < w_base + X_tile_width; j += TILE_WIDTH)
X_shared[(i - h_base) * X_tile_width + (j - w_base)] =
X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + i * xdims_k[2] * xdims_k[3] + j * xdims_k[3] + c];
}
__syncthreads();
if(h < ydims_k[1] && w < ydims_k[2]){
for(int p = 0; p < P; p++){ // sum
for(int q = 0; q < Q; q++)
acc += X_shared[(h0 + p) * X_tile_width + (w0 + q)] * W_shared[p * Q + q];
}
}
__syncthreads();
}
if((h < ydims_k[1]) && (w < ydims_k[2]))
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = (acc < 0) ? 0 : acc;
}
half* conv_forward_host_tiled(const half *X, const int xdims[4], const half *W, const int wdims[4],
half *Y, const int ydims[4], half *in = NULL){
half *X_device = in;
half *W_device;
half *Y_device;
int X_size = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(half);
int W_size = wdims[0] * wdims[1] * wdims[2] * wdims[3] * sizeof(half);
int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(half);
int d_size = 4 * sizeof(int);
if(in == NULL) hipMalloc((void**) &X_device, X_size);
hipMalloc((void**) &W_device, W_size);
hipMalloc((void**) &Y_device, Y_size);
if(in == NULL) hipMemcpy(X_device, X, X_size, hipMemcpyHostToDevice);
hipMemcpy(W_device, W, W_size, hipMemcpyHostToDevice);
//hipMemcpy(Y_device, Y, Y_size, hipMemcpyHostToDevice);
hipMemset(Y, 0, Y_size);
hipMemcpyToSymbol(xdims_k, xdims, d_size);
hipMemcpyToSymbol(wdims_k, wdims, d_size);
hipMemcpyToSymbol(ydims_k, ydims, d_size);
int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(ydims[0], ydims[3], Z);
size_t shmem_size = sizeof(float) * ((TILE_WIDTH + wdims[0] - 1) * (TILE_WIDTH + wdims[1] - 1) + wdims[0] * wdims[1]);
conv_forward_kernel_tiled<<<gridDim, blockDim, shmem_size>>>(X_device, W_device, Y_device);
hipDeviceSynchronize();
//hipMemcpy(Y, Y_device, Y_size, hipMemcpyDeviceToHost);
hipFree(X_device);
hipFree(W_device);
//hipFree(Y_device);
return Y_device;
}*/
// 2-D average pooling over NHWC tensors: each thread produces one output
// element Y[n, h, w, m] as the mean of a pool_size x pool_size window of X.
// Grid layout: (batch, channel, tileRow * W_grid + tileCol); block is
// TILE_WIDTH x TILE_WIDTH. Tensor dims are read from __constant__ xdims_k
// (input) and ydims_k (output).
__global__ void average_pool_kernel(float *X, float *Y, int pool_size){
  int n, m, h, w;
  // tiles needed to cover the output width; used to delinearize blockIdx.z
  int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
  n = blockIdx.x;  // batch index
  m = blockIdx.y;  // channel index
  h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;  // output row
  w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;  // output column
  if(h < ydims_k[1] && w < ydims_k[2]){  // guard the ragged edge tiles
    float acc = 0;
    // accumulate the non-overlapping window starting at (h*pool_size, w*pool_size)
    for(int p = 0; p < pool_size; p++){
      for(int q = 0; q < pool_size; q++)
        acc += X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (pool_size * h + p) * xdims_k[2] * xdims_k[3] +
                 (pool_size * w + q) * xdims_k[3] + m];
    }
    Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = acc / (1.0f * pool_size * pool_size);
  }
}
// Host driver for average_pool_kernel (NHWC tensors).
//   X         host input (currently unused: the host->device upload path is
//             disabled, so the input is expected via `in`)
//   xdims     input dims  [N, H, W, C]
//   pool_size pooling window edge length
//   Y         host output buffer (unused here; result stays on the device)
//   ydims     output dims [N, H', W', C]
//   in        device-resident input buffer; it is consumed (freed) here.
//             NOTE(review): if `in` is NULL the kernel reads a null pointer —
//             callers appear to always pass a device buffer; confirm.
// Returns a newly allocated device buffer with the pooled result; ownership
// passes to the caller.
float* average_pool_host(const float *X, const int xdims[4], const int pool_size,
                         float *Y, const int ydims[4], float *in = NULL){
  float *X_device = in;
  float *Y_device;
  int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
  int d_size = 4 * sizeof(int);
  hipMalloc((void**) &Y_device, Y_size);
  // BUGFIX: zero the *device* output buffer. The original called
  // hipMemset(Y, ...) on the host pointer, which is invalid and left
  // Y_device uninitialized.
  hipMemset(Y_device, 0, Y_size);
  // publish tensor dimensions through the __constant__ symbols the kernel reads
  hipMemcpyToSymbol(xdims_k, xdims, d_size);
  hipMemcpyToSymbol(ydims_k, ydims, d_size);
  int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
  int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
  int Z = H_grid * W_grid;  // 2-D tile grid linearized into blockIdx.z
  dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 gridDim(ydims[0], ydims[3], Z);
  hipLaunchKernelGGL(( average_pool_kernel), dim3(gridDim), dim3(blockDim), 0, 0, X_device, Y_device, pool_size);
  hipDeviceSynchronize();
  hipFree(X_device);  // the device input is consumed by this layer
  return Y_device;
}
/*__global__ void fully_forward_kernel(float *X, float *W, float *Y, bool relu){
int i = blockIdx.x * TILE_WIDTH + threadIdx.x;
int j = blockIdx.y * TILE_WIDTH + threadIdx.y;
if(i < xdims_f[0] && j < wdims_f[1]){
float sum = 0;
for (int k = 0; k < xdims_f[1]; k++) {
sum += X[i * xdims_f[1] + k] * W[k * wdims_f[1] + j];
}
Y[i * wdims_f[1] + j] = (relu && (sum < 0)) ? 0: sum;
}
}
float* fully_forward_host(const float *X, const int xdims[2], float *W, const int wdims[2],
float *Y, const int ydims[2], float *in = NULL, bool copy = false){
float *X_device = in;
float *W_device;
float *Y_device;
//int X_size = xdims[0] * xdims[1] * sizeof(float);
int W_size = wdims[0] * wdims[1] * sizeof(float);
int Y_size = ydims[0] * ydims[1] * sizeof(float);
int d_size = 2 * sizeof(int);
//if(in == NULL) hipMalloc((void**) &X_device, X_size);
hipMalloc((void**) &W_device, W_size);
hipMalloc((void**) &Y_device, Y_size);
//if(in == NULL) hipMemcpy(X_device, X, X_size, hipMemcpyHostToDevice);
hipMemcpy(W_device, W, W_size, hipMemcpyHostToDevice);
//hipMemcpy(Y_device, Y, Y_size, hipMemcpyHostToDevice);
hipMemset(Y, 0, Y_size);
hipMemcpyToSymbol(xdims_f, xdims, d_size);
hipMemcpyToSymbol(wdims_f, wdims, d_size);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim((xdims[0]+TILE_WIDTH-1)/TILE_WIDTH, (wdims[1]+TILE_WIDTH-1)/TILE_WIDTH, 1);
fully_forward_kernel<<<gridDim, blockDim>>>(X_device, W_device, Y_device, !copy);
hipDeviceSynchronize();
if(copy) hipMemcpy(Y, Y_device, Y_size, hipMemcpyDeviceToHost);
hipFree(X_device);
hipFree(W_device);
//hipFree(Y_device);
return Y_device;
}*/
// Tiled dense (fully-connected) layer: C = A x B with optional ReLU.
// A is xdims_f[0] x xdims_f[1], B is wdims_f[0] x wdims_f[1]; dims come from
// __constant__ memory. Requires a dynamic shared-memory allocation of
// 2 * TILE_WIDTH * TILE_WIDTH floats (one tile of A plus one tile of B).
__global__ void fully_forward_kernel_tiled(float *A, float *B, float *C, bool relu) {
  extern __shared__ float shmemmrelu[];
  float *Ads = &shmemmrelu[0];                        // TILE_WIDTH^2 tile of A
  float *Bds = &shmemmrelu[TILE_WIDTH * TILE_WIDTH];  // TILE_WIDTH^2 tile of B
  int bx=blockIdx.x;int by=blockIdx.y;
  int tx=threadIdx.x;int ty=threadIdx.y;
  int Row=by*TILE_WIDTH+ty;  // output row this thread computes
  int Col=bx*TILE_WIDTH+tx;  // output column this thread computes
  float Cvalue=0;
  // march tile-by-tile along the shared (inner) dimension
  for (int ph = 0; ph < (xdims_f[1] + TILE_WIDTH - 1) / TILE_WIDTH; ++ph){
    // stage one tile of A, zero-padding reads outside the matrix
    if ((Row < xdims_f[0]) && (ph * TILE_WIDTH + tx < xdims_f[1]))
      Ads[ty * TILE_WIDTH + tx] = A[Row * xdims_f[1] + ph * TILE_WIDTH + tx];
    else
      Ads[ty * TILE_WIDTH + tx] = 0.0;
    // stage one tile of B, zero-padding reads outside the matrix
    if((ph * TILE_WIDTH + ty < wdims_f[0]) && (Col < wdims_f[1]))
      Bds[ty * TILE_WIDTH + tx] = B[((ph * TILE_WIDTH) + ty) * wdims_f[1] + Col];
    else
      Bds[ty * TILE_WIDTH + tx] = 0.0;
    __syncthreads();  // both tiles fully staged before any thread reads them
    for(int k = 0; k < TILE_WIDTH; ++k)
      Cvalue += Ads[ty * TILE_WIDTH + k] * Bds[k * TILE_WIDTH + tx];
    __syncthreads();  // everyone done reading before the next iteration overwrites
  }
  if ((Row < xdims_f[0]) && (Col < wdims_f[1]))
    C[Row * wdims_f[1] + Col] = (Cvalue < 0 && relu) ? 0 : Cvalue;  // optional ReLU
}
// Host driver for the tiled fully-connected layer.
// X is expected to already live on the device (handed in through `in`); the
// weights W and the initial contents of host Y are uploaded here. The kernel
// runs with relu = !copy, and when `copy` is true the product is also copied
// back into Y. The input and weight device buffers are freed; the returned
// device result buffer belongs to the caller.
float* fully_forward_host_tiled(const float *X, const int xdims[2], float *W, const int wdims[2],
                                float *Y, const int ydims[2], float *in, bool copy = false){
  const int wBytes = wdims[0] * wdims[1] * sizeof(float);
  const int yBytes = ydims[0] * ydims[1] * sizeof(float);
  const int dimBytes = 2 * sizeof(int);
  float *d_x = in;
  float *d_w;
  float *d_y;
  hipMalloc((void**) &d_w, wBytes);
  hipMalloc((void**) &d_y, yBytes);
  hipMemcpy(d_w, W, wBytes, hipMemcpyHostToDevice);
  hipMemcpy(d_y, Y, yBytes, hipMemcpyHostToDevice);
  // matrix dimensions travel through __constant__ memory
  hipMemcpyToSymbol(xdims_f, xdims, dimBytes);
  hipMemcpyToSymbol(wdims_f, wdims, dimBytes);
  dim3 threads(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 blocks((wdims[1] + TILE_WIDTH - 1) / TILE_WIDTH,
              (xdims[0] + TILE_WIDTH - 1) / TILE_WIDTH, 1);
  const size_t smemBytes = sizeof(float) * (TILE_WIDTH * TILE_WIDTH * 2);  // A tile + B tile
  hipLaunchKernelGGL(( fully_forward_kernel_tiled), dim3(blocks), dim3(threads), smemBytes, 0, d_x, d_w, d_y, !copy);
  hipDeviceSynchronize();
  if(copy) hipMemcpy(Y, d_y, yBytes, hipMemcpyDeviceToHost);
  hipFree(d_x);
  hipFree(d_w);
  return d_y;
}
// Convolution expressed as an implicit GEMM (the im2col gather happens on the
// fly while loading tiles), fused with ReLU. Per batch image n (blockIdx.z):
//   A = filter tensor W with dims [P, Q, C, M], read as an M x (C*P*Q) matrix
//   B = input X, gathered as a (C*P*Q) x (H_out*W_out) matrix
//   C = output Y [N, H_out, W_out, M], receives ReLU(partial) via atomicAdd
// Requires 2 * TILE_WIDTH * TILE_WIDTH floats of dynamic shared memory.
// Tensor dims come from __constant__ xdims_k / wdims_k / ydims_k.
__global__ void gemmrelu_conv_kernel_merge(float *A, float *B, float *C) {
  extern __shared__ float shmemmrelu[];
  float *Ads = &shmemmrelu[0];
  float *Bds = &shmemmrelu[TILE_WIDTH * TILE_WIDTH];
  // Y[n, output height , output width, m] = 0 // W[p filter_h, q filter_w, c, m] // X[n, h + p,w + q,c]
  int n = blockIdx.z;  // batch image handled by this block
  int numARows = ydims_k[3];                               // M  (output channels)
  int numAColumns = xdims_k[3] * wdims_k[0] * wdims_k[1];  // C*P*Q
  int numBRows = xdims_k[3] * wdims_k[0] * wdims_k[1];     // C*P*Q
  int numBColumns = ydims_k[1] * ydims_k[2];               // H_out*W_out
  int numCRows = ydims_k[3];
  int numCColumns = ydims_k[1] * ydims_k[2];
  int bx=blockIdx.x; int by=blockIdx.y;
  int tx=threadIdx.x; int ty=threadIdx.y;
  int Row=by*TILE_WIDTH+ty;  // output channel m
  int Col=bx*TILE_WIDTH+tx;  // linearized output pixel h_out*W_out + w_out
  float Cvalue=0;
  for (int ph=0;ph<(numAColumns+TILE_WIDTH-1)/TILE_WIDTH;++ph){
    if ((Row<numARows)&&(ph*TILE_WIDTH+tx<numAColumns)){
      // decode the flat inner index into the (c, p, q) filter-tap coordinate
      int m = by * TILE_WIDTH + ty;
      int c = (ph * TILE_WIDTH + tx)/ (wdims_k[0] * wdims_k[1]);
      int p = ((ph * TILE_WIDTH + tx) % (wdims_k[0] * wdims_k[1])) / wdims_k[1];
      int q = ((ph * TILE_WIDTH + tx) % (wdims_k[0] * wdims_k[1])) % wdims_k[1];
      Ads[ty * TILE_WIDTH + tx]=A[p * wdims_k[1] * wdims_k[2] * wdims_k[3] + q * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
    }
    else
      Ads[ty * TILE_WIDTH + tx]=0.0;
    if((ph * TILE_WIDTH + ty<numBRows)&&(Col<numBColumns)){
      // gather the input element for tap (cx, px, qx) at output pixel (h_out, w_out)
      int cx = (ph * TILE_WIDTH + ty) / (wdims_k[0] * wdims_k[1]);
      int px = ((ph * TILE_WIDTH + ty) % (wdims_k[0] * wdims_k[1])) / wdims_k[1];
      int qx = ((ph * TILE_WIDTH + ty) % (wdims_k[0] * wdims_k[1])) % wdims_k[1];
      int h_out = (bx * TILE_WIDTH + tx) / ydims_k[2];
      int w_out = (bx * TILE_WIDTH + tx) % ydims_k[2];
      Bds[ty * TILE_WIDTH + tx] =
        B[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (h_out + px) * xdims_k[2] * xdims_k[3] +(w_out + qx) * xdims_k[3] + cx];}
    else
      Bds[ty * TILE_WIDTH + tx] = 0.0;
    __syncthreads();  // both tiles staged before use
    for(int k=0; k<TILE_WIDTH; ++k){
      Cvalue += Ads[ty * TILE_WIDTH + k]*Bds[k * TILE_WIDTH + tx];}
    __syncthreads();  // done reading before the next iteration restages
  }
  if ((Row<numCRows)&&(Col<numCColumns)){
    // NOTE(review): ReLU is applied to this block's value *before* atomicAdd.
    // With a zero-initialized Y and a single contribution per element this
    // equals ReLU(conv); it would be wrong if several partials accumulated
    // into the same element — confirm the launch guarantees one writer.
    atomicAdd(&C[n * ydims_k[1] * ydims_k[2] * ydims_k[3] + (Col / ydims_k[2]) * ydims_k[2] * ydims_k[3]
      + (Col % ydims_k[2]) * ydims_k[3] + Row], (Cvalue < 0) ? 0 : Cvalue);
    //C[n * ydims_k[1] * ydims_k[2] * ydims_k[3] + (Col / ydims_k[2]) * ydims_k[2] * ydims_k[3]
    //+ (Col % ydims_k[2]) * ydims_k[3] + Row] = (Cvalue < 0) ? 0 : Cvalue;
  }
}
// Host driver for the fused implicit-GEMM convolution + ReLU kernel.
// Uploads the filter W (and the input X when no device buffer `in` is given),
// publishes the tensor dims through __constant__ symbols, zeroes the output
// (the kernel accumulates with atomicAdd), and launches one block-grid per
// batch image. The device input buffer — whether freshly uploaded or passed
// via `in` — is freed here; the returned device result belongs to the caller.
float* convLayer_forward_merge(float *X, float *W, float *Y, const int xdims[4], const int ydims[4],
                               const int wdims[4], float *in = NULL){
  const int dimBytes = sizeof(int) * 4;
  const int wBytes = sizeof(float) * wdims[0] * wdims[1] * wdims[2] * wdims[3];
  const int yBytes = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
  const int xBytes = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(float);
  const size_t smemBytes = sizeof(float) * (TILE_WIDTH * TILE_WIDTH * 2);
  float *d_x = in;
  float *d_w;
  float *d_y;
  hipMalloc((void**) &d_y, yBytes);
  hipMalloc((void**) &d_w, wBytes);
  if(in == NULL) hipMalloc((void**) &d_x, xBytes);
  hipMemset(d_y, 0, yBytes);  // kernel atomically accumulates into Y
  hipMemcpy(d_w, W, wBytes, hipMemcpyHostToDevice);
  if(in == NULL) hipMemcpy(d_x, X, xBytes, hipMemcpyHostToDevice);
  hipMemcpyToSymbol(wdims_k, wdims, dimBytes);
  hipMemcpyToSymbol(xdims_k, xdims, dimBytes);
  hipMemcpyToSymbol(ydims_k, ydims, dimBytes);
  // x: output pixels, y: output channels, z: batch image
  dim3 threads(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 blocks((ydims[1] * ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH,
              (ydims[3] + TILE_WIDTH - 1) / TILE_WIDTH, ydims[0]);
  hipLaunchKernelGGL(( gemmrelu_conv_kernel_merge), dim3(blocks), dim3(threads), smemBytes, 0, d_w, d_x, d_y);
  hipDeviceSynchronize();
  hipFree(d_x);
  hipFree(d_w);
  return d_y;
}
// Elementwise float -> half cast: one thread per element, launched 1-D with
// TILE_WIDTH*TILE_WIDTH threads per block.
__global__ void float_to_half_kernel(float *in, half *out, int size){
  const int idx = threadIdx.x + blockIdx.x * (TILE_WIDTH * TILE_WIDTH);
  if(idx >= size) return;  // tail guard for the final partial block
  out[idx] = __float2half(in[idx]);
}
// Uploads `size` floats from host `in`, converts them to half precision on
// the GPU, and returns the device half buffer (caller owns/frees it).
half* float_to_half_host(float *in, int size){
  float *in_device;
  half *out_device;
  hipMalloc((void**) &in_device, sizeof(float) * size);
  hipMalloc((void**) &out_device, size * sizeof(half));
  // BUGFIX: copy into the device allocation itself. The original passed
  // (void**) &in_device — the address of the host pointer variable — so it
  // overwrote host stack memory and never filled the device buffer.
  hipMemcpy(in_device, in, sizeof(float) * size, hipMemcpyHostToDevice);
  hipMemset(out_device, 0, size * sizeof(half));
  dim3 blockDim(TILE_WIDTH * TILE_WIDTH, 1, 1);
  dim3 gridDim((size + TILE_WIDTH * TILE_WIDTH - 1) / (TILE_WIDTH * TILE_WIDTH), 1, 1);
  hipLaunchKernelGGL(( float_to_half_kernel), dim3(gridDim), dim3(blockDim), 0, 0, in_device, out_device, size);
  hipDeviceSynchronize();
  hipFree(in_device);  // temporary upload buffer
  return out_device;
}
// Elementwise half -> float cast: one thread per element, launched 1-D with
// TILE_WIDTH*TILE_WIDTH threads per block.
__global__ void half_to_float_kernel(half *in, float *out, int size){
  const int idx = threadIdx.x + blockIdx.x * (TILE_WIDTH * TILE_WIDTH);
  if(idx >= size) return;  // tail guard for the final partial block
  out[idx] = __half2float(in[idx]);
}
// Uploads `size` halfs from host `in`, converts them to float on the GPU,
// and returns the device float buffer (caller owns/frees it).
float* half_to_float_host(half *in, int size){
  half *in_device;
  float *out_device;
  hipMalloc((void**) &in_device, sizeof(half) * size);
  hipMalloc((void**) &out_device, sizeof(float) * size);
  // BUGFIX: copy into the device allocation itself. The original passed
  // (void**) &in_device — the address of the host pointer variable — so it
  // overwrote host stack memory and never filled the device buffer.
  hipMemcpy(in_device, in, sizeof(half) * size, hipMemcpyHostToDevice);
  // BUGFIX: out_device holds floats; the original memset used sizeof(half)
  // and therefore cleared only half of the buffer.
  hipMemset(out_device, 0, size * sizeof(float));
  dim3 blockDim(TILE_WIDTH * TILE_WIDTH, 1, 1);
  dim3 gridDim((size + TILE_WIDTH * TILE_WIDTH - 1) / (TILE_WIDTH * TILE_WIDTH), 1, 1);
  hipLaunchKernelGGL(( half_to_float_kernel), dim3(gridDim), dim3(blockDim), 0, 0, in_device, out_device, size);
  hipDeviceSynchronize();
  hipFree(in_device);  // temporary upload buffer
  return out_device;
}
| 17a6ca4a18316afdfc8aa52583009816e71958e7.cu | #include <cuda.h>
#include <cuda_fp16.h>
#include <iostream>
#define TILE_WIDTH 16
__constant__ int xdims_k[4];
__constant__ int wdims_k[4];
__constant__ int ydims_k[4];
__constant__ int xdims_f[2];
__constant__ int wdims_f[2];
/*__global__ void conv_forward_kernel_basic(float *X, float *W, float *Y){
int n, m, h, w;
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
n = blockIdx.x;
m = blockIdx.y;
h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
int C = wdims_k[2]; // in_channel
int P = wdims_k[0]; // filter_h
int Q = wdims_k[1]; // filter_w
if(h < ydims_k[1] && w < ydims_k[2]){
float acc = 0;
for(int c = 0; c < C; c++){
for(int p = 0; p < P; p++){
for(int q = 0; q < Q; q++)
acc += X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (h + p) * xdims_k[2] * xdims_k[3] + (w + q) * xdims_k[3] + c]
* W[p * wdims_k[1] * wdims_k[2] * wdims_k[3] + q * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
}
}
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = (acc < 0) ? 0 : acc;
}
}
void conv_forward_host_basic(const float *X, const int xdims[4], const float *W,
const int wdims[4], float *Y, const int ydims[4]){
float *X_device;
float *W_device;
float *Y_device;
int X_size = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(float);
int W_size = wdims[0] * wdims[1] * wdims[2] * wdims[3] * sizeof(float);
int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
int d_size = 4 * sizeof(int);
cudaMalloc((void**) &X_device, X_size);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(xdims_k, xdims, d_size);
cudaMemcpyToSymbol(wdims_k, wdims, d_size);
cudaMemcpyToSymbol(ydims_k, ydims, d_size);
// std::cout << X_size/sizeof(float) << ", " << W_size/sizeof(float) << ", " << Y_size/sizeof(float) << std::endl;
int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(ydims[0], ydims[3], Z);
conv_forward_kernel_basic<<<gridDim, blockDim>>>(X_device, W_device, Y_device);
cudaDeviceSynchronize();
cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
cudaFree(Y_device);
}*/
/*__global__ void conv_forward_kernel_tiled(half *X, half *W, half *Y){
int C = xdims_k[3]; // in_channel
int P = wdims_k[0]; // filter_h
int Q = wdims_k[1]; // filter_w
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + Q - 1;
int X_tile_height = TILE_WIDTH + P - 1;
extern __shared__ half shmem[];
half *X_shared = &shmem[0];
half *W_shared = &shmem[X_tile_width * X_tile_height];
n = blockIdx.x;
m = blockIdx.y;
h0 = threadIdx.y;
w0 = threadIdx.x;
h_base = (blockIdx.z / W_grid) * TILE_WIDTH;
w_base = (blockIdx.z % W_grid) * TILE_WIDTH;
h = h_base + h0;
w = w_base + w0;
float acc = 0;
for(int c = 0; c < C; c++){ // sum over input channels
if((h0 < P) && (w0 < Q)) // load weight
W_shared[h0 * Q + w0] = W[h0 * Q * wdims_k[2] * wdims_k[3] + w0 * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
__syncthreads();
for(int i = h; i < h_base + X_tile_height; i += TILE_WIDTH){ // load tiles
for(int j = w; j < w_base + X_tile_width; j += TILE_WIDTH)
X_shared[(i - h_base) * X_tile_width + (j - w_base)] =
X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + i * xdims_k[2] * xdims_k[3] + j * xdims_k[3] + c];
}
__syncthreads();
if(h < ydims_k[1] && w < ydims_k[2]){
for(int p = 0; p < P; p++){ // sum
for(int q = 0; q < Q; q++)
acc += X_shared[(h0 + p) * X_tile_width + (w0 + q)] * W_shared[p * Q + q];
}
}
__syncthreads();
}
if((h < ydims_k[1]) && (w < ydims_k[2]))
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = (acc < 0) ? 0 : acc;
}
half* conv_forward_host_tiled(const half *X, const int xdims[4], const half *W, const int wdims[4],
half *Y, const int ydims[4], half *in = NULL){
half *X_device = in;
half *W_device;
half *Y_device;
int X_size = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(half);
int W_size = wdims[0] * wdims[1] * wdims[2] * wdims[3] * sizeof(half);
int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(half);
int d_size = 4 * sizeof(int);
if(in == NULL) cudaMalloc((void**) &X_device, X_size);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
if(in == NULL) cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
//cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
cudaMemset(Y, 0, Y_size);
cudaMemcpyToSymbol(xdims_k, xdims, d_size);
cudaMemcpyToSymbol(wdims_k, wdims, d_size);
cudaMemcpyToSymbol(ydims_k, ydims, d_size);
int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(ydims[0], ydims[3], Z);
size_t shmem_size = sizeof(float) * ((TILE_WIDTH + wdims[0] - 1) * (TILE_WIDTH + wdims[1] - 1) + wdims[0] * wdims[1]);
conv_forward_kernel_tiled<<<gridDim, blockDim, shmem_size>>>(X_device, W_device, Y_device);
cudaDeviceSynchronize();
//cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
//cudaFree(Y_device);
return Y_device;
}*/
// 2-D average pooling over NHWC tensors: each thread produces one output
// element Y[n, h, w, m] as the mean of a pool_size x pool_size window of X.
// Grid layout: (batch, channel, tileRow * W_grid + tileCol); block is
// TILE_WIDTH x TILE_WIDTH. Tensor dims are read from __constant__ xdims_k
// (input) and ydims_k (output).
__global__ void average_pool_kernel(float *X, float *Y, int pool_size){
  int n, m, h, w;
  // tiles needed to cover the output width; used to delinearize blockIdx.z
  int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
  n = blockIdx.x;  // batch index
  m = blockIdx.y;  // channel index
  h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;  // output row
  w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;  // output column
  if(h < ydims_k[1] && w < ydims_k[2]){  // guard the ragged edge tiles
    float acc = 0;
    // accumulate the non-overlapping window starting at (h*pool_size, w*pool_size)
    for(int p = 0; p < pool_size; p++){
      for(int q = 0; q < pool_size; q++)
        acc += X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (pool_size * h + p) * xdims_k[2] * xdims_k[3] +
                 (pool_size * w + q) * xdims_k[3] + m];
    }
    Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = acc / (1.0f * pool_size * pool_size);
  }
}
// Host driver for average_pool_kernel (NHWC tensors).
//   X         host input (currently unused: the host->device upload path is
//             disabled, so the input is expected via `in`)
//   xdims     input dims  [N, H, W, C]
//   pool_size pooling window edge length
//   Y         host output buffer (unused here; result stays on the device)
//   ydims     output dims [N, H', W', C]
//   in        device-resident input buffer; it is consumed (freed) here.
//             NOTE(review): if `in` is NULL the kernel reads a null pointer —
//             callers appear to always pass a device buffer; confirm.
// Returns a newly allocated device buffer with the pooled result; ownership
// passes to the caller.
float* average_pool_host(const float *X, const int xdims[4], const int pool_size,
                         float *Y, const int ydims[4], float *in = NULL){
  float *X_device = in;
  float *Y_device;
  int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
  int d_size = 4 * sizeof(int);
  cudaMalloc((void**) &Y_device, Y_size);
  // BUGFIX: zero the *device* output buffer. The original called
  // cudaMemset(Y, ...) on the host pointer, which is invalid and left
  // Y_device uninitialized.
  cudaMemset(Y_device, 0, Y_size);
  // publish tensor dimensions through the __constant__ symbols the kernel reads
  cudaMemcpyToSymbol(xdims_k, xdims, d_size);
  cudaMemcpyToSymbol(ydims_k, ydims, d_size);
  int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
  int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
  int Z = H_grid * W_grid;  // 2-D tile grid linearized into blockIdx.z
  dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 gridDim(ydims[0], ydims[3], Z);
  average_pool_kernel<<<gridDim, blockDim>>>(X_device, Y_device, pool_size);
  cudaDeviceSynchronize();
  cudaFree(X_device);  // the device input is consumed by this layer
  return Y_device;
}
/*__global__ void fully_forward_kernel(float *X, float *W, float *Y, bool relu){
int i = blockIdx.x * TILE_WIDTH + threadIdx.x;
int j = blockIdx.y * TILE_WIDTH + threadIdx.y;
if(i < xdims_f[0] && j < wdims_f[1]){
float sum = 0;
for (int k = 0; k < xdims_f[1]; k++) {
sum += X[i * xdims_f[1] + k] * W[k * wdims_f[1] + j];
}
Y[i * wdims_f[1] + j] = (relu && (sum < 0)) ? 0: sum;
}
}
float* fully_forward_host(const float *X, const int xdims[2], float *W, const int wdims[2],
float *Y, const int ydims[2], float *in = NULL, bool copy = false){
float *X_device = in;
float *W_device;
float *Y_device;
//int X_size = xdims[0] * xdims[1] * sizeof(float);
int W_size = wdims[0] * wdims[1] * sizeof(float);
int Y_size = ydims[0] * ydims[1] * sizeof(float);
int d_size = 2 * sizeof(int);
//if(in == NULL) cudaMalloc((void**) &X_device, X_size);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
//if(in == NULL) cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
//cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
cudaMemset(Y, 0, Y_size);
cudaMemcpyToSymbol(xdims_f, xdims, d_size);
cudaMemcpyToSymbol(wdims_f, wdims, d_size);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim((xdims[0]+TILE_WIDTH-1)/TILE_WIDTH, (wdims[1]+TILE_WIDTH-1)/TILE_WIDTH, 1);
fully_forward_kernel<<<gridDim, blockDim>>>(X_device, W_device, Y_device, !copy);
cudaDeviceSynchronize();
if(copy) cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
//cudaFree(Y_device);
return Y_device;
}*/
// Tiled dense (fully-connected) layer: C = A x B with optional ReLU.
// A is xdims_f[0] x xdims_f[1], B is wdims_f[0] x wdims_f[1]; dims come from
// __constant__ memory. Requires a dynamic shared-memory allocation of
// 2 * TILE_WIDTH * TILE_WIDTH floats (one tile of A plus one tile of B).
__global__ void fully_forward_kernel_tiled(float *A, float *B, float *C, bool relu) {
  extern __shared__ float shmemmrelu[];
  float *Ads = &shmemmrelu[0];                        // TILE_WIDTH^2 tile of A
  float *Bds = &shmemmrelu[TILE_WIDTH * TILE_WIDTH];  // TILE_WIDTH^2 tile of B
  int bx=blockIdx.x;int by=blockIdx.y;
  int tx=threadIdx.x;int ty=threadIdx.y;
  int Row=by*TILE_WIDTH+ty;  // output row this thread computes
  int Col=bx*TILE_WIDTH+tx;  // output column this thread computes
  float Cvalue=0;
  // march tile-by-tile along the shared (inner) dimension
  for (int ph = 0; ph < (xdims_f[1] + TILE_WIDTH - 1) / TILE_WIDTH; ++ph){
    // stage one tile of A, zero-padding reads outside the matrix
    if ((Row < xdims_f[0]) && (ph * TILE_WIDTH + tx < xdims_f[1]))
      Ads[ty * TILE_WIDTH + tx] = A[Row * xdims_f[1] + ph * TILE_WIDTH + tx];
    else
      Ads[ty * TILE_WIDTH + tx] = 0.0;
    // stage one tile of B, zero-padding reads outside the matrix
    if((ph * TILE_WIDTH + ty < wdims_f[0]) && (Col < wdims_f[1]))
      Bds[ty * TILE_WIDTH + tx] = B[((ph * TILE_WIDTH) + ty) * wdims_f[1] + Col];
    else
      Bds[ty * TILE_WIDTH + tx] = 0.0;
    __syncthreads();  // both tiles fully staged before any thread reads them
    for(int k = 0; k < TILE_WIDTH; ++k)
      Cvalue += Ads[ty * TILE_WIDTH + k] * Bds[k * TILE_WIDTH + tx];
    __syncthreads();  // everyone done reading before the next iteration overwrites
  }
  if ((Row < xdims_f[0]) && (Col < wdims_f[1]))
    C[Row * wdims_f[1] + Col] = (Cvalue < 0 && relu) ? 0 : Cvalue;  // optional ReLU
}
// Host driver for the tiled fully-connected layer.
// X is expected to already live on the device (handed in through `in`); the
// weights W and the initial contents of host Y are uploaded here. The kernel
// runs with relu = !copy, and when `copy` is true the product is also copied
// back into Y. The input and weight device buffers are freed; the returned
// device result buffer belongs to the caller.
float* fully_forward_host_tiled(const float *X, const int xdims[2], float *W, const int wdims[2],
                                float *Y, const int ydims[2], float *in, bool copy = false){
  const int wBytes = wdims[0] * wdims[1] * sizeof(float);
  const int yBytes = ydims[0] * ydims[1] * sizeof(float);
  const int dimBytes = 2 * sizeof(int);
  float *d_x = in;
  float *d_w;
  float *d_y;
  cudaMalloc((void**) &d_w, wBytes);
  cudaMalloc((void**) &d_y, yBytes);
  cudaMemcpy(d_w, W, wBytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, Y, yBytes, cudaMemcpyHostToDevice);
  // matrix dimensions travel through __constant__ memory
  cudaMemcpyToSymbol(xdims_f, xdims, dimBytes);
  cudaMemcpyToSymbol(wdims_f, wdims, dimBytes);
  dim3 threads(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 blocks((wdims[1] + TILE_WIDTH - 1) / TILE_WIDTH,
              (xdims[0] + TILE_WIDTH - 1) / TILE_WIDTH, 1);
  const size_t smemBytes = sizeof(float) * (TILE_WIDTH * TILE_WIDTH * 2);  // A tile + B tile
  fully_forward_kernel_tiled<<<blocks, threads, smemBytes>>>(d_x, d_w, d_y, !copy);
  cudaDeviceSynchronize();
  if(copy) cudaMemcpy(Y, d_y, yBytes, cudaMemcpyDeviceToHost);
  cudaFree(d_x);
  cudaFree(d_w);
  return d_y;
}
// Convolution expressed as an implicit GEMM (the im2col gather happens on the
// fly while loading tiles), fused with ReLU. Per batch image n (blockIdx.z):
//   A = filter tensor W with dims [P, Q, C, M], read as an M x (C*P*Q) matrix
//   B = input X, gathered as a (C*P*Q) x (H_out*W_out) matrix
//   C = output Y [N, H_out, W_out, M], receives ReLU(partial) via atomicAdd
// Requires 2 * TILE_WIDTH * TILE_WIDTH floats of dynamic shared memory.
// Tensor dims come from __constant__ xdims_k / wdims_k / ydims_k.
__global__ void gemmrelu_conv_kernel_merge(float *A, float *B, float *C) {
  extern __shared__ float shmemmrelu[];
  float *Ads = &shmemmrelu[0];
  float *Bds = &shmemmrelu[TILE_WIDTH * TILE_WIDTH];
  // Y[n, output height , output width, m] = 0 // W[p filter_h, q filter_w, c, m] // X[n, h + p,w + q,c]
  int n = blockIdx.z;  // batch image handled by this block
  int numARows = ydims_k[3];                               // M  (output channels)
  int numAColumns = xdims_k[3] * wdims_k[0] * wdims_k[1];  // C*P*Q
  int numBRows = xdims_k[3] * wdims_k[0] * wdims_k[1];     // C*P*Q
  int numBColumns = ydims_k[1] * ydims_k[2];               // H_out*W_out
  int numCRows = ydims_k[3];
  int numCColumns = ydims_k[1] * ydims_k[2];
  int bx=blockIdx.x; int by=blockIdx.y;
  int tx=threadIdx.x; int ty=threadIdx.y;
  int Row=by*TILE_WIDTH+ty;  // output channel m
  int Col=bx*TILE_WIDTH+tx;  // linearized output pixel h_out*W_out + w_out
  float Cvalue=0;
  for (int ph=0;ph<(numAColumns+TILE_WIDTH-1)/TILE_WIDTH;++ph){
    if ((Row<numARows)&&(ph*TILE_WIDTH+tx<numAColumns)){
      // decode the flat inner index into the (c, p, q) filter-tap coordinate
      int m = by * TILE_WIDTH + ty;
      int c = (ph * TILE_WIDTH + tx)/ (wdims_k[0] * wdims_k[1]);
      int p = ((ph * TILE_WIDTH + tx) % (wdims_k[0] * wdims_k[1])) / wdims_k[1];
      int q = ((ph * TILE_WIDTH + tx) % (wdims_k[0] * wdims_k[1])) % wdims_k[1];
      Ads[ty * TILE_WIDTH + tx]=A[p * wdims_k[1] * wdims_k[2] * wdims_k[3] + q * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
    }
    else
      Ads[ty * TILE_WIDTH + tx]=0.0;
    if((ph * TILE_WIDTH + ty<numBRows)&&(Col<numBColumns)){
      // gather the input element for tap (cx, px, qx) at output pixel (h_out, w_out)
      int cx = (ph * TILE_WIDTH + ty) / (wdims_k[0] * wdims_k[1]);
      int px = ((ph * TILE_WIDTH + ty) % (wdims_k[0] * wdims_k[1])) / wdims_k[1];
      int qx = ((ph * TILE_WIDTH + ty) % (wdims_k[0] * wdims_k[1])) % wdims_k[1];
      int h_out = (bx * TILE_WIDTH + tx) / ydims_k[2];
      int w_out = (bx * TILE_WIDTH + tx) % ydims_k[2];
      Bds[ty * TILE_WIDTH + tx] =
        B[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (h_out + px) * xdims_k[2] * xdims_k[3] +(w_out + qx) * xdims_k[3] + cx];}
    else
      Bds[ty * TILE_WIDTH + tx] = 0.0;
    __syncthreads();  // both tiles staged before use
    for(int k=0; k<TILE_WIDTH; ++k){
      Cvalue += Ads[ty * TILE_WIDTH + k]*Bds[k * TILE_WIDTH + tx];}
    __syncthreads();  // done reading before the next iteration restages
  }
  if ((Row<numCRows)&&(Col<numCColumns)){
    // NOTE(review): ReLU is applied to this block's value *before* atomicAdd.
    // With a zero-initialized Y and a single contribution per element this
    // equals ReLU(conv); it would be wrong if several partials accumulated
    // into the same element — confirm the launch guarantees one writer.
    atomicAdd(&C[n * ydims_k[1] * ydims_k[2] * ydims_k[3] + (Col / ydims_k[2]) * ydims_k[2] * ydims_k[3]
      + (Col % ydims_k[2]) * ydims_k[3] + Row], (Cvalue < 0) ? 0 : Cvalue);
    //C[n * ydims_k[1] * ydims_k[2] * ydims_k[3] + (Col / ydims_k[2]) * ydims_k[2] * ydims_k[3]
    //+ (Col % ydims_k[2]) * ydims_k[3] + Row] = (Cvalue < 0) ? 0 : Cvalue;
  }
}
// Host driver for the fused implicit-GEMM convolution + ReLU kernel.
// Uploads the filter W (and the input X when no device buffer `in` is given),
// publishes the tensor dims through __constant__ symbols, zeroes the output
// (the kernel accumulates with atomicAdd), and launches one block-grid per
// batch image. The device input buffer — whether freshly uploaded or passed
// via `in` — is freed here; the returned device result belongs to the caller.
float* convLayer_forward_merge(float *X, float *W, float *Y, const int xdims[4], const int ydims[4],
                               const int wdims[4], float *in = NULL){
  const int dimBytes = sizeof(int) * 4;
  const int wBytes = sizeof(float) * wdims[0] * wdims[1] * wdims[2] * wdims[3];
  const int yBytes = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
  const int xBytes = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(float);
  const size_t smemBytes = sizeof(float) * (TILE_WIDTH * TILE_WIDTH * 2);
  float *d_x = in;
  float *d_w;
  float *d_y;
  cudaMalloc((void**) &d_y, yBytes);
  cudaMalloc((void**) &d_w, wBytes);
  if(in == NULL) cudaMalloc((void**) &d_x, xBytes);
  cudaMemset(d_y, 0, yBytes);  // kernel atomically accumulates into Y
  cudaMemcpy(d_w, W, wBytes, cudaMemcpyHostToDevice);
  if(in == NULL) cudaMemcpy(d_x, X, xBytes, cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(wdims_k, wdims, dimBytes);
  cudaMemcpyToSymbol(xdims_k, xdims, dimBytes);
  cudaMemcpyToSymbol(ydims_k, ydims, dimBytes);
  // x: output pixels, y: output channels, z: batch image
  dim3 threads(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 blocks((ydims[1] * ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH,
              (ydims[3] + TILE_WIDTH - 1) / TILE_WIDTH, ydims[0]);
  gemmrelu_conv_kernel_merge<<<blocks, threads, smemBytes>>>(d_w, d_x, d_y);
  cudaDeviceSynchronize();
  cudaFree(d_x);
  cudaFree(d_w);
  return d_y;
}
// Elementwise float -> half cast: one thread per element, launched 1-D with
// TILE_WIDTH*TILE_WIDTH threads per block.
__global__ void float_to_half_kernel(float *in, half *out, int size){
  const int idx = threadIdx.x + blockIdx.x * (TILE_WIDTH * TILE_WIDTH);
  if(idx >= size) return;  // tail guard for the final partial block
  out[idx] = __float2half(in[idx]);
}
// Uploads `size` floats from host `in`, converts them to half precision on
// the GPU, and returns the device half buffer (caller owns/frees it).
half* float_to_half_host(float *in, int size){
  float *in_device;
  half *out_device;
  cudaMalloc((void**) &in_device, sizeof(float) * size);
  cudaMalloc((void**) &out_device, size * sizeof(half));
  // BUGFIX: copy into the device allocation itself. The original passed
  // (void**) &in_device — the address of the host pointer variable — so it
  // overwrote host stack memory and never filled the device buffer.
  cudaMemcpy(in_device, in, sizeof(float) * size, cudaMemcpyHostToDevice);
  cudaMemset(out_device, 0, size * sizeof(half));
  dim3 blockDim(TILE_WIDTH * TILE_WIDTH, 1, 1);
  dim3 gridDim((size + TILE_WIDTH * TILE_WIDTH - 1) / (TILE_WIDTH * TILE_WIDTH), 1, 1);
  float_to_half_kernel<<<gridDim, blockDim>>>(in_device, out_device, size);
  cudaDeviceSynchronize();
  cudaFree(in_device);  // temporary upload buffer
  return out_device;
}
// Elementwise half -> float cast: one thread per element, launched 1-D with
// TILE_WIDTH*TILE_WIDTH threads per block.
__global__ void half_to_float_kernel(half *in, float *out, int size){
  const int idx = threadIdx.x + blockIdx.x * (TILE_WIDTH * TILE_WIDTH);
  if(idx >= size) return;  // tail guard for the final partial block
  out[idx] = __half2float(in[idx]);
}
// Converts a host half array into a freshly allocated device float array.
// Returns the device pointer (size floats); the caller owns it and must
// cudaFree it. The input staging buffer is freed here.
float* half_to_float_host(half *in, int size){
    half *in_device;
    float *out_device;
    cudaMalloc((void**) &in_device, sizeof(half) * size);
    cudaMalloc((void**) &out_device, sizeof(float) * size);
    // BUGFIX: copy into the device buffer, not to the address of the
    // host-side pointer variable (the original passed (void**)&in_device).
    cudaMemcpy(in_device, in, sizeof(half) * size, cudaMemcpyHostToDevice);
    // BUGFIX: out_device holds floats; sizeof(half) only cleared half of it.
    cudaMemset(out_device, 0, size * sizeof(float));
    dim3 blockDim(TILE_WIDTH * TILE_WIDTH, 1, 1);
    dim3 gridDim((size + TILE_WIDTH * TILE_WIDTH - 1) / (TILE_WIDTH * TILE_WIDTH), 1, 1);
    half_to_float_kernel<<<gridDim, blockDim>>>(in_device, out_device, size);
    cudaDeviceSynchronize();
    cudaFree(in_device);
    return out_device;
}
|
c89a62bf0a0d95e1f0d787c46e5486f0b415e1ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <float.h>
#include "trilinear_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Forward trilinear interpolation of an RGB image through a 3D LUT.
// image/output are planar RGB: R plane at offset 0, G at width*height*batch,
// B at 2*width*height*batch (see the fixed plane offsets below).
// lut holds dim^3 floats per output channel, channels spaced `shift` apart.
// Each grid-stride iteration processes one pixel (index in [0, nthreads)).
__global__ void TriLinearForward(const int nthreads, const float* lut, const float* image, float* output, const int dim, const int shift, const float binsize, const int width, const int height, const int batch) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Load the pixel's R/G/B from the three planes.
float r = image[index];
float g = image[index + width * height * batch];
float b = image[index + width * height * batch * 2];
// Integer lattice coordinates of the LUT cell containing (r,g,b).
int r_id = floor(r / binsize);
int g_id = floor(g / binsize);
int b_id = floor(b / binsize);
// Fractional position inside the cell, each in [0,1).
float r_d = fmod(r,binsize) / binsize;
float g_d = fmod(g,binsize) / binsize;
float b_d = fmod(b,binsize) / binsize;
// Flat indices of the cell's 8 corners; the LUT is laid out r-fastest,
// then g (stride dim), then b (stride dim*dim).
// NOTE(review): when a channel falls in the last bin, the +1 corners index
// past that axis -- presumably callers guarantee values strictly below the
// top bin edge; verify against the host-side input preparation.
int id000 = r_id + g_id * dim + b_id * dim * dim;
int id100 = r_id + 1 + g_id * dim + b_id * dim * dim;
int id010 = r_id + (g_id + 1) * dim + b_id * dim * dim;
int id110 = r_id + 1 + (g_id + 1) * dim + b_id * dim * dim;
int id001 = r_id + g_id * dim + (b_id + 1) * dim * dim;
int id101 = r_id + 1 + g_id * dim + (b_id + 1) * dim * dim;
int id011 = r_id + (g_id + 1) * dim + (b_id + 1) * dim * dim;
int id111 = r_id + 1 + (g_id + 1) * dim + (b_id + 1) * dim * dim;
// Standard trilinear corner weights (they sum to 1).
float w000 = (1-r_d)*(1-g_d)*(1-b_d);
float w100 = r_d*(1-g_d)*(1-b_d);
float w010 = (1-r_d)*g_d*(1-b_d);
float w110 = r_d*g_d*(1-b_d);
float w001 = (1-r_d)*(1-g_d)*b_d;
float w101 = r_d*(1-g_d)*b_d;
float w011 = (1-r_d)*g_d*b_d;
float w111 = r_d*g_d*b_d;
// Blend the 8 corners once per output channel; `shift` (and shift*2)
// selects the G and B channel tables inside the LUT.
output[index] = w000 * lut[id000] + w100 * lut[id100] +
w010 * lut[id010] + w110 * lut[id110] +
w001 * lut[id001] + w101 * lut[id101] +
w011 * lut[id011] + w111 * lut[id111];
output[index + width * height * batch] = w000 * lut[id000 + shift] + w100 * lut[id100 + shift] +
w010 * lut[id010 + shift] + w110 * lut[id110 + shift] +
w001 * lut[id001 + shift] + w101 * lut[id101 + shift] +
w011 * lut[id011 + shift] + w111 * lut[id111 + shift];
output[index + width * height * batch * 2] = w000 * lut[id000 + shift * 2] + w100 * lut[id100 + shift * 2] +
w010 * lut[id010 + shift * 2] + w110 * lut[id110 + shift * 2] +
w001 * lut[id001 + shift * 2] + w101 * lut[id101 + shift * 2] +
w011 * lut[id011 + shift * 2] + w111 * lut[id111 + shift * 2];
}
}
// Host launcher for TriLinearForward: one thread per pixel, 1024-thread
// blocks, launched on `stream`. Exits the process on a launch error;
// returns 1 on success.
int TriLinearForwardLaucher(const float* lut, const float* image, float* output, const int lut_dim, const int shift, const float binsize, const int width, const int height, const int batch, hipStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int num_pixels = height * width * batch;
    const int num_blocks = (num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock;
    hipLaunchKernelGGL(( TriLinearForward), dim3(num_blocks), dim3(kThreadsPerBlock), 0, stream, num_pixels, lut, image, output, lut_dim, shift, binsize, width, height, batch);
    const hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }
    return 1;
}
// Backward pass of the trilinear 3D-LUT lookup: scatters each pixel's output
// gradient onto the 8 LUT corners it interpolated from, weighted by the same
// trilinear weights as the forward pass. Accumulation is via atomicAdd, so
// lut_grad is only ever added to here (callers provide a zeroed buffer --
// presumably; verify upstream). Layouts match TriLinearForward.
__global__ void TriLinearBackward(const int nthreads, const float* image, const float* image_grad, float* lut_grad, const int dim, const int shift, const float binsize, const int width, const int height, const int batch) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Recompute the forward pass's cell coordinates and weights from the image.
float r = image[index];
float g = image[index + width * height * batch];
float b = image[index + width * height * batch * 2];
int r_id = floor(r / binsize);
int g_id = floor(g / binsize);
int b_id = floor(b / binsize);
float r_d = fmod(r,binsize) / binsize;
float g_d = fmod(g,binsize) / binsize;
float b_d = fmod(b,binsize) / binsize;
// 8 corner indices (r-fastest layout); same potential last-bin overrun as
// the forward kernel -- see NOTE(review) there.
int id000 = r_id + g_id * dim + b_id * dim * dim;
int id100 = r_id + 1 + g_id * dim + b_id * dim * dim;
int id010 = r_id + (g_id + 1) * dim + b_id * dim * dim;
int id110 = r_id + 1 + (g_id + 1) * dim + b_id * dim * dim;
int id001 = r_id + g_id * dim + (b_id + 1) * dim * dim;
int id101 = r_id + 1 + g_id * dim + (b_id + 1) * dim * dim;
int id011 = r_id + (g_id + 1) * dim + (b_id + 1) * dim * dim;
int id111 = r_id + 1 + (g_id + 1) * dim + (b_id + 1) * dim * dim;
float w000 = (1-r_d)*(1-g_d)*(1-b_d);
float w100 = r_d*(1-g_d)*(1-b_d);
float w010 = (1-r_d)*g_d*(1-b_d);
float w110 = r_d*g_d*(1-b_d);
float w001 = (1-r_d)*(1-g_d)*b_d;
float w101 = r_d*(1-g_d)*b_d;
float w011 = (1-r_d)*g_d*b_d;
float w111 = r_d*g_d*b_d;
// R-channel gradient -> first LUT table.
atomicAdd(lut_grad + id000, image_grad[index] * w000);
atomicAdd(lut_grad + id100, image_grad[index] * w100);
atomicAdd(lut_grad + id010, image_grad[index] * w010);
atomicAdd(lut_grad + id110, image_grad[index] * w110);
atomicAdd(lut_grad + id001, image_grad[index] * w001);
atomicAdd(lut_grad + id101, image_grad[index] * w101);
atomicAdd(lut_grad + id011, image_grad[index] * w011);
atomicAdd(lut_grad + id111, image_grad[index] * w111);
// G-channel gradient -> second LUT table (offset by `shift`).
atomicAdd(lut_grad + id000 + shift, image_grad[index + width * height * batch] * w000);
atomicAdd(lut_grad + id100 + shift, image_grad[index + width * height * batch] * w100);
atomicAdd(lut_grad + id010 + shift, image_grad[index + width * height * batch] * w010);
atomicAdd(lut_grad + id110 + shift, image_grad[index + width * height * batch] * w110);
atomicAdd(lut_grad + id001 + shift, image_grad[index + width * height * batch] * w001);
atomicAdd(lut_grad + id101 + shift, image_grad[index + width * height * batch] * w101);
atomicAdd(lut_grad + id011 + shift, image_grad[index + width * height * batch] * w011);
atomicAdd(lut_grad + id111 + shift, image_grad[index + width * height * batch] * w111);
// B-channel gradient -> third LUT table (offset by `shift * 2`).
atomicAdd(lut_grad + id000 + shift * 2, image_grad[index + width * height * batch * 2] * w000);
atomicAdd(lut_grad + id100 + shift * 2, image_grad[index + width * height * batch * 2] * w100);
atomicAdd(lut_grad + id010 + shift * 2, image_grad[index + width * height * batch * 2] * w010);
atomicAdd(lut_grad + id110 + shift * 2, image_grad[index + width * height * batch * 2] * w110);
atomicAdd(lut_grad + id001 + shift * 2, image_grad[index + width * height * batch * 2] * w001);
atomicAdd(lut_grad + id101 + shift * 2, image_grad[index + width * height * batch * 2] * w101);
atomicAdd(lut_grad + id011 + shift * 2, image_grad[index + width * height * batch * 2] * w011);
atomicAdd(lut_grad + id111 + shift * 2, image_grad[index + width * height * batch * 2] * w111);
}
}
// Host launcher for TriLinearBackward; mirrors the forward launcher
// (one thread per pixel, 1024-thread blocks, on `stream`). The kernel only
// accumulates into lut_grad with atomicAdd. Exits on launch error;
// returns 1 on success.
int TriLinearBackwardLaucher(const float* image, const float* image_grad, float* lut_grad, const int lut_dim, const int shift, const float binsize, const int width, const int height, const int batch, hipStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int num_pixels = height * width * batch;
    const int num_blocks = (num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock;
    hipLaunchKernelGGL(( TriLinearBackward), dim3(num_blocks), dim3(kThreadsPerBlock), 0, stream, num_pixels, image, image_grad, lut_grad, lut_dim, shift, binsize, width, height, batch);
    const hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }
    return 1;
}
| c89a62bf0a0d95e1f0d787c46e5486f0b415e1ca.cu | #include <math.h>
#include <float.h>
#include "trilinear_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Forward trilinear interpolation of an RGB image through a 3D LUT.
// image/output are planar RGB: R plane at offset 0, G at width*height*batch,
// B at 2*width*height*batch (see the fixed plane offsets below).
// lut holds dim^3 floats per output channel, channels spaced `shift` apart.
// Each grid-stride iteration processes one pixel (index in [0, nthreads)).
__global__ void TriLinearForward(const int nthreads, const float* lut, const float* image, float* output, const int dim, const int shift, const float binsize, const int width, const int height, const int batch) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Load the pixel's R/G/B from the three planes.
float r = image[index];
float g = image[index + width * height * batch];
float b = image[index + width * height * batch * 2];
// Integer lattice coordinates of the LUT cell containing (r,g,b).
int r_id = floor(r / binsize);
int g_id = floor(g / binsize);
int b_id = floor(b / binsize);
// Fractional position inside the cell, each in [0,1).
float r_d = fmod(r,binsize) / binsize;
float g_d = fmod(g,binsize) / binsize;
float b_d = fmod(b,binsize) / binsize;
// Flat indices of the cell's 8 corners; the LUT is laid out r-fastest,
// then g (stride dim), then b (stride dim*dim).
// NOTE(review): when a channel falls in the last bin, the +1 corners index
// past that axis -- presumably callers guarantee values strictly below the
// top bin edge; verify against the host-side input preparation.
int id000 = r_id + g_id * dim + b_id * dim * dim;
int id100 = r_id + 1 + g_id * dim + b_id * dim * dim;
int id010 = r_id + (g_id + 1) * dim + b_id * dim * dim;
int id110 = r_id + 1 + (g_id + 1) * dim + b_id * dim * dim;
int id001 = r_id + g_id * dim + (b_id + 1) * dim * dim;
int id101 = r_id + 1 + g_id * dim + (b_id + 1) * dim * dim;
int id011 = r_id + (g_id + 1) * dim + (b_id + 1) * dim * dim;
int id111 = r_id + 1 + (g_id + 1) * dim + (b_id + 1) * dim * dim;
// Standard trilinear corner weights (they sum to 1).
float w000 = (1-r_d)*(1-g_d)*(1-b_d);
float w100 = r_d*(1-g_d)*(1-b_d);
float w010 = (1-r_d)*g_d*(1-b_d);
float w110 = r_d*g_d*(1-b_d);
float w001 = (1-r_d)*(1-g_d)*b_d;
float w101 = r_d*(1-g_d)*b_d;
float w011 = (1-r_d)*g_d*b_d;
float w111 = r_d*g_d*b_d;
// Blend the 8 corners once per output channel; `shift` (and shift*2)
// selects the G and B channel tables inside the LUT.
output[index] = w000 * lut[id000] + w100 * lut[id100] +
w010 * lut[id010] + w110 * lut[id110] +
w001 * lut[id001] + w101 * lut[id101] +
w011 * lut[id011] + w111 * lut[id111];
output[index + width * height * batch] = w000 * lut[id000 + shift] + w100 * lut[id100 + shift] +
w010 * lut[id010 + shift] + w110 * lut[id110 + shift] +
w001 * lut[id001 + shift] + w101 * lut[id101 + shift] +
w011 * lut[id011 + shift] + w111 * lut[id111 + shift];
output[index + width * height * batch * 2] = w000 * lut[id000 + shift * 2] + w100 * lut[id100 + shift * 2] +
w010 * lut[id010 + shift * 2] + w110 * lut[id110 + shift * 2] +
w001 * lut[id001 + shift * 2] + w101 * lut[id101 + shift * 2] +
w011 * lut[id011 + shift * 2] + w111 * lut[id111 + shift * 2];
}
}
// Host launcher for TriLinearForward: one thread per pixel, 1024-thread
// blocks, launched on `stream`. Exits the process on a launch error;
// returns 1 on success.
int TriLinearForwardLaucher(const float* lut, const float* image, float* output, const int lut_dim, const int shift, const float binsize, const int width, const int height, const int batch, cudaStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int num_pixels = height * width * batch;
    const int num_blocks = (num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock;
    TriLinearForward<<<num_blocks, kThreadsPerBlock, 0, stream>>>(num_pixels, lut, image, output, lut_dim, shift, binsize, width, height, batch);
    const cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
        exit( -1 );
    }
    return 1;
}
// Backward pass of the trilinear 3D-LUT lookup: scatters each pixel's output
// gradient onto the 8 LUT corners it interpolated from, weighted by the same
// trilinear weights as the forward pass. Accumulation is via atomicAdd, so
// lut_grad is only ever added to here (callers provide a zeroed buffer --
// presumably; verify upstream). Layouts match TriLinearForward.
__global__ void TriLinearBackward(const int nthreads, const float* image, const float* image_grad, float* lut_grad, const int dim, const int shift, const float binsize, const int width, const int height, const int batch) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Recompute the forward pass's cell coordinates and weights from the image.
float r = image[index];
float g = image[index + width * height * batch];
float b = image[index + width * height * batch * 2];
int r_id = floor(r / binsize);
int g_id = floor(g / binsize);
int b_id = floor(b / binsize);
float r_d = fmod(r,binsize) / binsize;
float g_d = fmod(g,binsize) / binsize;
float b_d = fmod(b,binsize) / binsize;
// 8 corner indices (r-fastest layout); same potential last-bin overrun as
// the forward kernel -- see NOTE(review) there.
int id000 = r_id + g_id * dim + b_id * dim * dim;
int id100 = r_id + 1 + g_id * dim + b_id * dim * dim;
int id010 = r_id + (g_id + 1) * dim + b_id * dim * dim;
int id110 = r_id + 1 + (g_id + 1) * dim + b_id * dim * dim;
int id001 = r_id + g_id * dim + (b_id + 1) * dim * dim;
int id101 = r_id + 1 + g_id * dim + (b_id + 1) * dim * dim;
int id011 = r_id + (g_id + 1) * dim + (b_id + 1) * dim * dim;
int id111 = r_id + 1 + (g_id + 1) * dim + (b_id + 1) * dim * dim;
float w000 = (1-r_d)*(1-g_d)*(1-b_d);
float w100 = r_d*(1-g_d)*(1-b_d);
float w010 = (1-r_d)*g_d*(1-b_d);
float w110 = r_d*g_d*(1-b_d);
float w001 = (1-r_d)*(1-g_d)*b_d;
float w101 = r_d*(1-g_d)*b_d;
float w011 = (1-r_d)*g_d*b_d;
float w111 = r_d*g_d*b_d;
// R-channel gradient -> first LUT table.
atomicAdd(lut_grad + id000, image_grad[index] * w000);
atomicAdd(lut_grad + id100, image_grad[index] * w100);
atomicAdd(lut_grad + id010, image_grad[index] * w010);
atomicAdd(lut_grad + id110, image_grad[index] * w110);
atomicAdd(lut_grad + id001, image_grad[index] * w001);
atomicAdd(lut_grad + id101, image_grad[index] * w101);
atomicAdd(lut_grad + id011, image_grad[index] * w011);
atomicAdd(lut_grad + id111, image_grad[index] * w111);
// G-channel gradient -> second LUT table (offset by `shift`).
atomicAdd(lut_grad + id000 + shift, image_grad[index + width * height * batch] * w000);
atomicAdd(lut_grad + id100 + shift, image_grad[index + width * height * batch] * w100);
atomicAdd(lut_grad + id010 + shift, image_grad[index + width * height * batch] * w010);
atomicAdd(lut_grad + id110 + shift, image_grad[index + width * height * batch] * w110);
atomicAdd(lut_grad + id001 + shift, image_grad[index + width * height * batch] * w001);
atomicAdd(lut_grad + id101 + shift, image_grad[index + width * height * batch] * w101);
atomicAdd(lut_grad + id011 + shift, image_grad[index + width * height * batch] * w011);
atomicAdd(lut_grad + id111 + shift, image_grad[index + width * height * batch] * w111);
// B-channel gradient -> third LUT table (offset by `shift * 2`).
atomicAdd(lut_grad + id000 + shift * 2, image_grad[index + width * height * batch * 2] * w000);
atomicAdd(lut_grad + id100 + shift * 2, image_grad[index + width * height * batch * 2] * w100);
atomicAdd(lut_grad + id010 + shift * 2, image_grad[index + width * height * batch * 2] * w010);
atomicAdd(lut_grad + id110 + shift * 2, image_grad[index + width * height * batch * 2] * w110);
atomicAdd(lut_grad + id001 + shift * 2, image_grad[index + width * height * batch * 2] * w001);
atomicAdd(lut_grad + id101 + shift * 2, image_grad[index + width * height * batch * 2] * w101);
atomicAdd(lut_grad + id011 + shift * 2, image_grad[index + width * height * batch * 2] * w011);
atomicAdd(lut_grad + id111 + shift * 2, image_grad[index + width * height * batch * 2] * w111);
}
}
// Host launcher for TriLinearBackward; mirrors the forward launcher
// (one thread per pixel, 1024-thread blocks, on `stream`). The kernel only
// accumulates into lut_grad with atomicAdd. Exits on launch error;
// returns 1 on success.
int TriLinearBackwardLaucher(const float* image, const float* image_grad, float* lut_grad, const int lut_dim, const int shift, const float binsize, const int width, const int height, const int batch, cudaStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int num_pixels = height * width * batch;
    const int num_blocks = (num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock;
    TriLinearBackward<<<num_blocks, kThreadsPerBlock, 0, stream>>>(num_pixels, image, image_grad, lut_grad, lut_dim, shift, binsize, width, height, batch);
    const cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
        exit( -1 );
    }
    return 1;
}
|
a59cbd54f69c1a7f8af1d49bd135d744cd8219fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <helper_functions.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../tools/gputimer.h"
#include "../data/data_layout.cuh"
#include "../core/dynamic_cuckoo.cuh"
namespace ch = cuckoo_helpers;
using namespace std;
// Benchmark/test harness for the dynamic cuckoo hash table: loads keys from a
// binary file, then drives batched insert/search/delete rounds and reports
// throughput. Device buffers are owned by main(), not by this class.
class DynamicTest {
public:
using data_t = DataLayout<>::data_t;
using key_t = DataLayout<>::key_t;
using value_t = DataLayout<>::value_t;
// Number of delete (phase 1) / insert (phase 2) batches per 10-batch round.
int r = 2;
int batch_size = 100000; //smaller batch size: 2e4 4e4 6e4 8e4 10e4
double lower_bound = 0.5; //lower bound: 0.3 0.35 0.4 0.45 0.5
double upper_bound = 0.85; //upper bound: 0.7 0.75 0.8 0.85 0.9
// Total number of keys in the pool (set by main from argv).
int pool_len = 0;
// Device-side key/value/result pools, allocated and filled by main().
key_t *keys_pool_d;
value_t *value_pool_d, *check_pool_d;
double init_fill_factor = 0.85;
// Reads data_len keys from a raw binary file into a malloc'd buffer.
// NOTE(review): on open failure the buffer is returned uninitialized; the
// fread return value is ignored; and the element size is hard-coded as
// sizeof(unsigned int) rather than sizeof(key_t) -- presumably key_t is
// a 32-bit type here, but verify against DataLayout<>.
static key_t *read_data(char *file_name, int data_len) {
FILE *fid;
fid = fopen(file_name, "rb");
key_t *pos = (key_t *) malloc(sizeof(key_t) * data_len);
if (fid == NULL) {
printf("file not found.\n");
return pos;
}
fread(pos, sizeof(unsigned int), data_len, fid);
fclose(fid);
return pos;
}
// Copies a slice of the device result pool to the host and verifies each
// value equals i + 5 + offset (matching values_h[i] = i + 5 set in main).
void batch_check(value_t *check_pool_d, int32_t single_batch_size, uint32_t offset) {
uint32_t error_cnt = 0;
value_t *check_pool_h = new value_t[single_batch_size];
hipMemcpy(check_pool_h, check_pool_d + offset, sizeof(value_t) * single_batch_size, hipMemcpyDeviceToHost);
for (int i = 0; i < single_batch_size; i++) {
if (check_pool_h[i] != i + 5 + offset) {
++error_cnt;
}
}
if (error_cnt != 0) {
printf("num error:%d \n", error_cnt);
} else {
printf("batch check ok\n");
}
delete[] check_pool_h;
}
// Main benchmark: 5 repeats of two phases over all 10-batch rounds.
// Phase 1: 10 inserts, 10 searches, r deletes per round; phase 2 inverts
// the ratio (r inserts, 10 searches, 10 deletes) -- presumably to exercise
// table growth/shrink around the fill-factor bounds. Prints elapsed time
// and throughput (operations per elapsed unit).
void batch_test() {
DynamicCuckoo<512, 512> dy_cuckoo((uint32_t)batch_size * 10 / init_fill_factor, batch_size, lower_bound, upper_bound);
int32_t batch_num = pool_len / batch_size;
int32_t batch_round = batch_num / 10;
GpuTimer timer;
timer.Start();
for (int repeat = 0; repeat < 5; repeat++) {
for (int32_t batch_round_ptr = 0; batch_round_ptr < batch_round; ++batch_round_ptr) {
int batch_ptr = batch_round_ptr * 10;
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_insert(keys_pool_d + (batch_ptr + j) * batch_size,
value_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_search(keys_pool_d + (batch_ptr + j) * batch_size,
check_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < r; j++) {
dy_cuckoo.batch_delete(keys_pool_d + (batch_ptr + j) * batch_size, nullptr, batch_size);
}
//batch_check(check_pool_d, 10 * batch_size, batch_ptr * batch_size);
}
//hipMemset(check_pool_d, 0, 4 * pool_len);
for (int32_t batch_round_ptr = 0; batch_round_ptr < batch_round; ++batch_round_ptr) {
int batch_ptr = batch_round_ptr * 10;
for (int j = 0; j < r; j++) {
dy_cuckoo.batch_insert(keys_pool_d + (batch_ptr + j) * batch_size,
value_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_search(keys_pool_d + (batch_ptr + j) * batch_size,
check_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_delete(keys_pool_d + (batch_ptr + j) * batch_size, nullptr, batch_size);
}
//batch_check(check_pool_d, 10 * batch_size, batch_ptr * batch_size);
}
}
timer.Stop();
// NOTE(review): assumes GpuTimer::Elapsed() returns seconds-scale units
// scaled to 1e6 -- confirm the timer's unit before trusting the numbers.
double diff = timer.Elapsed() * 1000000;
printf("<throughtput> %.2lf %.2lf\n", (double) diff,
(double) (batch_round * 10 * 4 * batch_size + batch_round * r * 2 * batch_size) * 5 / diff);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
};
// Entry point: parses the 7 required arguments (data file, pool length, r,
// batch size, lower/upper fill bounds, init fill factor), stages the key and
// value pools on the device, and runs DynamicTest::batch_test().
int main(int argc, char** argv) {
using test_t = DynamicTest;
// BUGFIX: argv[7] (init fill factor) is read below, so 8 argv entries
// (program name + 7 parameters) are required, not 7.
if (argc < 8)
{
cout << "para error\n" << endl;
return -1;
}
test_t dy_test;
char* file_name = argv[1];
int pool_len = atoi(argv[2]);
dy_test.pool_len = pool_len;
dy_test.r = atoi(argv[3]);
//batch size: 2e5 4e5 6e5 8e5 10e5
dy_test.batch_size = atoi(argv[4]) / 10;
dy_test.lower_bound = atof(argv[5]);
dy_test.upper_bound = atof(argv[6]);
dy_test.init_fill_factor = atof(argv[7]);
test_t::key_t* keys_h = test_t::read_data(file_name, pool_len);
test_t::value_t *values_h = new test_t::value_t [pool_len], *check_h = new test_t::value_t [pool_len];
// Values are i + 5 so batch_check can verify search results.
for(int i = 0; i < pool_len; i++){
values_h[i] = i + 5;
check_h[i] = 0;
}
checkCudaErrors(hipGetLastError());
hipMalloc((void**)&(dy_test.keys_pool_d), sizeof(test_t::key_t) * pool_len);
hipMalloc((void**)&(dy_test.value_pool_d), sizeof(test_t::value_t) * pool_len);
hipMalloc((void**)&(dy_test.check_pool_d), sizeof(test_t::value_t) * pool_len);
hipMemcpy(dy_test.keys_pool_d, keys_h, sizeof(test_t::key_t) * pool_len, hipMemcpyHostToDevice);
hipMemcpy(dy_test.value_pool_d, values_h, sizeof(test_t::value_t) * pool_len, hipMemcpyHostToDevice);
checkCudaErrors(hipGetLastError());
dy_test.batch_test();
// Release device pools (previously leaked until process exit).
hipFree(dy_test.keys_pool_d);
hipFree(dy_test.value_pool_d);
hipFree(dy_test.check_pool_d);
// BUGFIX: read_data allocates keys_h with malloc, so it must be released
// with free, not delete[] (malloc/delete[] is undefined behavior).
free(keys_h);
delete []values_h;
delete []check_h;
return 0;
}
| a59cbd54f69c1a7f8af1d49bd135d744cd8219fa.cu | #include <helper_functions.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../tools/gputimer.h"
#include "../data/data_layout.cuh"
#include "../core/dynamic_cuckoo.cuh"
namespace ch = cuckoo_helpers;
using namespace std;
// Benchmark/test harness for the dynamic cuckoo hash table: loads keys from a
// binary file, then drives batched insert/search/delete rounds and reports
// throughput. Device buffers are owned by main(), not by this class.
class DynamicTest {
public:
using data_t = DataLayout<>::data_t;
using key_t = DataLayout<>::key_t;
using value_t = DataLayout<>::value_t;
// Number of delete (phase 1) / insert (phase 2) batches per 10-batch round.
int r = 2;
int batch_size = 100000; //smaller batch size: 2e4 4e4 6e4 8e4 10e4
double lower_bound = 0.5; //lower bound: 0.3 0.35 0.4 0.45 0.5
double upper_bound = 0.85; //upper bound: 0.7 0.75 0.8 0.85 0.9
// Total number of keys in the pool (set by main from argv).
int pool_len = 0;
// Device-side key/value/result pools, allocated and filled by main().
key_t *keys_pool_d;
value_t *value_pool_d, *check_pool_d;
double init_fill_factor = 0.85;
// Reads data_len keys from a raw binary file into a malloc'd buffer.
// NOTE(review): on open failure the buffer is returned uninitialized; the
// fread return value is ignored; and the element size is hard-coded as
// sizeof(unsigned int) rather than sizeof(key_t) -- presumably key_t is
// a 32-bit type here, but verify against DataLayout<>.
static key_t *read_data(char *file_name, int data_len) {
FILE *fid;
fid = fopen(file_name, "rb");
key_t *pos = (key_t *) malloc(sizeof(key_t) * data_len);
if (fid == NULL) {
printf("file not found.\n");
return pos;
}
fread(pos, sizeof(unsigned int), data_len, fid);
fclose(fid);
return pos;
}
// Copies a slice of the device result pool to the host and verifies each
// value equals i + 5 + offset (matching values_h[i] = i + 5 set in main).
void batch_check(value_t *check_pool_d, int32_t single_batch_size, uint32_t offset) {
uint32_t error_cnt = 0;
value_t *check_pool_h = new value_t[single_batch_size];
cudaMemcpy(check_pool_h, check_pool_d + offset, sizeof(value_t) * single_batch_size, cudaMemcpyDeviceToHost);
for (int i = 0; i < single_batch_size; i++) {
if (check_pool_h[i] != i + 5 + offset) {
++error_cnt;
}
}
if (error_cnt != 0) {
printf("num error:%d \n", error_cnt);
} else {
printf("batch check ok\n");
}
delete[] check_pool_h;
}
// Main benchmark: 5 repeats of two phases over all 10-batch rounds.
// Phase 1: 10 inserts, 10 searches, r deletes per round; phase 2 inverts
// the ratio (r inserts, 10 searches, 10 deletes) -- presumably to exercise
// table growth/shrink around the fill-factor bounds. Prints elapsed time
// and throughput (operations per elapsed unit).
void batch_test() {
DynamicCuckoo<512, 512> dy_cuckoo((uint32_t)batch_size * 10 / init_fill_factor, batch_size, lower_bound, upper_bound);
int32_t batch_num = pool_len / batch_size;
int32_t batch_round = batch_num / 10;
GpuTimer timer;
timer.Start();
for (int repeat = 0; repeat < 5; repeat++) {
for (int32_t batch_round_ptr = 0; batch_round_ptr < batch_round; ++batch_round_ptr) {
int batch_ptr = batch_round_ptr * 10;
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_insert(keys_pool_d + (batch_ptr + j) * batch_size,
value_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_search(keys_pool_d + (batch_ptr + j) * batch_size,
check_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < r; j++) {
dy_cuckoo.batch_delete(keys_pool_d + (batch_ptr + j) * batch_size, nullptr, batch_size);
}
//batch_check(check_pool_d, 10 * batch_size, batch_ptr * batch_size);
}
//cudaMemset(check_pool_d, 0, 4 * pool_len);
for (int32_t batch_round_ptr = 0; batch_round_ptr < batch_round; ++batch_round_ptr) {
int batch_ptr = batch_round_ptr * 10;
for (int j = 0; j < r; j++) {
dy_cuckoo.batch_insert(keys_pool_d + (batch_ptr + j) * batch_size,
value_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_search(keys_pool_d + (batch_ptr + j) * batch_size,
check_pool_d + (batch_ptr + j) * batch_size, batch_size);
}
for (int j = 0; j < 10; j++) {
dy_cuckoo.batch_delete(keys_pool_d + (batch_ptr + j) * batch_size, nullptr, batch_size);
}
//batch_check(check_pool_d, 10 * batch_size, batch_ptr * batch_size);
}
}
timer.Stop();
// NOTE(review): assumes GpuTimer::Elapsed() returns seconds-scale units
// scaled to 1e6 -- confirm the timer's unit before trusting the numbers.
double diff = timer.Elapsed() * 1000000;
printf("<throughtput> %.2lf %.2lf\n", (double) diff,
(double) (batch_round * 10 * 4 * batch_size + batch_round * r * 2 * batch_size) * 5 / diff);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
};
// Entry point: parses the 7 required arguments (data file, pool length, r,
// batch size, lower/upper fill bounds, init fill factor), stages the key and
// value pools on the device, and runs DynamicTest::batch_test().
int main(int argc, char** argv) {
using test_t = DynamicTest;
// BUGFIX: argv[7] (init fill factor) is read below, so 8 argv entries
// (program name + 7 parameters) are required, not 7.
if (argc < 8)
{
cout << "para error\n" << endl;
return -1;
}
test_t dy_test;
char* file_name = argv[1];
int pool_len = atoi(argv[2]);
dy_test.pool_len = pool_len;
dy_test.r = atoi(argv[3]);
//batch size: 2e5 4e5 6e5 8e5 10e5
dy_test.batch_size = atoi(argv[4]) / 10;
dy_test.lower_bound = atof(argv[5]);
dy_test.upper_bound = atof(argv[6]);
dy_test.init_fill_factor = atof(argv[7]);
test_t::key_t* keys_h = test_t::read_data(file_name, pool_len);
test_t::value_t *values_h = new test_t::value_t [pool_len], *check_h = new test_t::value_t [pool_len];
// Values are i + 5 so batch_check can verify search results.
for(int i = 0; i < pool_len; i++){
values_h[i] = i + 5;
check_h[i] = 0;
}
checkCudaErrors(cudaGetLastError());
cudaMalloc((void**)&(dy_test.keys_pool_d), sizeof(test_t::key_t) * pool_len);
cudaMalloc((void**)&(dy_test.value_pool_d), sizeof(test_t::value_t) * pool_len);
cudaMalloc((void**)&(dy_test.check_pool_d), sizeof(test_t::value_t) * pool_len);
cudaMemcpy(dy_test.keys_pool_d, keys_h, sizeof(test_t::key_t) * pool_len, cudaMemcpyHostToDevice);
cudaMemcpy(dy_test.value_pool_d, values_h, sizeof(test_t::value_t) * pool_len, cudaMemcpyHostToDevice);
checkCudaErrors(cudaGetLastError());
dy_test.batch_test();
// Release device pools (previously leaked until process exit).
cudaFree(dy_test.keys_pool_d);
cudaFree(dy_test.value_pool_d);
cudaFree(dy_test.check_pool_d);
// BUGFIX: read_data allocates keys_h with malloc, so it must be released
// with free, not delete[] (malloc/delete[] is undefined behavior).
free(keys_h);
delete []values_h;
delete []check_h;
return 0;
}
|
e775f4d7ded0012a18d11ed6e234d9206fe70dd2.hip | // !!! This is a file automatically generated by hipify!!!
// MAPS - Memory Access Pattern Specification Framework
// http://maps-gpu.github.io/
// Copyright (c) 2015, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <cmath>
#include <random>
#include <string>
#include <sstream>
#include <hip/hip_runtime.h>
#include <gtest/gtest.h>
#include "cuda_gtest_utils.h"
#include <maps/input_containers/internal/io_common.cuh>
#include <maps/input_containers/internal/io_globalread.cuh>
#include <maps/input_containers/internal/io_globaltoshared.cuh>
#include <maps/input_containers/internal/io_globaltoarray.cuh>
#include <maps/multi/multi.cuh>
#include <maps/multi/pinned_allocation.h>
// Test Window (ND) by convolution (1-3 dimensions, varying sizes)
// CUDA block dimensions
#define BW 8
#define BH 8
#define BD 8
// Convolution radius range
#define MIN_WINDOW_RADIUS 0
#define MAX_WINDOW_RADIUS 6
// For repeatable randomization (kernels)
static const int kRandomSeed1 = 1234;
// For repeatable randomization (images)
static const int kRandomSeed2 = 4321;
// For comparing float values
static const float kEpsilon = 1e-6f;
// Sizes (exhaustive, uses each of the permutations, M^N)
static const unsigned int kSizes[] = {
1,
3,
32,
128,
192,
600,
608,
};
// 1 - 3 dimensions
// Convolution weights in constant memory, sized for the largest supported
// case: a 3-D kernel of extent (2*MAX_WINDOW_RADIUS + 1) per axis, flattened
// z-major (see the index math in Conv3Regression). Smaller radii / fewer
// dimensions use a prefix of this buffer.
__constant__ float kConvKernel[(MAX_WINDOW_RADIUS * 2 + 1) *
(MAX_WINDOW_RADIUS * 2 + 1) *
(MAX_WINDOW_RADIUS * 2 + 1)];
// Primary template for the reference (non-MAPS) convolution runner, used to
// cross-check MAPSConvolution results. Specialized below for DIMS = 1, 2, 3;
// the primary template is intentionally left undefined.
template <int DIMS, int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression
{
// Launches the reference convolution on device_in, writes to device_out,
// and copies the result back into host_output.
static void RunConvRegression(float *device_in, float *device_out,
float *host_output, unsigned int buffer_size[DIMS]);
};
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// Convolution written with the MAPS framework: `in` is a Window container
// (neighborhood of radius RADIUS around each output element, with BORDERS
// handling), `out` is a structured-injective output container. The weight at
// each neighborhood offset comes from kConvKernel, walked in the same order
// as the aligned window iteration.
template <int DIMS, int BLOCK_WIDTH, int BLOCK_HEIGHT, int BLOCK_DEPTH, int RADIUS, maps::BorderBehavior BORDERS>
__global__ void MAPSConvolution(maps::WindowSingleGPU<float, DIMS, BLOCK_WIDTH, BLOCK_HEIGHT, BLOCK_DEPTH, RADIUS, 1, 1, 1, BORDERS> in,
maps::StructuredInjectiveSingleGPU<float, DIMS, BLOCK_WIDTH, BLOCK_HEIGHT, BLOCK_DEPTH> out)
{
// Window containers stage their neighborhood through shared memory.
__shared__ typename decltype(in)::SharedData sdata;
in.init(sdata);
out.init();
// Threads past the edge of the output own no items.
if (out.Items() == 0)
return;
#pragma unroll
MAPS_FOREACH(oiter, out)
{
float result = 0.0f;
int i = 0;
// Iterate the input window aligned to this output element, accumulating
// the dot product with the flattened convolution kernel.
#pragma unroll
MAPS_FOREACH_ALIGNED(iter, in, oiter)
{
//printf("Index %d: Iter: %f\n", i, *iter);
result += *iter * kConvKernel[i++];
}
*oiter = result;
}
// Write staged results back to global memory.
out.commit();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// Reads buffer[x], resolving out-of-range x according to the compile-time
// BORDERS policy: clamp to the edge (WB_COPY), wrap around (WB_WRAP),
// substitute zero (WB_ZERO), or index unchecked (WB_NOCHECKS / default).
template<maps::BorderBehavior BORDERS>
__device__ float GetElement1D(const float *buffer, int width, int x)
{
    if (BORDERS == maps::WB_COPY)
        return buffer[maps::Clamp(x, 0, width - 1)];
    if (BORDERS == maps::WB_WRAP)
        return buffer[maps::Wrap(x, width)];
    if (BORDERS == maps::WB_ZERO)
        return (x < 0 || x >= width) ? 0.0f : buffer[x];
    // WB_NOCHECKS (and any other value): caller guarantees x is in range.
    return buffer[x];
}
// Reference 1-D convolution: one thread per output element, weights from
// kConvKernel (extent 2*WINDOW_RADIUS + 1), borders handled by GetElement1D.
template<int WINDOW_RADIUS, maps::BorderBehavior BORDERS>
__global__ void Conv1Regression(const float *buffer, int width,
float *result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
// Guard the ragged tail block.
if (x >= width)
return;
float local_result = 0.0f;
#pragma unroll
for (int i = 0; i < WINDOW_RADIUS * 2 + 1; ++i)
local_result += kConvKernel[i] * GetElement1D<BORDERS>(buffer, width, x + i - WINDOW_RADIUS);
result[x] = local_result;
}
// 1-D specialization: launches Conv1Regression over buffer_size[0] elements
// (BW threads per block) and copies the device result into host_output.
// Uses gtest ASSERT_EQ, so it must run inside a test body.
template <int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression<1, RADIUS, BORDERS>
{
static void RunConvRegression(float *device_in, float *device_out,
float *host_output, unsigned int buffer_size[1])
{
ASSERT_EQ(hipSetDevice(0), hipSuccess);
dim3 block_dims(BW);
dim3 grid_dims(maps::RoundUp(buffer_size[0], BW));
hipLaunchKernelGGL(( Conv1Regression<RADIUS, BORDERS>) , dim3(grid_dims), dim3(block_dims), 0, 0,
device_in, buffer_size[0], device_out);
ASSERT_EQ(hipGetLastError(), hipSuccess);
// Blocking copy also synchronizes with the kernel above.
ASSERT_EQ(hipMemcpy(host_output, device_out,
sizeof(float) * buffer_size[0],
hipMemcpyDeviceToHost), hipSuccess);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Reads buffer[y][x] from a pitched 2-D buffer (row stride `stride`),
// resolving out-of-range coordinates according to the compile-time BORDERS
// policy: clamp (WB_COPY), wrap (WB_WRAP), zero (WB_ZERO), or unchecked
// (WB_NOCHECKS / default).
template<maps::BorderBehavior BORDERS>
__device__ float GetElement2D(const float *buffer, int width,
                              int height, unsigned int stride,
                              int x, int y)
{
    if (BORDERS == maps::WB_COPY)
        return buffer[maps::Clamp(y, 0, height - 1) * stride + maps::Clamp(x, 0, width - 1)];
    if (BORDERS == maps::WB_WRAP)
        return buffer[maps::Wrap(y, height) * stride + maps::Wrap(x, width)];
    if (BORDERS == maps::WB_ZERO)
        return (y < 0 || y >= height || x < 0 || x >= width)
                   ? 0.0f : buffer[y * stride + x];
    // WB_NOCHECKS (and any other value): caller guarantees (x, y) in range.
    return buffer[y * stride + x];
}
// Reference 2-D convolution: one thread per output pixel; kConvKernel is a
// row-major (2R+1)x(2R+1) window, borders handled by GetElement2D. `stride`
// is the row pitch of both buffer and result.
template<int WINDOW_RADIUS, maps::BorderBehavior BORDERS>
__global__ void Conv2Regression(const float *buffer, int width,
int height, unsigned int stride,
float *result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the ragged tail blocks on both axes.
if (x >= width || y >= height)
return;
float local_result = 0.0f;
#pragma unroll
for (int i = 0; i < WINDOW_RADIUS * 2 + 1; ++i)
{
#pragma unroll
for (int j = 0; j < WINDOW_RADIUS * 2 + 1; ++j)
{
local_result += kConvKernel[i * (WINDOW_RADIUS * 2 + 1) + j] *
GetElement2D<BORDERS>(buffer, width, height, stride,
x + j - WINDOW_RADIUS, y + i - WINDOW_RADIUS);
}
}
result[y * stride + x] = local_result;
}
// 2-D specialization: launches Conv2Regression over a
// buffer_size[0] x buffer_size[1] image (BW x BH thread blocks, row pitch
// equal to buffer_size[0]) and copies the device result into host_output.
// Uses gtest ASSERT_EQ, so it must run inside a test body.
template <int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression<2, RADIUS, BORDERS>
{
static void RunConvRegression(float *device_in, float *device_out,
float *host_output, unsigned int buffer_size[2])
{
ASSERT_EQ(hipSetDevice(0), hipSuccess);
dim3 block_dims(BW, BH);
dim3 grid_dims(maps::RoundUp(buffer_size[0], BW),
maps::RoundUp(buffer_size[1], BH));
hipLaunchKernelGGL(( Conv2Regression<RADIUS, BORDERS>) , dim3(grid_dims), dim3(block_dims), 0, 0,
device_in, buffer_size[0], buffer_size[1],
buffer_size[0], device_out);
ASSERT_EQ(hipGetLastError(), hipSuccess);
// Blocking copy also synchronizes with the kernel above.
ASSERT_EQ(hipMemcpy(host_output, device_out,
sizeof(float) * buffer_size[0] * buffer_size[1],
hipMemcpyDeviceToHost), hipSuccess);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Reads one element of a row-major 3D buffer (lx * ly * lz, with row stride
// `stride`), resolving out-of-range coordinates per the border behavior.
template<maps::BorderBehavior BORDERS>
__device__ float GetElement3D(const float *buffer, int lx, int ly,
                              int lz, unsigned int stride,
                              int x, int y, int z)
{
    if (BORDERS == maps::WB_COPY)
    {
        // Clamp each coordinate to the nearest valid index.
        return buffer[maps::Clamp(z, 0, lz - 1) * ly * stride +
                      maps::Clamp(y, 0, ly - 1) * stride +
                      maps::Clamp(x, 0, lx - 1)];
    }
    if (BORDERS == maps::WB_WRAP)
    {
        // Wrap each coordinate around its dimension.
        return buffer[maps::Wrap(z, lz) * ly * stride +
                      maps::Wrap(y, ly) * stride +
                      maps::Wrap(x, lx)];
    }
    if (BORDERS == maps::WB_ZERO)
    {
        // Out-of-bounds voxels read as zero.
        if (z < 0 || z >= lz || y < 0 || y >= ly || x < 0 || x >= lx)
            return 0.0f;
    }
    // WB_NOCHECKS (and any other behavior): direct, unchecked access.
    return buffer[z * ly * stride + y * stride + x];
}
// Naive reference 3D convolution: one thread per output voxel. The constant
// weight array is addressed in (z-major, then y, then x) order, matching
// the offsets applied to the input coordinates.
template<int WINDOW_RADIUS, maps::BorderBehavior BORDERS>
__global__ void Conv3Regression(const float *buffer, int lx,
                                int ly, int lz,
                                unsigned int stride, float *result)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    // Guard the grid tail in all three dimensions.
    if (x >= lx || y >= ly || z >= lz)
        return;
    float local_result = 0.0f;
    #pragma unroll
    for (int i = 0; i < WINDOW_RADIUS * 2 + 1; ++i)
    {
        #pragma unroll
        for (int j = 0; j < WINDOW_RADIUS * 2 + 1; ++j)
        {
            #pragma unroll
            for (int k = 0; k < WINDOW_RADIUS * 2 + 1; ++k)
            {
                // Flattened index into the (2R+1)^3 constant kernel.
                int ind = i * (WINDOW_RADIUS * 2 + 1) * (WINDOW_RADIUS * 2 + 1) +
                          j * (WINDOW_RADIUS * 2 + 1) + k;
                local_result += kConvKernel[ind] *
                    GetElement3D<BORDERS>(buffer, lx, ly, lz, stride,
                                          x + k - WINDOW_RADIUS,
                                          y + j - WINDOW_RADIUS,
                                          z + i - WINDOW_RADIUS);
            }
        }
    }
    result[z * ly * stride + y * stride + x] = local_result;
}
// 3D specialization of the reference convolution dispatcher: launches
// Conv3Regression over the whole volume and copies the result back to host.
template <int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression<3, RADIUS, BORDERS>
{
    static void RunConvRegression(float *device_in, float *device_out,
                                  float *host_output, unsigned int buffer_size[3])
    {
        ASSERT_EQ(hipSetDevice(0), hipSuccess);
        dim3 block_dims(BW, BH, BD);
        dim3 grid_dims(maps::RoundUp(buffer_size[0], BW),
                       maps::RoundUp(buffer_size[1], BH),
                       maps::RoundUp(buffer_size[2], BD));
        // Rows are tightly packed, so the stride equals the first dimension.
        hipLaunchKernelGGL(( Conv3Regression<RADIUS, BORDERS>), dim3(grid_dims), dim3(block_dims), 0, 0,
            device_in, buffer_size[0], buffer_size[1],
            buffer_size[2], buffer_size[0], device_out);
        ASSERT_EQ(hipGetLastError(), hipSuccess);
        ASSERT_EQ(hipMemcpy(host_output, device_out,
                            sizeof(float) * buffer_size[0] * buffer_size[1] * buffer_size[2],
                            hipMemcpyDeviceToHost), hipSuccess);
    }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// Note that the "Unit" convolution tests also check the "Regression" functions for errors.
// Asserts that at least one GPU is visible and reports the device count
// through num_gpus (used to replicate the constant kernel onto every GPU).
inline void CheckDevices(int& num_gpus)
{
    num_gpus = 0;
    ASSERT_EQ(hipGetDeviceCount(&num_gpus), hipSuccess);
    ASSERT_GE(num_gpus, 1);
}
// Human-readable name for a border behavior, used in test failure messages.
static inline const char *BoundariesToString(maps::BorderBehavior borders)
{
    if (borders == maps::WB_NOCHECKS)
        return "unchecked";
    if (borders == maps::WB_WRAP)
        return "wrapped";
    if (borders == maps::WB_COPY)
        return "clamped";
    if (borders == maps::WB_ZERO)
        return "zero";
    return "N/A";
}
// Formats an N-dimensional buffer size as "W", "WxH" or "WxHxD" for test
// failure messages.
// BUGFIX: the stream-insertion line had been mangled by the hipify tool into
// a spurious, unterminated `hipLaunchKernelGGL((` call, which does not
// compile; restored the plain `ss << ...` statement.
template <int DIMS>
std::string BufferSize(unsigned int buffer_size[DIMS])
{
    std::stringstream ss;
    ss << buffer_size[0];
    for (int i = 1; i < DIMS; ++i)
        ss << "x" << buffer_size[i];
    return ss.str();
}
// Runs one windowed-convolution test case:
//  - builds either a random or a delta ("unit") convolution kernel,
//  - uploads it to constant memory on every visible GPU,
//  - runs the MAPS-based convolution on device 0,
//  - verifies the output against the input (unit kernel) or against the
//    naive regression kernels above (random kernel).
// Radius/border combinations the buffer cannot support are skipped.
// BUGFIX: the MAPSConvolution launch had lost its hipLaunchKernelGGL((...))
// wrapper during hipification (a dangling `>) , dim3(...)` fragment), which
// does not compile; restored the proper launch. Also fixed the printf
// conversion for the unsigned buffer_size element (%d -> %u).
template <int DIMS, int RADIUS, maps::BorderBehavior BORDERS>
void TestWindow(bool random_kernel, unsigned int buffer_size[DIMS],
                float *host_kernel, float *buffer_in, float *buffer_out,
                float *buffer_regression, float *device_in, float *device_out)
{
    int num_gpus = 0;
    CheckDevices(num_gpus);
    // Verify that the buffer is large enough to handle NOCHECKS
    if (BORDERS == maps::WB_NOCHECKS)
    {
        for (int i = 0; i < DIMS; ++i)
        {
            if (buffer_size[i] < (RADIUS * 2 + 1))
            {
                printf("Buffer size mismatch (dim[%d] = %u (< %d)), skipping test\n", i, buffer_size[i], RADIUS * 2 + 1);
                return;
            }
        }
    }
    // Verify that wrap will work (it will not wrap the buffer more than once)
    if (BORDERS == maps::WB_WRAP)
    {
        for (int i = 0; i < DIMS; ++i)
        {
            // Skip cases where the window would wrap multiple times.
            if (RADIUS > (int)buffer_size[i])
                return;
        }
    }
    unsigned int kernelsize = maps::Power<2 * RADIUS + 1, DIMS>::value;
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed1);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    // Randomize if necessary, otherwise create a "unit" kernel.
    if (random_kernel)
    {
        for (unsigned int i = 0; i < kernelsize; ++i)
        {
            host_kernel[i] = ud(gen);
        }
    }
    else
    {
        memset(host_kernel, 0, sizeof(float) * kernelsize);
        // Delta at the center element (kernelsize is odd).
        host_kernel[kernelsize / 2] = 1.0f;
    }
    // Copy convolution kernel to all GPUs.
    for (int i = 0; i < num_gpus; ++i)
    {
        ASSERT_EQ(hipSetDevice(i), hipSuccess);
        ASSERT_EQ(hipMemcpyToSymbol(kConvKernel, &host_kernel[0],
                                    sizeof(float) * kernelsize),
                  hipSuccess);
    }
    ASSERT_EQ(hipSetDevice(0), hipSuccess);
    size_t total_size = 1;
    for (int i = 0; i < DIMS; ++i)
        total_size *= buffer_size[i];
    // Wrap the raw device buffers with MAPS containers; rows are tightly
    // packed, so the stride equals the first dimension.
    maps::WindowSingleGPU<float, DIMS, BW, ((DIMS >= 2) ? BH : 1),
                          ((DIMS >= 3) ? BD : 1), RADIUS, 1, 1, 1, BORDERS> win;
    win.m_ptr = device_in;
    win.m_stride = buffer_size[0];
    maps::StructuredInjectiveSingleGPU<float, DIMS, BW, ((DIMS >= 2) ? BH : 1),
                                       ((DIMS >= 3) ? BD : 1), 1, 1> soout;
    soout.m_ptr = device_out;
    soout.m_stride = buffer_size[0];
    for (int i = 0; i < DIMS; ++i)
    {
        win.m_dimensions[i] = buffer_size[i];
        soout.m_dimensions[i] = buffer_size[i];
    }
    dim3 grid_dims(maps::RoundUp(buffer_size[0], BW),
                   (DIMS >= 2) ? maps::RoundUp(buffer_size[1], BH) : 1,
                   (DIMS >= 3) ? maps::RoundUp(buffer_size[2], BD) : 1);
    dim3 block_dims(BW,
                    (DIMS >= 2) ? BH : 1,
                    (DIMS >= 3) ? BD : 1);
    hipLaunchKernelGGL(( MAPSConvolution<DIMS, BW, ((DIMS >= 2) ? BH : 1),
                                         ((DIMS >= 3) ? BD : 1), RADIUS, BORDERS>),
                       dim3(grid_dims), dim3(block_dims), 0, 0, win, soout);
    CUASSERT_NOERR(hipGetLastError());
    CUASSERT_NOERR(hipDeviceSynchronize());
    CUASSERT_NOERR(hipMemcpy(buffer_out, device_out, sizeof(float) * total_size, hipMemcpyDeviceToHost));
    // If unit kernel, verify by testing equality to source buffer.
    if (!random_kernel)
    {
        for (size_t i = 0; i < total_size; ++i)
            ASSERT_LE(fabs(buffer_in[i] - buffer_out[i]), kEpsilon)
                << "Unequal values in unit convolution, index " << i
                << " (" << buffer_out[i] << " != " << buffer_in[i]
                << ") when convolving a " << BufferSize<DIMS>(buffer_size)
                << " buffer with a kernel with radius " << RADIUS
                << ", using " << BoundariesToString(BORDERS) << " boundaries";
    }
    else // Otherwise, use regression
    {
        // Note: this reuses device_out; the MAPS result was already copied
        // to buffer_out above.
        ConvRegression<DIMS, RADIUS, BORDERS>::RunConvRegression(
            device_in, device_out, buffer_regression, buffer_size);
        for (size_t i = 0; i < total_size; ++i)
            ASSERT_LE(fabs(buffer_regression[i] - buffer_out[i]), kEpsilon)
                << "Unequal values in randomized convolution, index " << i
                << " (" << buffer_out[i] << " != " << buffer_regression[i]
                << ") when convolving a " << BufferSize<DIMS>(buffer_size)
                << " buffer with a kernel with radius " << RADIUS
                << ", using " << BoundariesToString(BORDERS) << " boundaries";
    }
}
// Compile-time recursion over window radii [RAD_COUNTER, RAD_END]: for each
// radius, runs the test with every checked border behavior (wrap/clamp/zero).
template<int DIMS, bool RANDOMIZED, int RAD_COUNTER, int RAD_END>
struct WindowRadiusLoop
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_COUNTER, maps::WB_WRAP>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_COUNTER, maps::WB_COPY>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_COUNTER, maps::WB_ZERO>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
        // Recurse to the next radius.
        WindowRadiusLoop<DIMS, RANDOMIZED, RAD_COUNTER + 1, RAD_END>::Loop(size, hKernel, hIn, hOut,
                                                                           hRegression, dRegressionIn,
                                                                           dRegressionOut);
    }
};
// Recursion base case: runs the final radius (RAD_END) and stops.
template<int DIMS, bool RANDOMIZED, int RAD_END>
struct WindowRadiusLoop<DIMS, RANDOMIZED, RAD_END, RAD_END>
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_END, maps::WB_WRAP>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                 hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_END, maps::WB_COPY>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                 hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_END, maps::WB_ZERO>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                 hRegression, dRegressionIn, dRegressionOut);
    }
};
// Same compile-time radius loop, but for the WB_NOCHECKS border behavior
// only (kept separate because it has different buffer-size preconditions).
template<int DIMS, bool RANDOMIZED, int RAD_COUNTER, int RAD_END>
struct WindowRadiusLoopNoChecks
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_COUNTER, maps::WB_NOCHECKS>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                         hRegression, dRegressionIn, dRegressionOut);
        // Recurse to the next radius.
        WindowRadiusLoopNoChecks<DIMS, RANDOMIZED, RAD_COUNTER + 1, RAD_END>::Loop(
            size, hKernel, hIn, hOut, hRegression, dRegressionIn, dRegressionOut);
    }
};
// Recursion base case for the WB_NOCHECKS radius loop.
template<int DIMS, bool RANDOMIZED, int RAD_END>
struct WindowRadiusLoopNoChecks<DIMS, RANDOMIZED, RAD_END, RAD_END>
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_END, maps::WB_NOCHECKS>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
    }
};
// 1D unit-kernel test: convolving with a delta kernel must reproduce the
// input exactly, for every tested buffer size and radius.
TEST(Window, Window1DUnit)
{
    // Prepare buffers in advance.
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // NOTE(review): these are byte counts, but they are also used as element
    // counts when sizing the host vectors below, which over-allocates by
    // sizeof(float) - harmless, but worth confirming intent.
    size_t max_buffer_size = kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    maps::pinned_vector<float> hKernel(max_kernel_size, 0.0f),
                               hBuffIn(max_buffer_size),
                               hBuffOut(max_buffer_size),
                               hBuffRegression(max_buffer_size);
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(hipSetDevice(0), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionIn, max_buffer_size), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionOut, max_buffer_size), hipSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(hipMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                        hipMemcpyHostToDevice), hipSuccess);
    // Loop over the various buffer sizes.
    for (int size_ind = 0; size_ind < sizeof(kSizes) / sizeof(unsigned int); ++size_ind)
    {
        unsigned int size[1] = { kSizes[size_ind] };
        WindowRadiusLoop<1, false, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
            size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
            dRegressionIn, dRegressionOut);
    }
    ASSERT_EQ(hipFree(dRegressionIn), hipSuccess);
    ASSERT_EQ(hipFree(dRegressionOut), hipSuccess);
}
// 1D randomized-kernel test: the MAPS convolution must match the naive
// regression kernel for every tested buffer size and radius.
TEST(Window, Window1DRandom)
{
    // Prepare buffers in advance.
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // NOTE(review): byte counts reused as element counts below; see
    // Window1DUnit - harmless over-allocation.
    size_t max_buffer_size = kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    maps::pinned_vector<float> hKernel(max_kernel_size, 0.0f),
                               hBuffIn(max_buffer_size),
                               hBuffOut(max_buffer_size),
                               hBuffRegression(max_buffer_size);
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(hipSetDevice(0), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionIn, max_buffer_size), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionOut, max_buffer_size), hipSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(hipMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                        hipMemcpyHostToDevice), hipSuccess);
    // Loop over the various buffer sizes.
    for (int size_ind = 0; size_ind < sizeof(kSizes) / sizeof(unsigned int); ++size_ind)
    {
        unsigned int size[1] = { kSizes[size_ind] };
        WindowRadiusLoop<1, true, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
            size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
            dRegressionIn, dRegressionOut);
    }
    ASSERT_EQ(hipFree(dRegressionIn), hipSuccess);
    ASSERT_EQ(hipFree(dRegressionOut), hipSuccess);
}
// 2D unit-kernel test over every width x height combination from kSizes.
TEST(Window, Window2DUnit)
{
    // Prepare buffers in advance (sized for the largest width x height).
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // NOTE(review): byte counts reused as element counts below; see
    // Window1DUnit - harmless over-allocation.
    size_t max_buffer_size = kSizes[num_sizes - 1] * kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) *
                             (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    maps::pinned_vector<float> hKernel(max_kernel_size, 0.0f),
                               hBuffIn(max_buffer_size),
                               hBuffOut(max_buffer_size),
                               hBuffRegression(max_buffer_size);
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(hipSetDevice(0), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionIn, max_buffer_size), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionOut, max_buffer_size), hipSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(hipMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                        hipMemcpyHostToDevice), hipSuccess);
    // Loop over all width x height permutations.
    for (int size_ind1 = 0; size_ind1 < sizeof(kSizes) / sizeof(unsigned int); ++size_ind1)
    {
        for (int size_ind2 = 0; size_ind2 < sizeof(kSizes) / sizeof(unsigned int); ++size_ind2)
        {
            unsigned int size[2] = { kSizes[size_ind1], kSizes[size_ind2] };
            WindowRadiusLoop<2, false, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
                size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
                dRegressionIn, dRegressionOut);
        }
    }
    ASSERT_EQ(hipFree(dRegressionIn), hipSuccess);
    ASSERT_EQ(hipFree(dRegressionOut), hipSuccess);
}
// 2D randomized-kernel test over every width x height combination from
// kSizes, validated against the naive regression kernel.
TEST(Window, Window2DRandom)
{
    // Prepare buffers in advance (sized for the largest width x height).
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // NOTE(review): byte counts reused as element counts below; see
    // Window1DUnit - harmless over-allocation.
    size_t max_buffer_size = kSizes[num_sizes - 1] * kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) *
                             (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    maps::pinned_vector<float> hKernel(max_kernel_size, 0.0f),
                               hBuffIn(max_buffer_size),
                               hBuffOut(max_buffer_size),
                               hBuffRegression(max_buffer_size);
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(hipSetDevice(0), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionIn, max_buffer_size), hipSuccess);
    ASSERT_EQ(hipMalloc(&dRegressionOut, max_buffer_size), hipSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(hipMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                        hipMemcpyHostToDevice), hipSuccess);
    // Loop over all width x height permutations.
    for (int size_ind1 = 0; size_ind1 < sizeof(kSizes) / sizeof(unsigned int); ++size_ind1)
    {
        for (int size_ind2 = 0; size_ind2 < sizeof(kSizes) / sizeof(unsigned int); ++size_ind2)
        {
            unsigned int size[2] = { kSizes[size_ind1], kSizes[size_ind2] };
            WindowRadiusLoop<2, true, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
                size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
                dRegressionIn, dRegressionOut);
        }
    }
    ASSERT_EQ(hipFree(dRegressionIn), hipSuccess);
    ASSERT_EQ(hipFree(dRegressionOut), hipSuccess);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Exercises Window::at() with a relative 1D offset: each output element is
// read from the window at offset -1 (so out[i] == in[i-1] with wrap-around).
template <int BLOCK_WIDTH, int RADIUS>
__global__ void RelativeIndex1DKernel(maps::WindowSingleGPU<float, 1, BLOCK_WIDTH, 1, 1, RADIUS, 1, 1, 1, maps::WB_WRAP> in,
                                      maps::StructuredInjectiveSingleGPU<float, 1, BLOCK_WIDTH, 1, 1> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    // Only use the "O" indices
    // O X X
    *out.begin() = in.at(-1);
    out.commit();
}
// Verifies RelativeIndex1DKernel: every output element equals the input
// element one position to the left, with wrap-around at the buffer edges.
TEST(Window, RelativeIndex1D)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCKS = 5,
        TOTAL_SIZE = BLOCK_WIDTH * BLOCKS,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(hipMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(hipMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own index so mismatches are easy to spot
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(hipMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, hipMemcpyHostToDevice));
    // Create structures (radius 1, matching the kernel's template argument)
    maps::WindowSingleGPU<float, 1, BLOCK_WIDTH, 1, 1, 1, 1, 1, 1, maps::WB_WRAP> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_SIZE;
    maps::StructuredInjectiveSingleGPU<float, 1, BLOCK_WIDTH, 1, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_SIZE;
    // Run test
    hipLaunchKernelGGL(( RelativeIndex1DKernel<BLOCK_WIDTH, 1>), dim3(BLOCKS), dim3(BLOCK_WIDTH), 0, 0, win, soout);
    CUASSERT_NOERR(hipDeviceSynchronize());
    // Copy output and compare against the wrapped shift of the input
    CUASSERT_NOERR(hipMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, hipMemcpyDeviceToHost));
    for (int i = 0; i < TOTAL_SIZE; ++i)
        ASSERT_EQ(out_val[i], in_val[maps::Wrap(i - 1, TOTAL_SIZE)]) << "at index " << i;
    // Free GPU memory
    CUASSERT_NOERR(hipFree(d_in));
    CUASSERT_NOERR(hipFree(d_out));
}
// Exercises Window::at() with a relative 2D offset: each output element is
// read from the window at (+1, +1), i.e. the bottom-right neighbor.
template <int BLOCK_WIDTH, int BLOCK_HEIGHT, int RADIUS>
__global__ void RelativeIndex2DKernel(maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, RADIUS, 1, 1, 1, maps::WB_WRAP> in,
                                      maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    // Only use the "O" indices
    // X X X
    // X X X
    // X X O
    *out.begin() = in.at(1, 1);
    out.commit();
}
// Verifies RelativeIndex2DKernel: every output element equals the input's
// bottom-right neighbor, with wrap-around at both edges.
TEST(Window, RelativeIndex2D)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        XBLOCKS = 2,
        YBLOCKS = 3,
        TOTAL_SIZE = BLOCK_WIDTH * BLOCK_HEIGHT * XBLOCKS * YBLOCKS,
        TOTAL_WIDTH = BLOCK_WIDTH * XBLOCKS,
        TOTAL_HEIGHT = BLOCK_HEIGHT * YBLOCKS,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(hipMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(hipMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own flat index
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(hipMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, hipMemcpyHostToDevice));
    // Create structures (radius 1, matching the kernel's template argument)
    maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 1, 1, 1, 1, maps::WB_WRAP> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    // Run test
    hipLaunchKernelGGL(( RelativeIndex2DKernel<BLOCK_WIDTH, BLOCK_HEIGHT, 1>) , dim3(dim3(XBLOCKS, YBLOCKS)), dim3(dim3(BLOCK_WIDTH, BLOCK_HEIGHT)), 0, 0, win, soout);
    CUASSERT_NOERR(hipDeviceSynchronize());
    // Copy output and compare against the wrapped (+1, +1) shift of the input
    CUASSERT_NOERR(hipMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, hipMemcpyDeviceToHost));
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x],
                      in_val[maps::Wrap(y + 1, TOTAL_HEIGHT) * TOTAL_WIDTH + maps::Wrap(x + 1, TOTAL_WIDTH)])
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(hipFree(d_in));
    CUASSERT_NOERR(hipFree(d_out));
}
// Exercises Window::aligned_at(): each output element is the sum of the four
// von Neumann neighbors (up, left, right, down) of the aligned input element.
template <int BLOCK_WIDTH, int BLOCK_HEIGHT, int RADIUS>
__global__ void RelativeIndexAligned2DKernel(maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, RADIUS, 1, 1, 1, maps::WB_WRAP> in,
                                             maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    // Only use the "O" indices
    // X O X
    // O X O
    // X O X
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        *oiter = in.aligned_at(oiter, 0, -1) +
                 in.aligned_at(oiter, -1, 0) +
                 in.aligned_at(oiter, 1, 0) +
                 in.aligned_at(oiter, 0, 1);
    }
    out.commit();
}
// Verifies RelativeIndexAligned2DKernel: every output element equals the
// sum of its four wrapped von Neumann neighbors in the input.
TEST(Window, RelativeIndexAligned2D)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        XBLOCKS = 2,
        YBLOCKS = 3,
        TOTAL_SIZE = BLOCK_WIDTH * BLOCK_HEIGHT * XBLOCKS * YBLOCKS,
        TOTAL_WIDTH = BLOCK_WIDTH * XBLOCKS,
        TOTAL_HEIGHT = BLOCK_HEIGHT * YBLOCKS,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(hipMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(hipMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own flat index
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(hipMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, hipMemcpyHostToDevice));
    // Create structures (radius 1, matching the kernel's template argument)
    maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 1, 1, 1, 1, maps::WB_WRAP> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    // Run test
    hipLaunchKernelGGL(( RelativeIndexAligned2DKernel<BLOCK_WIDTH, BLOCK_HEIGHT, 1>) , dim3(dim3(XBLOCKS, YBLOCKS)), dim3(dim3(BLOCK_WIDTH, BLOCK_HEIGHT)), 0, 0, win, soout);
    CUASSERT_NOERR(hipDeviceSynchronize());
    // Copy output and compare against the four wrapped neighbors of each cell
    CUASSERT_NOERR(hipMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, hipMemcpyDeviceToHost));
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x],
                      (in_val[maps::Wrap(y - 1, TOTAL_HEIGHT) * TOTAL_WIDTH + x] +
                       in_val[y * TOTAL_WIDTH + maps::Wrap(x - 1, TOTAL_WIDTH)] +
                       in_val[y * TOTAL_WIDTH + maps::Wrap(x + 1, TOTAL_WIDTH)] +
                       in_val[maps::Wrap(y + 1, TOTAL_HEIGHT) * TOTAL_WIDTH + x]))
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(hipFree(d_in));
    CUASSERT_NOERR(hipFree(d_out));
}
// Identity copy through a zero-radius window: verifies that the Window
// container works correctly with no apron on a single GPU.
template <int BLOCK_WIDTH, int BLOCK_HEIGHT>
__global__ void NoRadiusSingleGPUKernel(maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> in,
                                        maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> out)
{
    MAPS_INIT(in, out);
    if (out.Items() == 0)
        return;
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        // Zero radius: the aligned window element is the input itself.
        *oiter = *in.align(oiter);
    }
    out.commit();
}
// Multi-GPU variant of the zero-radius identity copy, using the MAPS-Multi
// container types and initialization macros.
template <int BLOCK_WIDTH, int BLOCK_HEIGHT>
__global__ void NoRadiusMultiGPUKernel(MAPS_MULTIDEF2,
                                       maps::Window<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> in,
                                       maps::StructuredInjective2D<float, BLOCK_WIDTH, BLOCK_HEIGHT> out)
{
    MAPS_MULTI_INITVARS(in, out);
    if (out.Items() == 0)
        return;
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        // Zero radius: the aligned window element is the input itself.
        *oiter = *in.align(oiter);
    }
    out.commit();
}
// Verifies the zero-radius single-GPU kernel: output must equal input,
// including the partial blocks at the grid tail (5000 x 38 is not a
// multiple of the block dimensions).
TEST(Window, NoRadiusSingleGPU)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        TOTAL_WIDTH = 5000,
        TOTAL_HEIGHT = 38,
        XBLOCKS = (TOTAL_WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
        YBLOCKS = (TOTAL_HEIGHT + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT,
        TOTAL_SIZE = TOTAL_WIDTH * TOTAL_HEIGHT,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(hipMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(hipMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own flat index
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(hipMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, hipMemcpyHostToDevice));
    // Create structures (zero radius)
    maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    // Run test
    hipLaunchKernelGGL(( NoRadiusSingleGPUKernel<BLOCK_WIDTH, BLOCK_HEIGHT>) , dim3(dim3(XBLOCKS, YBLOCKS)), dim3(dim3(BLOCK_WIDTH, BLOCK_HEIGHT)), 0, 0, win, soout);
    CUASSERT_NOERR(hipDeviceSynchronize());
    // Copy output and compare element-wise with the input
    CUASSERT_NOERR(hipMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, hipMemcpyDeviceToHost));
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x], in_val[y * TOTAL_WIDTH + x])
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(hipFree(d_in));
    CUASSERT_NOERR(hipFree(d_out));
}
// Verifies the zero-radius multi-GPU kernel on a single device by filling in
// the multi-GPU container bookkeeping fields manually (no scheduler).
TEST(Window, NoRadiusMultiGPU)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        TOTAL_WIDTH = 5000,
        TOTAL_HEIGHT = 38,
        XBLOCKS = (TOTAL_WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
        YBLOCKS = (TOTAL_HEIGHT + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT,
        TOTAL_SIZE = TOTAL_WIDTH * TOTAL_HEIGHT,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(hipMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(hipMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own flat index
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(hipMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, hipMemcpyHostToDevice));
    // Create structures; multi-GPU fields are filled in by hand since the
    // whole buffer lives on one device.
    maps::Window<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    // NOTE(review): presumably marks the local buffer as already containing
    // its halo region - confirm against the Window container definition.
    win.m_containsApron = true;
    win.block_offset = 0;
    win.m_gridWidth = XBLOCKS;
    maps::StructuredInjective2D<float, BLOCK_WIDTH, BLOCK_HEIGHT> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    soout.grid_dims = dim3(XBLOCKS, YBLOCKS);
    soout.blockId = make_uint3(0,0,0);
    // Run test: the 2D grid is flattened to 1D, as the multi-GPU macros
    // reconstruct the logical block index from the passed grid dimensions.
    hipLaunchKernelGGL(( NoRadiusMultiGPUKernel<BLOCK_WIDTH, BLOCK_HEIGHT>) , dim3(dim3(XBLOCKS * YBLOCKS)), dim3(dim3(BLOCK_WIDTH, BLOCK_HEIGHT)), 0, 0,
        0, dim3(XBLOCKS, YBLOCKS), make_uint3(0,0,0), win, soout);
    CUASSERT_NOERR(hipDeviceSynchronize());
    // Copy output and compare element-wise with the input
    CUASSERT_NOERR(hipMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, hipMemcpyDeviceToHost));
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x], in_val[y * TOTAL_WIDTH + x])
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(hipFree(d_in));
    CUASSERT_NOERR(hipFree(d_out));
}
// Same zero-radius identity check, but driven through the MAPS-Multi
// scheduler (AnalyzeCall / Invoke / Gather) instead of a manual launch.
TEST(Window, NoRadiusMAPSMulti)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        TOTAL_WIDTH = 5000,
        TOTAL_HEIGHT = 38,
        XBLOCKS = (TOTAL_WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
        YBLOCKS = (TOTAL_HEIGHT + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT,
        TOTAL_SIZE = TOTAL_WIDTH * TOTAL_HEIGHT,
    };
    // Allocate GPU memory through the MAPS-Multi matrix abstraction
    maps::multi::Matrix<float> in(TOTAL_WIDTH, TOTAL_HEIGHT),
                               out(TOTAL_WIDTH, TOTAL_HEIGHT);
    // Initialize input with its own flat index
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Bind matrices to the host vectors
    in.Bind(&in_val[0]);
    out.Bind(&out_val[0]);
    // Scheduler restricted to device 0
    maps::multi::Scheduler sched{0};
    sched.AnalyzeCall(dim3(), dim3(BLOCK_WIDTH, BLOCK_HEIGHT),
                      maps::multi::Window2D<float, BLOCK_WIDTH, BLOCK_HEIGHT, 0>(in),
                      maps::multi::StructuredInjectiveMatrixO<float>(out));
    // Run test
    sched.Invoke(NoRadiusMultiGPUKernel<BLOCK_WIDTH, BLOCK_HEIGHT>, dim3(), dim3(BLOCK_WIDTH, BLOCK_HEIGHT),
                 maps::multi::Window2D<float, BLOCK_WIDTH, BLOCK_HEIGHT, 0>(in),
                 maps::multi::StructuredInjectiveMatrixO<float>(out));
    CUASSERT_NOERR(hipDeviceSynchronize());
    // Copy output back via the scheduler and compare with the input
    sched.Gather<false>(out);
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x], in_val[y * TOTAL_WIDTH + x])
                << "at index (" << x << ", " << y << ")";
        }
    }
}
// ===== e775f4d7ded0012a18d11ed6e234d9206fe70dd2.cu =====
// http://maps-gpu.github.io/
// Copyright (c) 2015, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <cmath>
#include <random>
#include <string>
#include <sstream>
#include <cuda_runtime.h>
#include <gtest/gtest.h>
#include "cuda_gtest_utils.h"
#include <maps/input_containers/internal/io_common.cuh>
#include <maps/input_containers/internal/io_globalread.cuh>
#include <maps/input_containers/internal/io_globaltoshared.cuh>
#include <maps/input_containers/internal/io_globaltoarray.cuh>
#include <maps/multi/multi.cuh>
#include <maps/multi/pinned_allocation.h>
// Test Window (ND) by convolution (1-3 dimensions, varying sizes)
// CUDA block dimensions
#define BW 8
#define BH 8
#define BD 8
// Convolution radius range (inclusive); includes the zero-radius edge case
#define MIN_WINDOW_RADIUS 0
#define MAX_WINDOW_RADIUS 6
// For repeatable randomization (kernels)
static const int kRandomSeed1 = 1234;
// For repeatable randomization (images)
static const int kRandomSeed2 = 4321;
// For comparing float values
static const float kEpsilon = 1e-6f;
// Sizes (exhaustive, uses each of the permutations, M^N)
static const unsigned int kSizes[] = {
    1,
    3,
    32,
    128,
    192,
    600,
    608,
};
// Constant-memory convolution weights, sized for the largest radius in
// 1 - 3 dimensions
__constant__ float kConvKernel[(MAX_WINDOW_RADIUS * 2 + 1) *
                               (MAX_WINDOW_RADIUS * 2 + 1) *
                               (MAX_WINDOW_RADIUS * 2 + 1)];
// Naive (non-MAPS) reference convolution, specialized per dimension below.
// Used to validate the MAPS-based convolution with randomized kernels.
template <int DIMS, int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression
{
    static void RunConvRegression(float *device_in, float *device_out,
                                  float *host_output, unsigned int buffer_size[DIMS]);
};
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// The kernel under test: N-dimensional convolution using the MAPS Window
// input container and StructuredInjective output container. The window
// iterator visits neighbors in the same order as the flattened constant
// kernel, so a running index pairs each element with its weight.
template <int DIMS, int BLOCK_WIDTH, int BLOCK_HEIGHT, int BLOCK_DEPTH, int RADIUS, maps::BorderBehavior BORDERS>
__global__ void MAPSConvolution(maps::WindowSingleGPU<float, DIMS, BLOCK_WIDTH, BLOCK_HEIGHT, BLOCK_DEPTH, RADIUS, 1, 1, 1, BORDERS> in,
                                maps::StructuredInjectiveSingleGPU<float, DIMS, BLOCK_WIDTH, BLOCK_HEIGHT, BLOCK_DEPTH> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        float result = 0.0f;
        int i = 0;
        #pragma unroll
        MAPS_FOREACH_ALIGNED(iter, in, oiter)
        {
            //printf("Index %d: Iter: %f\n", i, *iter);
            result += *iter * kConvKernel[i++];
        }
        *oiter = result;
    }
    out.commit();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// Border-policy-aware read of buffer[x] for the 1D reference convolution.
// Out-of-range coordinates are clamped (WB_COPY), wrapped (WB_WRAP), or
// mapped to zero (WB_ZERO); WB_NOCHECKS performs an unchecked access.
template<maps::BorderBehavior BORDERS>
__device__ float GetElement1D(const float *buffer, int width, int x)
{
    if (BORDERS == maps::WB_COPY)
        return buffer[maps::Clamp(x, 0, width - 1)];
    if (BORDERS == maps::WB_WRAP)
        return buffer[maps::Wrap(x, width)];
    if (BORDERS == maps::WB_ZERO)
        return (x < 0 || x >= width) ? 0.0f : buffer[x];
    // WB_NOCHECKS (and any unknown policy): caller guarantees x is in range.
    return buffer[x];
}
// Naive 1D convolution used as ground truth: one thread per output element,
// weights read from the constant-memory kConvKernel.
template<int WINDOW_RADIUS, maps::BorderBehavior BORDERS>
__global__ void Conv1Regression(const float *buffer, int width,
                                float *result)
{
    const int diameter = WINDOW_RADIUS * 2 + 1;
    const int gx = blockIdx.x * blockDim.x + threadIdx.x;
    if (gx >= width)
        return;
    float acc = 0.0f;
    #pragma unroll
    for (int k = 0; k < diameter; ++k)
        acc += kConvKernel[k] * GetElement1D<BORDERS>(buffer, width, gx + k - WINDOW_RADIUS);
    result[gx] = acc;
}
// 1D specialization: launches the reference convolution kernel and copies
// the result back to host_output for comparison against the MAPS version.
template <int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression<1, RADIUS, BORDERS>
{
    static void RunConvRegression(float *device_in, float *device_out,
                                  float *host_output, unsigned int buffer_size[1])
    {
        ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
        // One thread per element; grid rounded up to cover the full width.
        dim3 block_dims(BW);
        dim3 grid_dims(maps::RoundUp(buffer_size[0], BW));
        Conv1Regression<RADIUS, BORDERS> <<<grid_dims, block_dims>>>(
            device_in, buffer_size[0], device_out);
        ASSERT_EQ(cudaGetLastError(), cudaSuccess);
        // Blocking copy also synchronizes with the kernel above.
        ASSERT_EQ(cudaMemcpy(host_output, device_out,
                             sizeof(float) * buffer_size[0],
                             cudaMemcpyDeviceToHost), cudaSuccess);
    }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Border-policy-aware read of buffer[y][x] for the 2D reference convolution.
// Rows are separated by `stride` floats; boundary handling matches the
// policy selected by the BORDERS template parameter.
template<maps::BorderBehavior BORDERS>
__device__ float GetElement2D(const float *buffer, int width,
                              int height, unsigned int stride,
                              int x, int y)
{
    if (BORDERS == maps::WB_COPY)
        return buffer[maps::Clamp(y, 0, height - 1) * stride + maps::Clamp(x, 0, width - 1)];
    if (BORDERS == maps::WB_WRAP)
        return buffer[maps::Wrap(y, height) * stride + maps::Wrap(x, width)];
    if (BORDERS == maps::WB_ZERO)
        return (y < 0 || y >= height || x < 0 || x >= width)
                   ? 0.0f : buffer[y * stride + x];
    // WB_NOCHECKS (and any unknown policy): unchecked access.
    return buffer[y * stride + x];
}
// Naive 2D convolution used as ground truth: one thread per output pixel,
// weights read row-major from the constant-memory kConvKernel.
template<int WINDOW_RADIUS, maps::BorderBehavior BORDERS>
__global__ void Conv2Regression(const float *buffer, int width,
                                int height, unsigned int stride,
                                float *result)
{
    const int diameter = WINDOW_RADIUS * 2 + 1;
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;
    float acc = 0.0f;
    #pragma unroll
    for (int ky = 0; ky < diameter; ++ky)
    {
        #pragma unroll
        for (int kx = 0; kx < diameter; ++kx)
        {
            acc += kConvKernel[ky * diameter + kx] *
                   GetElement2D<BORDERS>(buffer, width, height, stride,
                                         x + kx - WINDOW_RADIUS,
                                         y + ky - WINDOW_RADIUS);
        }
    }
    result[y * stride + x] = acc;
}
// 2D specialization: launches the reference convolution kernel and copies
// the result back to host_output. The row stride equals buffer_size[0]
// (the buffer is allocated densely, without padding).
template <int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression<2, RADIUS, BORDERS>
{
    static void RunConvRegression(float *device_in, float *device_out,
                                  float *host_output, unsigned int buffer_size[2])
    {
        ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
        dim3 block_dims(BW, BH);
        dim3 grid_dims(maps::RoundUp(buffer_size[0], BW),
                       maps::RoundUp(buffer_size[1], BH));
        Conv2Regression<RADIUS, BORDERS> <<<grid_dims, block_dims>>>(
            device_in, buffer_size[0], buffer_size[1],
            buffer_size[0], device_out);
        ASSERT_EQ(cudaGetLastError(), cudaSuccess);
        // Blocking copy also synchronizes with the kernel above.
        ASSERT_EQ(cudaMemcpy(host_output, device_out,
                             sizeof(float) * buffer_size[0] * buffer_size[1],
                             cudaMemcpyDeviceToHost), cudaSuccess);
    }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Border-policy-aware read of buffer[z][y][x] for the 3D reference
// convolution. lx/ly/lz are the logical extents; rows are separated by
// `stride` floats and slices by ly * stride floats.
template<maps::BorderBehavior BORDERS>
__device__ float GetElement3D(const float *buffer, int lx, int ly,
                              int lz, unsigned int stride,
                              int x, int y, int z)
{
    if (BORDERS == maps::WB_COPY)
        return buffer[maps::Clamp(z, 0, lz - 1) * ly * stride + maps::Clamp(y, 0, ly - 1) * stride + maps::Clamp(x, 0, lx - 1)];
    if (BORDERS == maps::WB_WRAP)
        return buffer[maps::Wrap(z, lz) * ly * stride + maps::Wrap(y, ly) * stride + maps::Wrap(x, lx)];
    if (BORDERS == maps::WB_ZERO)
        return (z < 0 || z >= lz || y < 0 || y >= ly || x < 0 || x >= lx)
                   ? 0.0f : buffer[z * ly * stride + y * stride + x];
    // WB_NOCHECKS (and any unknown policy): unchecked access.
    return buffer[z * ly * stride + y * stride + x];
}
// Naive 3D convolution used as ground truth: one thread per output voxel.
// kConvKernel is indexed in (z, y, x) order, innermost dimension last.
template<int WINDOW_RADIUS, maps::BorderBehavior BORDERS>
__global__ void Conv3Regression(const float *buffer, int lx,
                                int ly, int lz,
                                unsigned int stride, float *result)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    // Guard against tail blocks that extend past the volume.
    if (x >= lx || y >= ly || z >= lz)
        return;
    float local_result = 0.0f;
    #pragma unroll
    for (int i = 0; i < WINDOW_RADIUS * 2 + 1; ++i)
    {
        #pragma unroll
        for (int j = 0; j < WINDOW_RADIUS * 2 + 1; ++j)
        {
            #pragma unroll
            for (int k = 0; k < WINDOW_RADIUS * 2 + 1; ++k)
            {
                // Linear index into the (2R+1)^3 kernel.
                int ind = i * (WINDOW_RADIUS * 2 + 1) * (WINDOW_RADIUS * 2 + 1) +
                          j * (WINDOW_RADIUS * 2 + 1) + k;
                local_result += kConvKernel[ind] *
                    GetElement3D<BORDERS>(buffer, lx, ly, lz, stride,
                                          x + k - WINDOW_RADIUS,
                                          y + j - WINDOW_RADIUS,
                                          z + i - WINDOW_RADIUS);
            }
        }
    }
    result[z * ly * stride + y * stride + x] = local_result;
}
// 3D specialization: launches the reference convolution kernel and copies
// the result back to host_output. Row stride equals buffer_size[0].
template <int RADIUS, maps::BorderBehavior BORDERS>
struct ConvRegression<3, RADIUS, BORDERS>
{
    static void RunConvRegression(float *device_in, float *device_out,
                                  float *host_output, unsigned int buffer_size[3])
    {
        ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
        dim3 block_dims(BW, BH, BD);
        dim3 grid_dims(maps::RoundUp(buffer_size[0], BW),
                       maps::RoundUp(buffer_size[1], BH),
                       maps::RoundUp(buffer_size[2], BD));
        Conv3Regression<RADIUS, BORDERS><<<grid_dims, block_dims>>>(
            device_in, buffer_size[0], buffer_size[1],
            buffer_size[2], buffer_size[0], device_out);
        ASSERT_EQ(cudaGetLastError(), cudaSuccess);
        // Blocking copy also synchronizes with the kernel above.
        ASSERT_EQ(cudaMemcpy(host_output, device_out,
                             sizeof(float) * buffer_size[0] * buffer_size[1] * buffer_size[2],
                             cudaMemcpyDeviceToHost), cudaSuccess);
    }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// Note that the "Unit" convolution tests also check the "Regression" functions for errors.
// Queries the number of visible CUDA devices into num_gpus and fails the
// test if the query errors or no GPU is present.
inline void CheckDevices(int& num_gpus)
{
    num_gpus = 0;
    ASSERT_EQ(cudaGetDeviceCount(&num_gpus), cudaSuccess);
    ASSERT_GE(num_gpus, 1);
}
// Human-readable name of a boundary policy, used in assertion messages.
static inline const char *BoundariesToString(maps::BorderBehavior borders)
{
    switch (borders)
    {
    case maps::WB_ZERO:     return "zero";
    case maps::WB_COPY:     return "clamped";
    case maps::WB_WRAP:     return "wrapped";
    case maps::WB_NOCHECKS: return "unchecked";
    default:                return "N/A";
    }
}
// Formats a DIMS-dimensional buffer size as "d0xd1x...xdN-1" (e.g. "32x128")
// for use in test failure messages.
template <int DIMS>
std::string BufferSize(unsigned int buffer_size[DIMS])
{
    std::string result = std::to_string(buffer_size[0]);
    for (int i = 1; i < DIMS; ++i)
    {
        result += 'x';
        result += std::to_string(buffer_size[i]);
    }
    return result;
}
// Core test driver: convolves a DIMS-dimensional buffer with either a
// random kernel (verified against the naive regression kernels) or a unit
// kernel (verified against the input itself), using the MAPS Window
// container with the given radius and boundary policy.
// All host/device buffers are caller-allocated and reused across calls.
template <int DIMS, int RADIUS, maps::BorderBehavior BORDERS>
void TestWindow(bool random_kernel, unsigned int buffer_size[DIMS],
                float *host_kernel, float *buffer_in, float *buffer_out,
                float *buffer_regression, float *device_in, float *device_out)
{
    int num_gpus = 0;
    CheckDevices(num_gpus);
    // Verify that the buffer is large enough to handle NOCHECKS
    if (BORDERS == maps::WB_NOCHECKS)
    {
        for (int i = 0; i < DIMS; ++i)
        {
            if (buffer_size[i] < (RADIUS * 2 + 1))
            {
                printf("Buffer size mismatch (dim[%d] = %d (< %d)), skipping test\n", i, buffer_size[i], RADIUS * 2 + 1);
                return;
            }
        }
    }
    // Verify that wrap will work (it will not wrap the buffer more than once)
    if (BORDERS == maps::WB_WRAP)
    {
        for (int i = 0; i < DIMS; ++i)
        {
            // Skip
            if (RADIUS > (int)buffer_size[i])
                return;
        }
    }
    // Total number of kernel weights: (2R+1)^DIMS.
    unsigned int kernelsize = maps::Power<2 * RADIUS + 1, DIMS>::value;
    // Prepare random number generator.
    std::mt19937 gen(kRandomSeed1);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    // Randomize if necessary, otherwise create a "unit" kernel.
    if (random_kernel)
    {
        for (unsigned int i = 0; i < kernelsize; ++i)
        {
            host_kernel[i] = ud(gen);
        }
    }
    else
    {
        // Unit (identity) kernel: 1 at the center element, 0 elsewhere.
        memset(host_kernel, 0, sizeof(float) * kernelsize);
        host_kernel[kernelsize / 2] = 1.0f;
    }
    // Copy convolution kernel to all GPUs.
    for (int i = 0; i < num_gpus; ++i)
    {
        ASSERT_EQ(cudaSetDevice(i), cudaSuccess);
        ASSERT_EQ(cudaMemcpyToSymbol(kConvKernel, &host_kernel[0],
                                     sizeof(float) * kernelsize),
                  cudaSuccess);
    }
    ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
    size_t total_size = 1;
    for (int i = 0; i < DIMS; ++i)
        total_size *= buffer_size[i];
    // Build the MAPS input window; unused block dimensions collapse to 1.
    maps::WindowSingleGPU<float, DIMS, BW, ((DIMS >= 2) ? BH : 1),
        ((DIMS >= 3) ? BD : 1), RADIUS, 1, 1, 1, BORDERS> win;
    win.m_ptr = device_in;
    win.m_stride = buffer_size[0];
    // Build the MAPS output container with matching block dimensions.
    maps::StructuredInjectiveSingleGPU<float, DIMS, BW, ((DIMS >= 2) ? BH : 1),
        ((DIMS >= 3) ? BD : 1), 1, 1> soout;
    soout.m_ptr = device_out;
    soout.m_stride = buffer_size[0];
    for (int i = 0; i < DIMS; ++i)
    {
        win.m_dimensions[i] = buffer_size[i];
        soout.m_dimensions[i] = buffer_size[i];
    }
    dim3 grid_dims(maps::RoundUp(buffer_size[0], BW),
                   (DIMS >= 2) ? maps::RoundUp(buffer_size[1], BH) : 1,
                   (DIMS >= 3) ? maps::RoundUp(buffer_size[2], BD) : 1);
    dim3 block_dims(BW,
                    (DIMS >= 2) ? BH : 1,
                    (DIMS >= 3) ? BD : 1);
    MAPSConvolution<DIMS, BW, ((DIMS >= 2) ? BH : 1),
        ((DIMS >= 3) ? BD : 1), RADIUS, BORDERS> <<<grid_dims, block_dims>>>(win, soout);
    CUASSERT_NOERR(cudaGetLastError());
    CUASSERT_NOERR(cudaDeviceSynchronize());
    CUASSERT_NOERR(cudaMemcpy(buffer_out, device_out, sizeof(float) * total_size, cudaMemcpyDeviceToHost));
    // If unit kernel, verify by testing equality to source buffer.
    if (!random_kernel)
    {
        for (size_t i = 0; i < total_size; ++i)
            ASSERT_LE(fabs(buffer_in[i] - buffer_out[i]), kEpsilon)
                << "Unequal values in unit convolution, index " << i
                << " (" << buffer_out[i] << " != " << buffer_in[i]
                << ") when convolving a " << BufferSize<DIMS>(buffer_size)
                << " buffer with a kernel with radius " << RADIUS
                << ", using " << BoundariesToString(BORDERS) << " boundaries";
    }
    else // Otherwise, use regression
    {
        ConvRegression<DIMS, RADIUS, BORDERS>::RunConvRegression(
            device_in, device_out, buffer_regression, buffer_size);
        for (size_t i = 0; i < total_size; ++i)
            ASSERT_LE(fabs(buffer_regression[i] - buffer_out[i]), kEpsilon)
                << "Unequal values in randomized convolution, index " << i
                << " (" << buffer_out[i] << " != " << buffer_regression[i]
                << ") when convolving a " << BufferSize<DIMS>(buffer_size)
                << " buffer with a kernel with radius " << RADIUS
                << ", using " << BoundariesToString(BORDERS) << " boundaries";
    }
}
// Compile-time loop over window radii [RAD_COUNTER, RAD_END]: for each
// radius, runs TestWindow with the WRAP, COPY and ZERO boundary policies,
// then recurses with RAD_COUNTER + 1 (terminated by the specialization
// below).
template<int DIMS, bool RANDOMIZED, int RAD_COUNTER, int RAD_END>
struct WindowRadiusLoop
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_COUNTER, maps::WB_WRAP>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_COUNTER, maps::WB_COPY>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_COUNTER, maps::WB_ZERO>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
        WindowRadiusLoop<DIMS, RANDOMIZED, RAD_COUNTER + 1, RAD_END>::Loop(size, hKernel, hIn, hOut,
                                                                           hRegression, dRegressionIn,
                                                                           dRegressionOut);
    }
};
// Base case of the radius recursion: runs the final radius (RAD_END) with
// the three checked boundary policies and stops.
template<int DIMS, bool RANDOMIZED, int RAD_END>
struct WindowRadiusLoop<DIMS, RANDOMIZED, RAD_END, RAD_END>
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_END, maps::WB_WRAP>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                 hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_END, maps::WB_COPY>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                 hRegression, dRegressionIn, dRegressionOut);
        TestWindow<DIMS, RAD_END, maps::WB_ZERO>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                 hRegression, dRegressionIn, dRegressionOut);
    }
};
// Same compile-time radius loop as WindowRadiusLoop, but exercising only
// the WB_NOCHECKS (unchecked-access) boundary policy.
template<int DIMS, bool RANDOMIZED, int RAD_COUNTER, int RAD_END>
struct WindowRadiusLoopNoChecks
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_COUNTER, maps::WB_NOCHECKS>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                         hRegression, dRegressionIn, dRegressionOut);
        WindowRadiusLoopNoChecks<DIMS, RANDOMIZED, RAD_COUNTER + 1, RAD_END>::Loop(
            size, hKernel, hIn, hOut, hRegression, dRegressionIn, dRegressionOut);
    }
};
// Base case of the NOCHECKS radius recursion: runs radius RAD_END and stops.
template<int DIMS, bool RANDOMIZED, int RAD_END>
struct WindowRadiusLoopNoChecks<DIMS, RANDOMIZED, RAD_END, RAD_END>
{
    static void Loop(unsigned int size[DIMS], float *hKernel, float *hIn, float *hOut,
                     float *hRegression, float *dRegressionIn, float *dRegressionOut)
    {
        TestWindow<DIMS, RAD_END, maps::WB_NOCHECKS>(RANDOMIZED, size, hKernel, hIn, hOut,
                                                     hRegression, dRegressionIn, dRegressionOut);
    }
};
// 1D unit-kernel test: convolving with an identity kernel must reproduce
// the input for every size in kSizes, every radius and boundary policy.
TEST(Window, Window1DUnit)
{
    // Prepare buffers in advance (kSizes is ascending, so the last entry
    // is the largest size).
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // Byte counts for the CUDA allocations and copies below.
    size_t max_buffer_size = kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    // Host vectors take ELEMENT counts, not byte counts (previously the
    // byte count was passed, over-allocating by a factor of sizeof(float)).
    maps::pinned_vector<float> hKernel(max_kernel_size / sizeof(float), 0.0f),
        hBuffIn(max_buffer_size / sizeof(float)),
        hBuffOut(max_buffer_size / sizeof(float)),
        hBuffRegression(max_buffer_size / sizeof(float));
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionIn, max_buffer_size), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionOut, max_buffer_size), cudaSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(cudaMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                         cudaMemcpyHostToDevice), cudaSuccess);
    // Loop over the various buffer sizes.
    for (int size_ind = 0; size_ind < num_sizes; ++size_ind)
    {
        unsigned int size[1] = { kSizes[size_ind] };
        WindowRadiusLoop<1, false, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
            size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
            dRegressionIn, dRegressionOut);
    }
    ASSERT_EQ(cudaFree(dRegressionIn), cudaSuccess);
    ASSERT_EQ(cudaFree(dRegressionOut), cudaSuccess);
}
// 1D randomized-kernel test: the MAPS convolution is compared against the
// naive Conv1Regression reference for every size/radius/boundary combo.
TEST(Window, Window1DRandom)
{
    // Prepare buffers in advance (kSizes is ascending, so the last entry
    // is the largest size).
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // Byte counts for the CUDA allocations and copies below.
    size_t max_buffer_size = kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    // Host vectors take ELEMENT counts, not byte counts (previously the
    // byte count was passed, over-allocating by a factor of sizeof(float)).
    maps::pinned_vector<float> hKernel(max_kernel_size / sizeof(float), 0.0f),
        hBuffIn(max_buffer_size / sizeof(float)),
        hBuffOut(max_buffer_size / sizeof(float)),
        hBuffRegression(max_buffer_size / sizeof(float));
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionIn, max_buffer_size), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionOut, max_buffer_size), cudaSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(cudaMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                         cudaMemcpyHostToDevice), cudaSuccess);
    // Loop over the various buffer sizes.
    for (int size_ind = 0; size_ind < num_sizes; ++size_ind)
    {
        unsigned int size[1] = { kSizes[size_ind] };
        WindowRadiusLoop<1, true, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
            size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
            dRegressionIn, dRegressionOut);
    }
    ASSERT_EQ(cudaFree(dRegressionIn), cudaSuccess);
    ASSERT_EQ(cudaFree(dRegressionOut), cudaSuccess);
}
// 2D unit-kernel test over every pair of sizes from kSizes: convolving
// with an identity kernel must reproduce the input.
TEST(Window, Window2DUnit)
{
    // Prepare buffers in advance (kSizes is ascending, so the last entry
    // is the largest size).
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // Byte counts for the CUDA allocations and copies below.
    size_t max_buffer_size = kSizes[num_sizes - 1] * kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) *
                             (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    // Host vectors take ELEMENT counts, not byte counts (previously the
    // byte count was passed, over-allocating by a factor of sizeof(float)).
    maps::pinned_vector<float> hKernel(max_kernel_size / sizeof(float), 0.0f),
        hBuffIn(max_buffer_size / sizeof(float)),
        hBuffOut(max_buffer_size / sizeof(float)),
        hBuffRegression(max_buffer_size / sizeof(float));
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionIn, max_buffer_size), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionOut, max_buffer_size), cudaSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(cudaMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                         cudaMemcpyHostToDevice), cudaSuccess);
    // Loop over the various buffer size combinations (width x height).
    for (int size_ind1 = 0; size_ind1 < num_sizes; ++size_ind1)
    {
        for (int size_ind2 = 0; size_ind2 < num_sizes; ++size_ind2)
        {
            unsigned int size[2] = { kSizes[size_ind1], kSizes[size_ind2] };
            WindowRadiusLoop<2, false, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
                size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
                dRegressionIn, dRegressionOut);
        }
    }
    ASSERT_EQ(cudaFree(dRegressionIn), cudaSuccess);
    ASSERT_EQ(cudaFree(dRegressionOut), cudaSuccess);
}
// 2D randomized-kernel test over every pair of sizes from kSizes: the MAPS
// convolution is compared against the naive Conv2Regression reference.
TEST(Window, Window2DRandom)
{
    // Prepare buffers in advance (kSizes is ascending, so the last entry
    // is the largest size).
    int num_sizes = sizeof(kSizes) / sizeof(unsigned int);
    // Byte counts for the CUDA allocations and copies below.
    size_t max_buffer_size = kSizes[num_sizes - 1] * kSizes[num_sizes - 1] * sizeof(float);
    size_t max_kernel_size = (MAX_WINDOW_RADIUS * 2 + 1) *
                             (MAX_WINDOW_RADIUS * 2 + 1) * sizeof(float);
    // Host vectors take ELEMENT counts, not byte counts (previously the
    // byte count was passed, over-allocating by a factor of sizeof(float)).
    maps::pinned_vector<float> hKernel(max_kernel_size / sizeof(float), 0.0f),
        hBuffIn(max_buffer_size / sizeof(float)),
        hBuffOut(max_buffer_size / sizeof(float)),
        hBuffRegression(max_buffer_size / sizeof(float));
    float *dRegressionIn = nullptr, *dRegressionOut = nullptr;
    ASSERT_EQ(cudaSetDevice(0), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionIn, max_buffer_size), cudaSuccess);
    ASSERT_EQ(cudaMalloc(&dRegressionOut, max_buffer_size), cudaSuccess);
    // Prepare random number generator (fixed seed for repeatability).
    std::mt19937 gen(kRandomSeed2);
    std::uniform_real_distribution<float> ud(-1.0f, 1.0f);
    for (size_t i = 0; i < max_buffer_size / sizeof(float); ++i)
        hBuffIn[i] = ud(gen);
    ASSERT_EQ(cudaMemcpy(dRegressionIn, &hBuffIn[0], max_buffer_size,
                         cudaMemcpyHostToDevice), cudaSuccess);
    // Loop over the various buffer size combinations (width x height).
    for (int size_ind1 = 0; size_ind1 < num_sizes; ++size_ind1)
    {
        for (int size_ind2 = 0; size_ind2 < num_sizes; ++size_ind2)
        {
            unsigned int size[2] = { kSizes[size_ind1], kSizes[size_ind2] };
            WindowRadiusLoop<2, true, MIN_WINDOW_RADIUS, MAX_WINDOW_RADIUS>::Loop(
                size, &hKernel[0], &hBuffIn[0], &hBuffOut[0], &hBuffRegression[0],
                dRegressionIn, dRegressionOut);
        }
    }
    ASSERT_EQ(cudaFree(dRegressionIn), cudaSuccess);
    ASSERT_EQ(cudaFree(dRegressionOut), cudaSuccess);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Reads the element one position to the left of each output item via the
// Window's relative-index accessor (in.at(-1)); with WB_WRAP boundaries the
// expected result is out[i] = in[Wrap(i - 1)].
template <int BLOCK_WIDTH, int RADIUS>
__global__ void RelativeIndex1DKernel(maps::WindowSingleGPU<float, 1, BLOCK_WIDTH, 1, 1, RADIUS, 1, 1, 1, maps::WB_WRAP> in,
                                      maps::StructuredInjectiveSingleGPU<float, 1, BLOCK_WIDTH, 1, 1> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    // Only use the "O" indices
    // O X X
    *out.begin() = in.at(-1);
    out.commit();
}
// Verifies Window::at with a negative relative index in 1D: every output
// element must equal the input element one position to the left, wrapped.
TEST(Window, RelativeIndex1D)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCKS = 5,
        TOTAL_SIZE = BLOCK_WIDTH * BLOCKS,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(cudaMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(cudaMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own index so mismatches are easy to spot.
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(cudaMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, cudaMemcpyHostToDevice));
    // Create structures (window radius 1, wrapped boundaries).
    maps::WindowSingleGPU<float, 1, BLOCK_WIDTH, 1, 1, 1, 1, 1, 1, maps::WB_WRAP> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_SIZE;
    maps::StructuredInjectiveSingleGPU<float, 1, BLOCK_WIDTH, 1, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_SIZE;
    // Run test
    RelativeIndex1DKernel<BLOCK_WIDTH, 1><<<BLOCKS, BLOCK_WIDTH>>>(win, soout);
    CUASSERT_NOERR(cudaDeviceSynchronize());
    // Copy output
    CUASSERT_NOERR(cudaMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, cudaMemcpyDeviceToHost));
    // Expected: out[i] == in[Wrap(i - 1)].
    for (int i = 0; i < TOTAL_SIZE; ++i)
        ASSERT_EQ(out_val[i], in_val[maps::Wrap(i - 1, TOTAL_SIZE)]) << "at index " << i;
    // Free GPU memory
    CUASSERT_NOERR(cudaFree(d_in));
    CUASSERT_NOERR(cudaFree(d_out));
}
// Reads the bottom-right neighbor of each output item via the Window's 2D
// relative-index accessor (in.at(1, 1)); with WB_WRAP the expected result
// is out[y][x] = in[Wrap(y + 1)][Wrap(x + 1)].
template <int BLOCK_WIDTH, int BLOCK_HEIGHT, int RADIUS>
__global__ void RelativeIndex2DKernel(maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, RADIUS, 1, 1, 1, maps::WB_WRAP> in,
                                      maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    // Only use the "O" indices
    // X X X
    // X X X
    // X X O
    *out.begin() = in.at(1, 1);
    out.commit();
}
// Verifies Window::at with a (1, 1) relative index in 2D: each output pixel
// must equal its bottom-right neighbor with wrapped boundaries.
TEST(Window, RelativeIndex2D)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        XBLOCKS = 2,
        YBLOCKS = 3,
        TOTAL_SIZE = BLOCK_WIDTH * BLOCK_HEIGHT * XBLOCKS * YBLOCKS,
        TOTAL_WIDTH = BLOCK_WIDTH * XBLOCKS,
        TOTAL_HEIGHT = BLOCK_HEIGHT * YBLOCKS,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(cudaMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(cudaMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own linear index.
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(cudaMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, cudaMemcpyHostToDevice));
    // Create structures (window radius 1, wrapped boundaries).
    maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 1, 1, 1, 1, maps::WB_WRAP> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    // Run test
    RelativeIndex2DKernel<BLOCK_WIDTH, BLOCK_HEIGHT, 1> <<<dim3(XBLOCKS, YBLOCKS), dim3(BLOCK_WIDTH, BLOCK_HEIGHT)>>>(win, soout);
    CUASSERT_NOERR(cudaDeviceSynchronize());
    // Copy output
    CUASSERT_NOERR(cudaMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, cudaMemcpyDeviceToHost));
    // Expected: out[y][x] == in[Wrap(y + 1)][Wrap(x + 1)].
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x],
                      in_val[maps::Wrap(y + 1, TOTAL_HEIGHT) * TOTAL_WIDTH + maps::Wrap(x + 1, TOTAL_WIDTH)])
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(cudaFree(d_in));
    CUASSERT_NOERR(cudaFree(d_out));
}
// Sums the 4-neighborhood (up, left, right, down) of each output item using
// the Window's iterator-aligned accessor aligned_at(oiter, dx, dy).
template <int BLOCK_WIDTH, int BLOCK_HEIGHT, int RADIUS>
__global__ void RelativeIndexAligned2DKernel(maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, RADIUS, 1, 1, 1, maps::WB_WRAP> in,
                                             maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> out)
{
    __shared__ typename decltype(in)::SharedData sdata;
    in.init(sdata);
    out.init();
    if (out.Items() == 0)
        return;
    // Only use the "O" indices
    // X O X
    // O X O
    // X O X
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        *oiter = in.aligned_at(oiter, 0, -1) +
                 in.aligned_at(oiter, -1, 0) +
                 in.aligned_at(oiter, 1, 0) +
                 in.aligned_at(oiter, 0, 1);
    }
    out.commit();
}
// Verifies Window::aligned_at in 2D: each output pixel must equal the sum
// of its four wrapped axis-aligned neighbors.
TEST(Window, RelativeIndexAligned2D)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        XBLOCKS = 2,
        YBLOCKS = 3,
        TOTAL_SIZE = BLOCK_WIDTH * BLOCK_HEIGHT * XBLOCKS * YBLOCKS,
        TOTAL_WIDTH = BLOCK_WIDTH * XBLOCKS,
        TOTAL_HEIGHT = BLOCK_HEIGHT * YBLOCKS,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(cudaMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(cudaMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own linear index.
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(cudaMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, cudaMemcpyHostToDevice));
    // Create structures (window radius 1, wrapped boundaries).
    maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 1, 1, 1, 1, maps::WB_WRAP> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    // Run test
    RelativeIndexAligned2DKernel<BLOCK_WIDTH, BLOCK_HEIGHT, 1> <<<dim3(XBLOCKS, YBLOCKS), dim3(BLOCK_WIDTH, BLOCK_HEIGHT)>>>(win, soout);
    CUASSERT_NOERR(cudaDeviceSynchronize());
    // Copy output
    CUASSERT_NOERR(cudaMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, cudaMemcpyDeviceToHost));
    // Expected: sum of wrapped up/left/right/down neighbors.
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x],
                      (in_val[maps::Wrap(y - 1, TOTAL_HEIGHT) * TOTAL_WIDTH + x] +
                       in_val[y * TOTAL_WIDTH + maps::Wrap(x - 1, TOTAL_WIDTH)] +
                       in_val[y * TOTAL_WIDTH + maps::Wrap(x + 1, TOTAL_WIDTH)] +
                       in_val[maps::Wrap(y + 1, TOTAL_HEIGHT) * TOTAL_WIDTH + x]))
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(cudaFree(d_in));
    CUASSERT_NOERR(cudaFree(d_out));
}
// Identity copy through a zero-radius Window (single-GPU container):
// each output item receives the aligned input element unchanged.
template <int BLOCK_WIDTH, int BLOCK_HEIGHT>
__global__ void NoRadiusSingleGPUKernel(maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> in,
                                        maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> out)
{
    // MAPS_INIT performs the container init calls (including shared memory).
    MAPS_INIT(in, out);
    if (out.Items() == 0)
        return;
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        *oiter = *in.align(oiter);
    }
    out.commit();
}
// Identity copy through a zero-radius Window using the multi-GPU container
// variants; MAPS_MULTIDEF2/MAPS_MULTI_INITVARS carry the extra scheduling
// parameters injected by the maps::multi framework.
template <int BLOCK_WIDTH, int BLOCK_HEIGHT>
__global__ void NoRadiusMultiGPUKernel(MAPS_MULTIDEF2,
                                       maps::Window<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> in,
                                       maps::StructuredInjective2D<float, BLOCK_WIDTH, BLOCK_HEIGHT> out)
{
    MAPS_MULTI_INITVARS(in, out);
    if (out.Items() == 0)
        return;
    #pragma unroll
    MAPS_FOREACH(oiter, out)
    {
        *oiter = *in.align(oiter);
    }
    out.commit();
}
// Verifies that a zero-radius Window behaves as a pure pass-through on a
// non-block-multiple 2D buffer (5000x38), single-GPU containers.
TEST(Window, NoRadiusSingleGPU)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        TOTAL_WIDTH = 5000,
        TOTAL_HEIGHT = 38,
        // Ceil-divide so partial blocks at the edges are exercised.
        XBLOCKS = (TOTAL_WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
        YBLOCKS = (TOTAL_HEIGHT + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT,
        TOTAL_SIZE = TOTAL_WIDTH * TOTAL_HEIGHT,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(cudaMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(cudaMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own linear index.
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(cudaMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, cudaMemcpyHostToDevice));
    // Create structures (radius 0, zero boundaries).
    maps::WindowSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    maps::StructuredInjectiveSingleGPU<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    // Run test
    NoRadiusSingleGPUKernel<BLOCK_WIDTH, BLOCK_HEIGHT> <<<dim3(XBLOCKS, YBLOCKS), dim3(BLOCK_WIDTH, BLOCK_HEIGHT)>>>(win, soout);
    CUASSERT_NOERR(cudaDeviceSynchronize());
    // Copy output
    CUASSERT_NOERR(cudaMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, cudaMemcpyDeviceToHost));
    // Expected: output identical to input.
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x], in_val[y * TOTAL_WIDTH + x])
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(cudaFree(d_in));
    CUASSERT_NOERR(cudaFree(d_out));
}
// Same pass-through check as NoRadiusSingleGPU, but driving the multi-GPU
// container variants directly: the multi-GPU bookkeeping fields (apron
// flag, block offset, grid width/dims, blockId) are filled in by hand to
// emulate a single-device segment, and the 2D grid is flattened to 1D as
// the multi framework expects.
TEST(Window, NoRadiusMultiGPU)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        TOTAL_WIDTH = 5000,
        TOTAL_HEIGHT = 38,
        XBLOCKS = (TOTAL_WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
        YBLOCKS = (TOTAL_HEIGHT + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT,
        TOTAL_SIZE = TOTAL_WIDTH * TOTAL_HEIGHT,
    };
    // Allocate GPU memory
    float *d_in = nullptr, *d_out = nullptr;
    CUASSERT_NOERR(cudaMalloc(&d_in, sizeof(float) * TOTAL_SIZE));
    CUASSERT_NOERR(cudaMalloc(&d_out, sizeof(float) * TOTAL_SIZE));
    // Initialize input with its own linear index.
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Copy input
    CUASSERT_NOERR(cudaMemcpy(d_in, &in_val[0], sizeof(float) * TOTAL_SIZE, cudaMemcpyHostToDevice));
    // Create structures
    maps::Window<float, 2, BLOCK_WIDTH, BLOCK_HEIGHT, 1, 0, 1, 1, 1, maps::WB_ZERO> win;
    win.m_ptr = d_in;
    win.m_stride = win.m_dimensions[0] = TOTAL_WIDTH;
    win.m_dimensions[1] = TOTAL_HEIGHT;
    win.m_containsApron = true;
    win.block_offset = 0;
    win.m_gridWidth = XBLOCKS;
    maps::StructuredInjective2D<float, BLOCK_WIDTH, BLOCK_HEIGHT> soout;
    soout.m_ptr = d_out;
    soout.m_stride = soout.m_dimensions[0] = TOTAL_WIDTH;
    soout.m_dimensions[1] = TOTAL_HEIGHT;
    soout.grid_dims = dim3(XBLOCKS, YBLOCKS);
    soout.blockId = make_uint3(0,0,0);
    // Run test (1D grid of XBLOCKS * YBLOCKS blocks; the first three kernel
    // arguments are the MAPS_MULTIDEF2 scheduling parameters).
    NoRadiusMultiGPUKernel<BLOCK_WIDTH, BLOCK_HEIGHT> <<<dim3(XBLOCKS * YBLOCKS), dim3(BLOCK_WIDTH, BLOCK_HEIGHT)>>>(
        0, dim3(XBLOCKS, YBLOCKS), make_uint3(0,0,0), win, soout);
    CUASSERT_NOERR(cudaDeviceSynchronize());
    // Copy output
    CUASSERT_NOERR(cudaMemcpy(&out_val[0], d_out, sizeof(float) * TOTAL_SIZE, cudaMemcpyDeviceToHost));
    // Expected: output identical to input.
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x], in_val[y * TOTAL_WIDTH + x])
                << "at index (" << x << ", " << y << ")";
        }
    }
    // Free GPU memory
    CUASSERT_NOERR(cudaFree(d_in));
    CUASSERT_NOERR(cudaFree(d_out));
}
// Same pass-through check, but letting the maps::multi Scheduler analyze,
// partition and invoke the kernel instead of launching it manually.
TEST(Window, NoRadiusMAPSMulti)
{
    enum
    {
        BLOCK_WIDTH = 32,
        BLOCK_HEIGHT = 16,
        TOTAL_WIDTH = 5000,
        TOTAL_HEIGHT = 38,
        XBLOCKS = (TOTAL_WIDTH + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
        YBLOCKS = (TOTAL_HEIGHT + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT,
        TOTAL_SIZE = TOTAL_WIDTH * TOTAL_HEIGHT,
    };
    // Allocate GPU memory (managed by the multi framework's Matrix type).
    maps::multi::Matrix<float> in(TOTAL_WIDTH, TOTAL_HEIGHT),
                               out(TOTAL_WIDTH, TOTAL_HEIGHT);
    // Initialize input with its own linear index.
    std::vector<float> in_val(TOTAL_SIZE), out_val(TOTAL_SIZE);
    for (int x = 0; x < TOTAL_SIZE; ++x)
    {
        in_val[x] = (float)x;
        out_val[x] = 0.0f;
    }
    // Bind matrices to the host arrays.
    in.Bind(&in_val[0]);
    out.Bind(&out_val[0]);
    // Scheduler restricted to device 0.
    maps::multi::Scheduler sched{0};
    sched.AnalyzeCall(dim3(), dim3(BLOCK_WIDTH, BLOCK_HEIGHT),
                      maps::multi::Window2D<float, BLOCK_WIDTH, BLOCK_HEIGHT, 0>(in),
                      maps::multi::StructuredInjectiveMatrixO<float>(out));
    // Run test
    sched.Invoke(NoRadiusMultiGPUKernel<BLOCK_WIDTH, BLOCK_HEIGHT>, dim3(), dim3(BLOCK_WIDTH, BLOCK_HEIGHT),
                 maps::multi::Window2D<float, BLOCK_WIDTH, BLOCK_HEIGHT, 0>(in),
                 maps::multi::StructuredInjectiveMatrixO<float>(out));
    CUASSERT_NOERR(cudaDeviceSynchronize());
    // Copy output back into out_val via the scheduler.
    sched.Gather<false>(out);
    // Expected: output identical to input.
    for (int y = 0; y < TOTAL_HEIGHT; ++y)
    {
        for (int x = 0; x < TOTAL_WIDTH; ++x)
        {
            ASSERT_EQ(out_val[y * TOTAL_WIDTH + x], in_val[y * TOTAL_WIDTH + x])
                << "at index (" << x << ", " << y << ")";
        }
    }
}
|
27ee0a2ce04a898bdf5424d92f2e8e737b0e3b1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fashion.h"
// Configures a non-overlapping pooling layer: records the input/output
// geometry, then sizes the host (X, Output, b) and device-side "_c"
// buffers for a minibatch of `minib` samples.
// Output spatial dims are the input dims divided by pool_size (integer
// division, so any remainder rows/columns are dropped).
// Removed an unused std::default_random_engine / std::normal_distribution
// pair -- pooling has no weights to randomize.
void Pool::init(int minib, int Inputimage_h, int Inputimage_w,
                int Inputimage_ch, int pool_size) {
  this->Inputimage_height = Inputimage_h;
  this->Inputimage_width = Inputimage_w;
  this->Inputimage_channel = Inputimage_ch;
  this->Outputimage_height = Inputimage_h / pool_size;
  this->Outputimage_width = Inputimage_w / pool_size;
  // Pooling preserves the channel count.
  this->Outputimage_channel = Inputimage_ch;
  this->Output_height = minib;
  this->Output_width =
      Outputimage_channel * Outputimage_height * Outputimage_width;
  this->minibatch = minib;
  // Flattened input: one row per sample, one column per input value.
  this->X_height = minib;
  this->X_width = Inputimage_channel * Inputimage_height * Inputimage_width;
  this->b_height = minib;
  this->b_width = Inputimage_channel;
  this->pool_size = pool_size;
  this->X_c.resize(
      minibatch * Inputimage_channel * Inputimage_height * Inputimage_width, 0);
  this->X.resize(
      minibatch * Inputimage_channel * Inputimage_height * Inputimage_width, 0);
  this->Output_c.resize(minibatch * Outputimage_channel * Outputimage_height *
                            Outputimage_width,
                        0);
  this->Output.resize(minibatch * Outputimage_channel * Outputimage_height *
                          Outputimage_width,
                      0);
  // Per-channel bias buffers, initialized to 0.1.
  this->b.resize(Inputimage_channel, 0.1);
  this->b_c.resize(Inputimage_channel, 0.1);
}
// Launch the naive forward pooling kernel over `input` (flattened
// minibatch x channel x H x W); results are written into this->Output.
// Grid: (sample, channel, output tile); block: TILE_WIDTH x TILE_WIDTH.
void Pool::forward_GPU_naive(device_vector<float> &input) {
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
// z-dimension packs the 2-D tile grid covering the output plane.
int bz = ceil((float)Outputimage_width / TILE_WIDTH) *
ceil((float)Outputimage_height / TILE_WIDTH);
if (bz == 0)
bz = 1;
dim3 numBlocks(minibatch, Outputimage_channel, bz);
float *input_pointer = thrust::raw_pointer_cast(input.data());
float *Output_pointer = thrust::raw_pointer_cast(Output.data());
hipLaunchKernelGGL(( poolingLayer_forward_GPU_naive), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
input_pointer, Inputimage_height, Inputimage_width, Output_pointer,
Outputimage_channel, pool_size);
}
// double for loop version
// Launch the backward pooling kernel: scatters the output-side gradient
// `output` back over this->X (input-side gradient). Same launch geometry
// as forward_GPU_naive.
void Pool::backward_GPU(device_vector<float> &output) {
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
int bz = ceil((float)Outputimage_width / TILE_WIDTH) *
ceil((float)Outputimage_height / TILE_WIDTH);
if (bz == 0)
bz = 1;
dim3 numBlocks(minibatch, Outputimage_channel, bz);
float *input_pointer = thrust::raw_pointer_cast(X.data());
float *output_pointer = thrust::raw_pointer_cast(output.data());
hipLaunchKernelGGL(( poolingLayer_backward_GPU), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
input_pointer, Inputimage_height, Inputimage_width, output_pointer,
Outputimage_channel, pool_size);
}
// M: number of input, output feature maps
// H_in: height of each input image
// W_in: width of each input map image
// X: input feature maps
// Y: output feature maps
// Average-pooling forward pass: one thread per output pixel.
// Grid: x = batch sample, y = feature map (M maps), z = output tile index;
// block: TILE_WIDTH x TILE_WIDTH.
// X: input maps (H_in x W_in per map); Y: output maps
// (H_in/pool_size x W_in/pool_size per map).
__global__ void poolingLayer_forward_GPU_naive(float *X, int H_in, int W_in,
float *Y, int M, int pool_size) {
int H_out = H_in / pool_size;
int W_out = W_in / pool_size;
int W_grid = ceilf((float)W_out / TILE_WIDTH);
if (W_grid == 0)
W_grid = 1;
int n = blockIdx.x;
int m = blockIdx.y;
// (h, w) is the output coordinate; its window starts at
// (pool_size*h, pool_size*w) in the input plane.
int h = (blockIdx.z / W_grid) * TILE_WIDTH + threadIdx.y;
int w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x;
// Bounds guard hoisted out of the accumulation loop (it was re-tested on
// every inner iteration); the trailing __syncthreads() was dropped — the
// kernel uses no shared memory, so no barrier is needed.
if (h >= H_out || w >= W_out)
return;
float acc = 0;
for (int p = 0; p < pool_size; p++) { // loop over KxK input samples
for (int q = 0; q < pool_size; q++)
acc = acc + X[n * (M * H_in * W_in) + m * (H_in * W_in) +
(pool_size * h + p) * (W_in) + (pool_size * w + q)] /
(pool_size * pool_size);
}
Y[n * (M * H_out * W_out) + m * (H_out * W_out) + h * (W_out) + w] = acc;
}
// double for loop version
// M: number of input, output feature maps
// H_in: height of each input image
// W_in: width of each input map image
// X: input feature maps
// Y: output feature maps
// Pooling backward pass: each thread broadcasts its output-plane value
// Y[n,m,h,w] into the pool_size x pool_size window of X (input-side plane).
// Launch geometry matches poolingLayer_forward_GPU_naive.
// NOTE(review): exact average-pool backprop would scale the scattered value
// by 1/(pool_size*pool_size); verify against the training code before
// changing that.
__global__ void poolingLayer_backward_GPU(float *X, int H_in, int W_in,
float *Y, int M, int pool_size) {
int H_out = H_in / pool_size;
int W_out = W_in / pool_size;
int W_grid = ceilf((float)W_out / TILE_WIDTH);
if (W_grid == 0)
W_grid = 1;
int n = blockIdx.x;
int m = blockIdx.y;
int h = (blockIdx.z / W_grid) * TILE_WIDTH + threadIdx.y;
int w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x;
// Guard hoisted out of the loops; removed the unused accumulator and the
// unnecessary __syncthreads() (no shared memory in this kernel).
if (h >= H_out || w >= W_out)
return;
// Load the broadcast value once instead of re-reading it per window cell.
float g = Y[n * (M * H_out * W_out) + m * (H_out * W_out) + h * (W_out) + w];
for (int p = 0; p < pool_size; p++) {
for (int q = 0; q < pool_size; q++)
X[n * (M * H_in * W_in) + m * (H_in * W_in) +
(pool_size * h + p) * (W_in) + (pool_size * w + q)] = g;
}
}
| 27ee0a2ce04a898bdf5424d92f2e8e737b0e3b1c.cu | #include "fashion.h"
// Configure the pooling layer (CUDA variant of the paired HIP source).
// Output dims are integer quotients of the input dims by pool_size.
void Pool::init(int minib, int Inputimage_h, int Inputimage_w,
int Inputimage_ch, int pool_size) {
// NOTE(review): generator/distribution are unused (no weights to init).
std::default_random_engine generator;
std::normal_distribution<float> distribution(0, 1.0);
this->Inputimage_height = Inputimage_h;
this->Inputimage_width = Inputimage_w;
this->Inputimage_channel = Inputimage_ch;
this->Outputimage_height = Inputimage_h / pool_size;
this->Outputimage_width = Inputimage_w / pool_size;
this->Outputimage_channel = Inputimage_ch;
this->Output_height = minib;
this->Output_width =
Outputimage_channel * Outputimage_height * Outputimage_width;
this->minibatch = minib;
this->X_height = minib;
this->X_width = Inputimage_channel * Inputimage_height * Inputimage_width;
this->b_height = minib;
this->b_width = Inputimage_channel;
this->pool_size = pool_size;
// Buffers sized for a full minibatch, zero-filled (biases start at 0.1).
this->X_c.resize(
minibatch * Inputimage_channel * Inputimage_height * Inputimage_width, 0);
this->X.resize(
minibatch * Inputimage_channel * Inputimage_height * Inputimage_width, 0);
this->Output_c.resize(minibatch * Outputimage_channel * Outputimage_height *
Outputimage_width,
0);
this->Output.resize(minibatch * Outputimage_channel * Outputimage_height *
Outputimage_width,
0);
this->b.resize(Inputimage_channel, 0.1);
this->b_c.resize(Inputimage_channel, 0.1);
}
// Launch the naive forward pooling kernel; results go to this->Output.
// Grid: (sample, channel, output tile); block: TILE_WIDTH x TILE_WIDTH.
void Pool::forward_GPU_naive(device_vector<float> &input) {
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
// z-dimension packs the 2-D tile grid covering the output plane.
int bz = ceil((float)Outputimage_width / TILE_WIDTH) *
ceil((float)Outputimage_height / TILE_WIDTH);
if (bz == 0)
bz = 1;
dim3 numBlocks(minibatch, Outputimage_channel, bz);
float *input_pointer = thrust::raw_pointer_cast(input.data());
float *Output_pointer = thrust::raw_pointer_cast(Output.data());
poolingLayer_forward_GPU_naive<<<numBlocks, threadsPerBlock>>>(
input_pointer, Inputimage_height, Inputimage_width, Output_pointer,
Outputimage_channel, pool_size);
}
// double for loop version
// Launch the backward pooling kernel: scatters the output-side gradient
// `output` back over this->X. Same launch geometry as the forward pass.
void Pool::backward_GPU(device_vector<float> &output) {
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
int bz = ceil((float)Outputimage_width / TILE_WIDTH) *
ceil((float)Outputimage_height / TILE_WIDTH);
if (bz == 0)
bz = 1;
dim3 numBlocks(minibatch, Outputimage_channel, bz);
float *input_pointer = thrust::raw_pointer_cast(X.data());
float *output_pointer = thrust::raw_pointer_cast(output.data());
poolingLayer_backward_GPU<<<numBlocks, threadsPerBlock>>>(
input_pointer, Inputimage_height, Inputimage_width, output_pointer,
Outputimage_channel, pool_size);
}
// M: number of input, output feature maps
// H_in: height of each input image
// W_in: width of each input map image
// X: input feature maps
// Y: output feature maps
// Average-pooling forward pass: one thread per output pixel.
// Grid: x = sample, y = feature map (M maps), z = output tile index;
// block: TILE_WIDTH x TILE_WIDTH.
__global__ void poolingLayer_forward_GPU_naive(float *X, int H_in, int W_in,
float *Y, int M, int pool_size) {
int H_out = H_in / pool_size;
int W_out = W_in / pool_size;
int W_grid = ceilf((float)W_out / TILE_WIDTH);
if (W_grid == 0)
W_grid = 1;
int n = blockIdx.x;
int m = blockIdx.y;
// (h, w) is the output coordinate; its window starts at
// (pool_size*h, pool_size*w) in the input plane.
int h = (blockIdx.z / W_grid) * TILE_WIDTH + threadIdx.y;
int w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x;
// Bounds guard hoisted out of the accumulation loop; the trailing
// __syncthreads() was dropped (no shared memory, so no barrier needed).
if (h >= H_out || w >= W_out)
return;
float acc = 0;
for (int p = 0; p < pool_size; p++) { // loop over KxK input samples
for (int q = 0; q < pool_size; q++)
acc = acc + X[n * (M * H_in * W_in) + m * (H_in * W_in) +
(pool_size * h + p) * (W_in) + (pool_size * w + q)] /
(pool_size * pool_size);
}
Y[n * (M * H_out * W_out) + m * (H_out * W_out) + h * (W_out) + w] = acc;
}
// double for loop version
// M: number of input, output feature maps
// H_in: height of each input image
// W_in: width of each input map image
// X: input feature maps
// Y: output feature maps
// Pooling backward pass: each thread broadcasts its output-plane value
// Y[n,m,h,w] into the pool_size x pool_size window of X.
// NOTE(review): exact average-pool backprop would scale the scattered value
// by 1/(pool_size*pool_size); verify against the training code.
__global__ void poolingLayer_backward_GPU(float *X, int H_in, int W_in,
float *Y, int M, int pool_size) {
int H_out = H_in / pool_size;
int W_out = W_in / pool_size;
int W_grid = ceilf((float)W_out / TILE_WIDTH);
if (W_grid == 0)
W_grid = 1;
int n = blockIdx.x;
int m = blockIdx.y;
int h = (blockIdx.z / W_grid) * TILE_WIDTH + threadIdx.y;
int w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x;
// Guard hoisted; removed the unused accumulator and the unnecessary
// __syncthreads() (no shared memory in this kernel).
if (h >= H_out || w >= W_out)
return;
float g = Y[n * (M * H_out * W_out) + m * (H_out * W_out) + h * (W_out) + w];
for (int p = 0; p < pool_size; p++) {
for (int q = 0; q < pool_size; q++)
X[n * (M * H_in * W_in) + m * (H_in * W_in) +
(pool_size * h + p) * (W_in) + (pool_size * w + q)] = g;
}
}
|
6e4c8cb37ac0a91e5dc56ed0d2e478837d0d5860.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "sum_conv_fc_op.h"
namespace caffe2 {
namespace {
// Broadcast-add: output[i] = input1[i] + input2[i / pixels], i.e. a per-
// (sample, channel) FC scalar is added to every pixel of the conv map.
template <typename T>
__global__ void SumConvFCForward(const int nthreads,
const T* input1, const T* input2,
const int pixels,
T* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int idx = index / pixels;
output[index] = input1[index] + input2[idx];
}
}
// Backward for SumConvFC: dX1 = dY element-wise; dX2[idx] = sum of dY over
// that plane's `pixels` elements.  The plane sum is done serially by the
// single thread whose index lands on the plane's first pixel.
template <typename T>
__global__ void SumConvFCBackward(const int nthreads, const T* input_grad,
const int pixels, T* output_grad1, T* output_grad2) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// first for X1
output_grad1[index] = input_grad[index];
// only doing this for the first element, not sure if it is
// the most efficient way though
if (index % pixels == 0) {
const int idx = index / pixels;
const int base_start = idx * pixels;
const int base_end = base_start + pixels;
// just summing things up
T grad = 0.;
for (int i=base_start; i<base_end; i++) {
grad += input_grad[i];
}
output_grad2[idx] = grad;
}
}
}
} // namespace
// Forward: Y (N,C,H,W) = X1 (N,C,H,W conv features) + X2 (N,C fc features)
// broadcast over each H*W plane.
template<>
bool SumConvFCOp<float, CUDAContext>::RunOnDevice() {
auto& X1 = Input(0); // Input data 1
auto& X2 = Input(1); // Input data 2
auto* Y = Output(0); // Output data, summation of the two
const int N = X1.dim32(0);
const int C = X1.dim32(1);
const int H = X1.dim32(2);
const int W = X1.dim32(3);
// X2 must be 2-D and match X1's leading (N, C) dims.
DCHECK_EQ(N, X2.dim32(0));
DCHECK_EQ(C, X2.dim32(1));
DCHECK_EQ(X2.ndim(),2);
const int pixels = H * W;
// N, C, H, W
Y->Resize(N, C, H, W);
const int output_size = Y->size();
hipLaunchKernelGGL(( SumConvFCForward<float>), dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size, X1.data<float>(), X2.data<float>(),
pixels, Y->mutable_data<float>());
return true;
}
// Backward: dX1 = dY element-wise; dX2[n,c] = sum of dY over the H*W plane.
template<>
bool SumConvFCGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0); // Gradient of the output data
auto& X1 = Input(1); // Input data 1
auto& X2 = Input(2); // Input data 2
auto* dX1 = Output(0); // Gradient of the input data 1
auto* dX2 = Output(1); // Gradient of the input data 2
const int H = X1.dim32(2);
const int W = X1.dim32(3);
const int pixels = H * W;
const int output_size = dY.size();
dX1->ResizeLike(X1);
dX2->ResizeLike(X2);
hipLaunchKernelGGL(( SumConvFCBackward<float>), dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size, dY.data<float>(),
pixels,
dX1->mutable_data<float>(),
dX2->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SumConvFC,
SumConvFCOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SumConvFCGradient,
SumConvFCGradientOp<float, CUDAContext>);
} // namespace caffe2 | 6e4c8cb37ac0a91e5dc56ed0d2e478837d0d5860.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "sum_conv_fc_op.h"
namespace caffe2 {
namespace {
// Broadcast-add: output[i] = input1[i] + input2[i / pixels] (per-plane
// FC scalar added to every conv pixel).
template <typename T>
__global__ void SumConvFCForward(const int nthreads,
const T* input1, const T* input2,
const int pixels,
T* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int idx = index / pixels;
output[index] = input1[index] + input2[idx];
}
}
// Backward for SumConvFC: dX1 = dY element-wise; dX2[idx] = plane sum of
// dY, computed serially by the thread on each plane's first pixel.
template <typename T>
__global__ void SumConvFCBackward(const int nthreads, const T* input_grad,
const int pixels, T* output_grad1, T* output_grad2) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// first for X1
output_grad1[index] = input_grad[index];
// only doing this for the first element, not sure if it is
// the most efficient way though
if (index % pixels == 0) {
const int idx = index / pixels;
const int base_start = idx * pixels;
const int base_end = base_start + pixels;
// just summing things up
T grad = 0.;
for (int i=base_start; i<base_end; i++) {
grad += input_grad[i];
}
output_grad2[idx] = grad;
}
}
}
} // namespace
// Forward: Y (N,C,H,W) = X1 + X2 (N,C) broadcast over each H*W plane.
template<>
bool SumConvFCOp<float, CUDAContext>::RunOnDevice() {
auto& X1 = Input(0); // Input data 1
auto& X2 = Input(1); // Input data 2
auto* Y = Output(0); // Output data, summation of the two
const int N = X1.dim32(0);
const int C = X1.dim32(1);
const int H = X1.dim32(2);
const int W = X1.dim32(3);
// X2 must be 2-D and match X1's leading (N, C) dims.
DCHECK_EQ(N, X2.dim32(0));
DCHECK_EQ(C, X2.dim32(1));
DCHECK_EQ(X2.ndim(),2);
const int pixels = H * W;
// N, C, H, W
Y->Resize(N, C, H, W);
const int output_size = Y->size();
SumConvFCForward<float><<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size, X1.data<float>(), X2.data<float>(),
pixels, Y->mutable_data<float>());
return true;
}
// Backward: dX1 = dY element-wise; dX2[n,c] = sum of dY over the H*W plane.
template<>
bool SumConvFCGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0); // Gradient of the output data
auto& X1 = Input(1); // Input data 1
auto& X2 = Input(2); // Input data 2
auto* dX1 = Output(0); // Gradient of the input data 1
auto* dX2 = Output(1); // Gradient of the input data 2
const int H = X1.dim32(2);
const int W = X1.dim32(3);
const int pixels = H * W;
const int output_size = dY.size();
dX1->ResizeLike(X1);
dX2->ResizeLike(X2);
SumConvFCBackward<float><<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size, dY.data<float>(),
pixels,
dX1->mutable_data<float>(),
dX2->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SumConvFC,
SumConvFCOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SumConvFCGradient,
SumConvFCGradientOp<float, CUDAContext>);
} // namespace caffe2 |
014f8d2e8ea865fa217d2177215c0fd4dfb1f77b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Prints one greeting per thread, tagged with its block and thread index.
__global__ void hello(){
printf("Hello from block: %u, thread: %u\n", blockIdx.x, threadIdx.x);
}
int main(){
// Launch 3 blocks of 4 threads, then synchronize so the device printf
// buffer is flushed before the process exits.
hipLaunchKernelGGL(( hello), dim3(3), dim3(4), 0, 0, );
hipDeviceSynchronize();
}
| 014f8d2e8ea865fa217d2177215c0fd4dfb1f77b.cu | #include <stdio.h>
// Prints one greeting per thread, tagged with its block and thread index.
__global__ void hello(){
printf("Hello from block: %u, thread: %u\n", blockIdx.x, threadIdx.x);
}
int main(){
// Launch 3 blocks of 4 threads, then synchronize so the device printf
// buffer is flushed before the process exits.
hello<<<3, 4>>>();
cudaDeviceSynchronize();
}
|
14e656f7d4f41d4f40a3ebd5160a0b6f32a0ea24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Tiled shared-memory matrix multiply C = A * B.
// A: numARows x numAColumns, B: numBRows x numBColumns; phase loop is
// driven by numBRows, so numAColumns is expected to equal numBRows
// (assumption — confirm with callers).  Block: TILE_WIDTH x TILE_WIDTH.
__global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by *TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
//int Row = blockIdx.y*blockDim.y+threadIdx.y;
//int Col = blockIdx.x*blockDim.x+threadIdx.x;
float Cvalue = 0;
// Loop over the A and B tiles required to compute the C element
for (int t = 0; t < (numBRows-1)/TILE_WIDTH + 1; ++t)
{
if(Row < numARows && t*TILE_WIDTH+tx < numBRows)
{
// Collaborative loading of A
ds_A[ty][tx] = A[Row*numAColumns + t*TILE_WIDTH+tx];
}
else
{ // Control divergence at the edge
ds_A[ty][tx]= 0.0;
}
if ( t*TILE_WIDTH+ty < numBRows && Col < numBColumns)
{
// Collaborative loading of B if within range of matrix
ds_B[ty][tx] = B[(t*TILE_WIDTH+ty)*numBColumns + Col];
}
else
{
ds_B[ty][tx] = 0.0;
}
__syncthreads();
for (int i = 0; i < TILE_WIDTH; ++i)
{
Cvalue += ds_A[ty][i] * ds_B[i][tx];
}
__syncthreads();
}
if ( Row < numARows && Col < numBColumns)
C[Row*numBColumns+Col] = Cvalue;
} | 14e656f7d4f41d4f40a3ebd5160a0b6f32a0ea24.cu | #include "includes.h"
// Tiled shared-memory matrix multiply C = A * B.
// Phase loop is driven by numBRows, so numAColumns is expected to equal
// numBRows (assumption — confirm with callers). Block: TILE_WIDTH^2.
__global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by *TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
//int Row = blockIdx.y*blockDim.y+threadIdx.y;
//int Col = blockIdx.x*blockDim.x+threadIdx.x;
float Cvalue = 0;
// Loop over the A and B tiles required to compute the C element
for (int t = 0; t < (numBRows-1)/TILE_WIDTH + 1; ++t)
{
if(Row < numARows && t*TILE_WIDTH+tx < numBRows)
{
// Collaborative loading of A
ds_A[ty][tx] = A[Row*numAColumns + t*TILE_WIDTH+tx];
}
else
{ // Control divergence at the edge
ds_A[ty][tx]= 0.0;
}
if ( t*TILE_WIDTH+ty < numBRows && Col < numBColumns)
{
// Collaborative loading of B if within range of matrix
ds_B[ty][tx] = B[(t*TILE_WIDTH+ty)*numBColumns + Col];
}
else
{
ds_B[ty][tx] = 0.0;
}
__syncthreads();
for (int i = 0; i < TILE_WIDTH; ++i)
{
Cvalue += ds_A[ty][i] * ds_B[i][tx];
}
__syncthreads();
}
if ( Row < numARows && Col < numBColumns)
C[Row*numBColumns+Col] = Cvalue;
} |
6bde22bdd86bc7b3081895040b095380a586c9a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILE_WIDTH 32
/**
* This is a kernel MatrixMul function of parallel Matmul
*
* @param A Matrix (m,dim)
* @param B Matrix (dim,n)
* @param C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
// Tiled shared-memory kernel for C (m x n) = A (m x dim) * B (dim x n).
// Out-of-range lanes load zeros so every thread reaches both barriers.
__global__
void MatrixMulKernel(float* A,
float* B,
float* C,
int m,
int n,
int dim){
__shared__ float Ads[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Identify the row and column of the C element to work on
// Each thread works on an element of C
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
// Calculate the number of phase
int phase_num = ceil(dim / (float)TILE_WIDTH);
bool cond0 = Row < m;
bool cond1 = Col < n;
float Cvalue = 0;
// Each thread loads 'Row'th row of A and 'Col'th column of B
for (int ph = 0; ph < phase_num; ++ph) {
if(ph * TILE_WIDTH + tx < dim){
Ads[ty][tx] = (cond0)?A[Row * dim + ph*TILE_WIDTH + tx]:0;
}
else{
Ads[ty][tx] = 0;
}
if(ph * TILE_WIDTH + ty < dim){
Bds[ty][tx] = (cond1)?B[(ph*TILE_WIDTH + ty)*n + Col]:0;
}
else{
Bds[ty][tx] = 0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Cvalue += Ads[ty][k] * Bds[k][tx];
}
__syncthreads();
}
if(cond0 && cond1){
C[Row * n + Col] = Cvalue;
}
}
/**
* This is a parallel Stub function of parallel Matmul
*
* @param h_A Matrix (m,dim)
* @param h_B Matrix (dim,n)
* @param h_C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/**
 * Host stub: copies A and B to the device, runs the tiled MatrixMulKernel,
 * copies C back, and reports the elapsed time of the whole round trip
 * (allocation + transfers + kernel + free).
 *
 * @param h_A host Matrix (m,dim)
 * @param h_B host Matrix (dim,n)
 * @param h_C host result Matrix (m,n)
 * @param m   number of rows in h_A
 * @param n   number of columns in h_B
 * @param dim number of rows in h_B (== columns of h_A)
 */
void parallelMatMul(float* h_A,
float* h_B,
float* h_C,
int m,
int n,
int dim){
hipEvent_t start, stop;
float elapsedTime = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float *d_A, *d_B, *d_C;
size_t size_of_float = sizeof(float);
size_t size_A = m*dim*size_of_float;
size_t size_B = n*dim*size_of_float;
size_t size_C = m*n*size_of_float;
hipMalloc((void**)&d_A, size_A);
hipMalloc((void**)&d_B, size_B);
hipMalloc((void**)&d_C, size_C);
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
// Invoke kernel: one TILE_WIDTH x TILE_WIDTH block per output tile.
dim3 dimGrid(ceil(n/(float)(TILE_WIDTH)),ceil(m/(float)(TILE_WIDTH)),1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, m, n, dim);
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
// Free device memory for A, B, C
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
// Fix: hipEventElapsedTime reports milliseconds, not seconds.
printf("Parallel invoke Matmul function need %.1f ms.\n",elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
}
/**
* This is a baseline kernel function of parallel Matmul
*
* @param A Matrix (m,dim)
* @param B Matrix (dim,n)
* @param C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
// Baseline (non-tiled) kernel for C (m x n) = A (m x dim) * B (dim x n):
// each in-range thread reads a full row of A and column of B from global
// memory.
__global__
void MatrixMulKernel_Baseline(float* A,
float* B,
float* C,
int m,
int n,
int dim){
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Identify the row and column of the C element to work on
// Each thread works on an element of C
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
bool cond0 = Row < m;
bool cond1 = Col < n;
if(cond0 && cond1){
float Cvalue = 0;
for(int i = 0;i<dim;i++){
Cvalue += A[Row*dim+i]*B[i*n+Col];
}
C[Row*n+Col] = Cvalue;
}
}
/**
* This is a baseline Parallel Stub function of parallel Matmul
*
* @param h_A Matrix (m,dim)
* @param h_B Matrix (dim,n)
* @param h_C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/**
 * Host stub for the baseline (non-tiled) kernel; same round trip and
 * timing scheme as parallelMatMul.
 *
 * @param h_A host Matrix (m,dim)
 * @param h_B host Matrix (dim,n)
 * @param h_C host result Matrix (m,n)
 * @param m   number of rows in h_A
 * @param n   number of columns in h_B
 * @param dim number of rows in h_B (== columns of h_A)
 */
void parallelMatMul_baseline(float* h_A,
float* h_B,
float* h_C,
int m,
int n,
int dim){
hipEvent_t start, stop;
float elapsedTime = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float *d_A, *d_B, *d_C;
size_t size_of_float = sizeof(float);
size_t size_A = m*dim*size_of_float;
size_t size_B = n*dim*size_of_float;
size_t size_C = m*n*size_of_float;
hipMalloc((void**)&d_A, size_A);
hipMalloc((void**)&d_B, size_B);
hipMalloc((void**)&d_C, size_C);
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
// Invoke kernel: one TILE_WIDTH x TILE_WIDTH block per output tile.
dim3 dimGrid(ceil(n/(float)(TILE_WIDTH)),ceil(m/(float)(TILE_WIDTH)),1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
hipLaunchKernelGGL(( MatrixMulKernel_Baseline), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, m, n, dim);
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
// Free device memory for A, B, C
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
// Fix: hipEventElapsedTime reports milliseconds, not seconds.
printf("The baseline Parallel invoke Matmul function need %.1f ms.\n",elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
} | 6bde22bdd86bc7b3081895040b095380a586c9a1.cu | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILE_WIDTH 32
/**
* This is a kernel MatrixMul function of parallel Matmul
*
* @param A Matrix (m,dim)
* @param B Matrix (dim,n)
* @param C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
// Tiled shared-memory kernel for C (m x n) = A (m x dim) * B (dim x n).
// Out-of-range lanes load zeros so every thread reaches both barriers.
__global__
void MatrixMulKernel(float* A,
float* B,
float* C,
int m,
int n,
int dim){
__shared__ float Ads[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Identify the row and column of the C element to work on
// Each thread works on an element of C
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
// Calculate the number of phase
int phase_num = ceil(dim / (float)TILE_WIDTH);
bool cond0 = Row < m;
bool cond1 = Col < n;
float Cvalue = 0;
// Each thread loads 'Row'th row of A and 'Col'th column of B
for (int ph = 0; ph < phase_num; ++ph) {
if(ph * TILE_WIDTH + tx < dim){
Ads[ty][tx] = (cond0)?A[Row * dim + ph*TILE_WIDTH + tx]:0;
}
else{
Ads[ty][tx] = 0;
}
if(ph * TILE_WIDTH + ty < dim){
Bds[ty][tx] = (cond1)?B[(ph*TILE_WIDTH + ty)*n + Col]:0;
}
else{
Bds[ty][tx] = 0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Cvalue += Ads[ty][k] * Bds[k][tx];
}
__syncthreads();
}
if(cond0 && cond1){
C[Row * n + Col] = Cvalue;
}
}
/**
* This is a parallel Stub function of parallel Matmul
*
* @param h_A Matrix (m,dim)
* @param h_B Matrix (dim,n)
* @param h_C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/**
 * Host stub: copies A and B to the device, runs the tiled MatrixMulKernel,
 * copies C back, and reports the elapsed time of the whole round trip
 * (allocation + transfers + kernel + free).
 *
 * @param h_A host Matrix (m,dim)
 * @param h_B host Matrix (dim,n)
 * @param h_C host result Matrix (m,n)
 * @param m   number of rows in h_A
 * @param n   number of columns in h_B
 * @param dim number of rows in h_B (== columns of h_A)
 */
void parallelMatMul(float* h_A,
float* h_B,
float* h_C,
int m,
int n,
int dim){
cudaEvent_t start, stop;
float elapsedTime = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float *d_A, *d_B, *d_C;
size_t size_of_float = sizeof(float);
size_t size_A = m*dim*size_of_float;
size_t size_B = n*dim*size_of_float;
size_t size_C = m*n*size_of_float;
cudaMalloc((void**)&d_A, size_A);
cudaMalloc((void**)&d_B, size_B);
cudaMalloc((void**)&d_C, size_C);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
// Invoke kernel: one TILE_WIDTH x TILE_WIDTH block per output tile.
dim3 dimGrid(ceil(n/(float)(TILE_WIDTH)),ceil(m/(float)(TILE_WIDTH)),1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, m, n, dim);
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// Fix: cudaEventElapsedTime reports milliseconds, not seconds.
printf("Parallel invoke Matmul function need %.1f ms.\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
/**
* This is a baseline kernel function of parallel Matmul
*
* @param A Matrix (m,dim)
* @param B Matrix (dim,n)
* @param C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
// Baseline (non-tiled) kernel for C (m x n) = A (m x dim) * B (dim x n):
// each in-range thread reads a full row of A and column of B from global
// memory.
__global__
void MatrixMulKernel_Baseline(float* A,
float* B,
float* C,
int m,
int n,
int dim){
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Identify the row and column of the C element to work on
// Each thread works on an element of C
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
bool cond0 = Row < m;
bool cond1 = Col < n;
if(cond0 && cond1){
float Cvalue = 0;
for(int i = 0;i<dim;i++){
Cvalue += A[Row*dim+i]*B[i*n+Col];
}
C[Row*n+Col] = Cvalue;
}
}
/**
* This is a baseline Parallel Stub function of parallel Matmul
*
* @param h_A Matrix (m,dim)
* @param h_B Matrix (dim,n)
* @param h_C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/**
 * Host stub for the baseline (non-tiled) kernel; same round trip and
 * timing scheme as parallelMatMul.
 *
 * @param h_A host Matrix (m,dim)
 * @param h_B host Matrix (dim,n)
 * @param h_C host result Matrix (m,n)
 * @param m   number of rows in h_A
 * @param n   number of columns in h_B
 * @param dim number of rows in h_B (== columns of h_A)
 */
void parallelMatMul_baseline(float* h_A,
float* h_B,
float* h_C,
int m,
int n,
int dim){
cudaEvent_t start, stop;
float elapsedTime = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float *d_A, *d_B, *d_C;
size_t size_of_float = sizeof(float);
size_t size_A = m*dim*size_of_float;
size_t size_B = n*dim*size_of_float;
size_t size_C = m*n*size_of_float;
cudaMalloc((void**)&d_A, size_A);
cudaMalloc((void**)&d_B, size_B);
cudaMalloc((void**)&d_C, size_C);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
// Invoke kernel: one TILE_WIDTH x TILE_WIDTH block per output tile.
dim3 dimGrid(ceil(n/(float)(TILE_WIDTH)),ceil(m/(float)(TILE_WIDTH)),1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
MatrixMulKernel_Baseline<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, m, n, dim);
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// Fix: cudaEventElapsedTime reports milliseconds, not seconds.
printf("The baseline Parallel invoke Matmul function need %.1f ms.\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
} |
09272041b489eaedeba8e87cae78422ca80a043b.hip | // !!! This is a file automatically generated by hipify!!!
//a#########################################################
//a## 2D Acoustic ISO Medium RTM
//a## Ps : P + sv wave and get rid of sv
//a## GPU(CUDA) ,poynting adcigs
//a##
//a##/*a****************************************************/
//a##Function for ISO medium modeling,2017.10.12
//a##
//a## FD use VTI finite different
//a##
//a## Rong Tao
//a#########################################################
#include<stdio.h>
#include<malloc.h>
#include<math.h>
#include<stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#define pi 3.141592653
#define mm 4
//__constant__ float c[mm]={1.125,-0.04166667};/*mm==2*/
//__constant__ float c[mm]={1.1718750,-0.065104167,0.0046875};/*mm==3*/
__constant__ float c[mm]={1.196289,-0.0797526,0.009570313,-0.0006975447};/*mm==4*/
//__constant__ float c[mm]={1.211243,-0.08972168,0.01384277,-0.00176566,0.0001186795};/*mm==5*/
__device__ float d0;
//a################################################################################
// Abort the program with a contextual message if the HIP runtime has a
// pending (sticky) error.
void check_gpu_error (const char *msg)
/*< check GPU errors >*/
{
// hipGetLastError also clears the pending error state.
hipError_t err = hipGetLastError ();
if (hipSuccess != err) {
printf("Cuda error: %s: %s\n", msg, hipGetErrorString (err));
exit(0);
}
}
/*************func**************/
/*< 2-D Laplacian-style filter (after Madagascar's Mlaplac2) used to
    suppress low-frequency migration artifacts.  Zeroes `out`, then applies
    the forward operator (adj == 0) or its adjoint (adj != 0) over an
    nz-by-nx grid stored with index = iz + ix*nz. >*/
void migraiton_laplace_filter(int adj, int nz, int nx, float *in, float *out)
{
    int col, row, idx;

    /* start from a clean output buffer */
    for (idx = 0; idx < nx * nz; idx++) out[idx] = 0.0;

    for (col = 0; col < nx; col++) {
        for (row = 0; row < nz; row++) {
            idx = row + col * nz;
            /* four one-sided differences, each applied only away from the
               corresponding grid edge */
            if (row > 0) {
                if (adj) { out[idx - 1] -= in[idx]; out[idx] += in[idx]; }
                else       out[idx] += in[idx] - in[idx - 1];
            }
            if (row < nz - 1) {
                if (adj) { out[idx + 1] -= in[idx]; out[idx] += in[idx]; }
                else       out[idx] += in[idx] - in[idx + 1];
            }
            if (col > 0) {
                if (adj) { out[idx - nz] -= in[idx]; out[idx] += in[idx]; }
                else       out[idx] += in[idx] - in[idx - nz];
            }
            if (col < nx - 1) {
                if (adj) { out[idx + nz] -= in[idx]; out[idx] += in[idx]; }
                else       out[idx] += in[idx] - in[idx + nz];
            }
        }
    }
}
// Same stencil as migraiton_laplace_filter but WITHOUT zeroing `out`:
// contributions are accumulated, so the caller must initialize `out`
// (standard linear-operator convention).
void laplac2_lop(int adj, int nz, int nx, float *in, float *out)
/*< linear operator >*/
{
int iz,ix,j;
for (ix=0; ix < nx; ix++) {
for (iz=0; iz < nz; iz++) {
// column-major style indexing: j = iz + ix*nz
j = iz+ix*nz;
if (iz > 0) {
if (adj) {
out[j-1] -= in[j];
out[j] += in[j];
} else {
out[j] += in[j] - in[j-1];
}
}
if (iz < nz-1) {
if (adj) {
out[j+1] -= in[j];
out[j] += in[j];
} else {
out[j] += in[j] - in[j+1];
}
}
if (ix > 0) {
if (adj) {
out[j-nz] -= in[j];
out[j] += in[j];
} else {
out[j] += in[j] - in[j-nz];
}
}
if (ix < nx-1) {
if (adj) {
out[j+nz] -= in[j];
out[j] += in[j];
} else {
out[j] += in[j] - in[j+nz];
}
}
}
}
}
/*************func**************/
// Inject a source wavelet sample at time t into the P and Q wavefields at
// the shot location (xsn + (is-1)*ds, zsn), offset by the PML pad.
// wtype selects the wavelet; injection stops after 2/favg seconds.
__global__ void add_source(float pfac,float xsn,float zsn,int nx,int nz,int nnx,int nnz,float dt,float t,
float favg,int wtype,int npml,int is,int ds,float *P,float *Q)
/*< generate ricker wavelet with time deley >*/
{
int ixs,izs;
float x_,xx_,tdelay,ts,source=0.0,fs;
// Delay by one dominant period so the wavelet starts near zero.
tdelay=1.0/favg;
ts=t-tdelay;
fs=xsn+(is-1)*ds;
if(wtype==1)//ricker wavelet
{
x_=favg*ts;
xx_=x_*x_;
source=(1-2*pi*pi*(xx_))*exp(-(pi*pi*xx_));
}else if(wtype==2){//derivative of gaussian
x_=(-4)*favg*favg*pi*pi/log(0.1);
source=(-2)*pi*pi*ts*exp(-x_*ts*ts);
}else if(wtype==3){//gaussian
x_=(-1)*favg*favg*pi*pi/log(0.1);
source=exp(-x_*ts*ts);
}
if(t<=2*tdelay)
{
// Shift into the padded grid (npml-cell absorbing boundary).
ixs = (int)(fs+0.5)+npml-1;
izs = (int)(zsn+0.5)+npml-1;
P[ixs*nnz+izs]+=pfac*source;
Q[ixs*nnz+izs]+=pfac*source;
}
}
/*******************func*********************/
// Staggered-grid particle-velocity update: one thread per node of the
// padded nnx x nnz model (id = ix*nnz + iz).  u1/w1 are the new horizontal/
// vertical velocities from u0/w0 and the spatial derivatives of the P/Q
// stress fields; c[] holds the order-2*mm FD coefficients and coffx*/coffz*
// are the PML damping coefficients.
__global__ void update_vel(int nx,int nz,int nnx,int nnz,int npml,float dt,float dx,float dz,
float *u0,float *w0,float *u1,float *w1,float *P,float *Q,
float *coffx1,float *coffx2,float *coffz1,float *coffz2)
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int ix,iz,im;
float dtx,dtz,xx,zz;
ix=id/nnz;
iz=id%nnz;
dtx=dt/dx;
dtz=dt/dz;
if(id>=mm&&id<nnx*nnz-mm)
{
// Interior only: keep the mm-point stencil inside the padded grid.
if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
{
xx=0.0;
zz=0.0;
for(im=0;im<mm;im++)
{
xx+=c[im]*(P[id+(im+1)*nnz]-P[id-im*nnz]);
zz+=c[im]*(Q[id+im+1] -Q[id-im]);
}
u1[id]=coffx2[ix]*u0[id]-coffx1[ix]*dtx*xx;
w1[id]=coffz2[iz]*w0[id]-coffz1[iz]*dtz*zz;
}
}
}
/*******************func***********************/
__global__ void update_stress(int nx,int nz,int nnx,int nnz,float dt,float dx,float dz,
                   float *u1,float *w1,float *P,float *Q,float *vp,int npml,
                   float *px1,float *px0,float *pz1,float *pz0,float *qx1,float *qx0,float *qz1,float *qz0,
                   float *acoffx1,float *acoffx2,float *acoffz1,float *acoffz2,
                   float *delta,float *epsilon,int fs,int ds,int zs,int is,bool SV)
/*< advance the split-PML stress components (px,pz,qx,qz) one time step for a
    VTI medium with Thomsen parameters epsilon/delta, then assemble
    P = px+pz and Q = qx+qz.  When SV is true, epsilon/delta are tapered to
    zero inside a small circle (radii r..R) around the source of shot `is`,
    which suppresses the spurious SV wave of the acoustic VTI approximation.
    One thread per padded-grid cell. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int im,ix,iz,rx,rz,R=15,r=5;     // R/r: outer/inner radius of the iso circle (cells)
	 float dtx,dtz, xx,zz,ee,dd;
	 ix=id/nnz;
	 iz=id%nnz;
	 dtx=dt/dx;
	 dtz=dt/dz;
	 if(id>=mm&&id<nnx*nnz-mm)
	 {
     /************************i****************************************/
     /************************iso circle start*************************/
	   rx=ix-(fs+(is-1)*ds+npml);     // offset from the source, x (cells)
	   rz=iz-(zs+npml);               // offset from the source, z (cells)
	   if(SV){
	      if((rx*rx+rz*rz)<=R*R){
	         if((rx*rx+rz*rz)<=r*r){
	            ee = 0.0;            // fully isotropic at the source
	            dd = 0.0;
	         }else{
	            // cosine taper from isotropic (at r) back to the model values
	            ee = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*epsilon[id];
	            dd = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*delta[id];
	         }
	      }else{
	         ee=epsilon[id];
	         dd=delta[id];
	      }
	   }else{
	      ee=epsilon[id];
	      dd=delta[id];
	   }
     /************************ iso circle end *************************/
     /************************i****************************************/
	   if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
	   {
	      xx=0.0;
	      zz=0.0;
	      for(im=0;im<mm;im++)
	      {
	         xx+=c[im]*(u1[id+im*nnz]-u1[id-(im+1)*nnz]);  // staggered du/dx
	         zz+=c[im]*(w1[id+im] -w1[id-im-1]);           // staggered dw/dz
	      }
	      // acoustic VTI update: (1+2*eps) on the horizontal P term,
	      // sqrt(1+2*delta) coupling the P/Q cross terms
	      px1[id]=acoffx2[ix]*px0[id]-acoffx1[ix]*vp[id]*vp[id]*(1+2*ee)*dtx*xx;
	      pz1[id]=acoffz2[iz]*pz0[id]-acoffz1[iz]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtz*zz;
	      qx1[id]=acoffx2[ix]*qx0[id]-acoffx1[ix]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtx*xx;
	      qz1[id]=acoffz2[iz]*qz0[id]-acoffz1[iz]*vp[id]*vp[id]*dtz*zz;
	      P[id]=px1[id]+pz1[id];
	      Q[id]=qx1[id]+qz1[id];
	   }
	 }
}
/********************func**********************/
__global__ void get_d0(float dx,float dz,int nnx,int nnz,int npml,float *vp)
/*< compute the PML reference damping d0 (device-global variable) from the
    velocity at the centre cell of the padded model; single-thread launch. >*/
{
       d0=10.0*vp[nnx*nnz/2]*log(100000.0)/(2.0*npml*((dx+dz)/2.0));
}
/*************func*******************/
void pad_vv(int nx,int nz,int nnx,int nnz,int npml,float *ee)
/*< replicate the interior edge values of the model into the npml-wide pad:
    pass 1 copies the outermost interior columns sideways, pass 2 copies the
    outermost interior rows vertically (which also fills the corners). >*/
{
   int ix, iz;
   /* pass 1: left / right pad columns */
   for (ix = 0; ix < nnx; ix++) {
      int src = -1;
      if (ix < npml)           src = npml;         /* left pad  */
      else if (ix >= nnx-npml) src = nnx-npml-1;   /* right pad */
      if (src < 0) continue;                       /* interior column: untouched */
      for (iz = 0; iz < nnz; iz++)
         ee[ix*nnz+iz] = ee[src*nnz+iz];
   }
   /* pass 2: top / bottom pad rows */
   for (ix = 0; ix < nnx; ix++) {
      for (iz = 0; iz < nnz; iz++) {
         if (iz < npml)           ee[ix*nnz+iz] = ee[ix*nnz+npml];
         else if (iz >= nnz-npml) ee[ix*nnz+iz] = ee[ix*nnz+nnz-npml-1];
      }
   }
}
/*************func*******************/
void read_file(char FN1[],int nx,int nz,int nnx,int nnz,float dx,float dz,float favg,float dt,
               float *v,float *e,float *d,int npml)
/*< read the nx*nz velocity model (raw 4-byte floats) from FN1 into the
    interior of the padded v array, zero epsilon/delta there, replicate all
    three into the PML pad, and exit(0) if the sampling violates the
    dispersion or stability conditions for the mm-th order scheme. >*/
{
	 int i,j,id;
	 float vmax, vmin, H_min, dt_max, dxz_max, C, tmp;
	 FILE *fp1;
	 if((fp1=fopen(FN1,"rb"))==NULL){printf("error open <%s>!\n",FN1);exit(0);}
	 vmin= 999999.9;
	 vmax=-999999.9;
	 for(i=npml;i<nx+npml;i++)
	 {
	    for(j=npml;j<nz+npml;j++)
	    {
	       id=i*nnz+j;
	       fread(&v[id],4L,1,fp1);   // raw little-endian float samples, column by column
	       e[id] = 0.0;              // epsilon/delta start at zero (isotropic model)
	       d[id] = 0.0;
	       if(vmax<v[id]) vmax = v[id];
	       if(vmin>v[id]) vmin = v[id];
	    }
	 }
	 fclose(fp1);
	 printf("------------------------------------\n---\n");
	 printf("--- Vmax=%.4f, Vmin=%.4f\n",vmax,vmin);
   /*********boundary*********/
	 pad_vv(nx,nz,nnx,nnz,npml,e);
	 pad_vv(nx,nz,nnx,nnz,npml,d);
	 pad_vv(nx,nz,nnx,nnz,npml,v);
	 H_min=dx<dz?dx:dz;
	 // NOTE(review): dt_max is derived from vmin here; stability bounds are
	 // usually driven by vmax - TODO confirm the intended criterion.
	 dt_max = 0.5*H_min/vmin;
	 dxz_max = vmax/favg*0.2;
	 if(dxz_max<dz||dxz_max<dx){printf("--- You need have to redefine DX and DZ ! \n");exit(0);}
	 if(dt_max<dt){printf("--- You need have to redefine DT ! \n");exit(0);}
	 // NOTE(review): both operands of this '||' are identical; the second was
	 // presumably meant to test the other spatial step - confirm.
	 if ( favg >= vmin/( 5.0*(dx>dz?dx:dz) ) || favg >= vmin/( 5.0*(dx>dz?dx:dz) ) )
	    {printf("--- Non-dispersion relation not satisfied! \n");exit(0);}
	 else if ( mm == 2 ) C = 0.857;   // Courant numbers per FD order (mm is fixed at 4)
	 else if ( mm == 3 ) C = 0.8;
	 else if ( mm == 4 ) C = 0.777;
	 else if ( mm == 5 ) C = 0.759;
	 tmp = dt*vmax*sqrtf( 1.0/(dx*dx)+1.0/(dz*dz) );
	 if ( tmp >= C){ printf("--- Stability condition not satisfied! tmp = %f, C = %f\n",tmp,C);exit(0);}
}
/*************func*******************/
__global__ void initial_coffe(float dt,int nn,float *coff1,float *coff2,float *acoff1,float *acoff2,int npml)
/*< fill the 1-D PML damping coefficient tables for one axis of length
    nn+2*npml: coff1/coff2 use half-cell offsets (velocity grid), while
    acoff1/acoff2 use whole-cell offsets (stress grid).  All four are 1.0 in
    the interior; d0 is the device-global set by get_d0. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 if(id<nn+2*npml)
	 {
	   if(id<npml)
	   {
	      // leading PML strip: quadratic damping profile, half-cell staggered
	      coff1[id]=1.0/(1.0+(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0);
	      coff2[id]=coff1[id]*(1.0-(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0);
	      acoff1[id]=1.0/(1.0+(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0);
	      acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0);
	   }else if(id>=npml&&id<npml+nn){
	      // interior: no damping
	      coff1[id]=1.0;
	      coff2[id]=1.0;
	      acoff1[id]=1.0;
	      acoff2[id]=1.0;
	   }else{
	      // trailing PML strip (mirror of the leading one)
	      coff1[id]=1.0/(1.0+(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0);
	      coff2[id]=coff1[id]*(1.0-(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0);
	      acoff1[id]=1.0/(1.0+(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0);
	      acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0);
	   }
	 }
}
/*************func*******************/
__global__ void shot_record(int nnx, int nnz, int nx, int nz, int npml, int it, int nt, float *P, float *shot, bool flag)
/*< flag true: record the wavefield row at depth index npml (top of the
    interior) into the shot gather, laid out shot[it + nt*ix];
    flag false: inject the recorded trace back into P (used when
    back-propagating the receiver wavefield).  One thread per receiver. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 if(id<nx)
	 {
	   if(flag){
	      shot[it+nt*id]=P[npml+nnz*(id+npml)];
	   }else{
	      P[npml+nnz*(id+npml)]=shot[it+nt*id];
	   }
	 }
}
/*************func*******************/
__global__ void wavefield_bndr(int nnx, int nnz, int nx, int nz, int npml, int it, int nt,
                    float *P, float *Q, float *P_bndr, float *Q_bndr, bool flag)
/*< save (flag true) or restore (flag false) a one-cell-thick strip of P and Q
    just outside the interior on all four sides, so the source wavefield can
    be reconstructed during the backward sweep of RTM.  Strip layout per time
    step: [0,nx) up, [nx,2nx) down, [2nx,2nx+nz) left, [2nx+nz,2nx+2nz) right.
    One thread per boundary sample. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 if(id<2*nx+2*nz)
	 {
	   if(flag)/////////////////////////////////save boundary
	   {
	      if(id<nx){//up  (row npml-1, one cell above the interior)
	         P_bndr[it*(2*nx+2*nz)+id]=P[npml-1+nnz*(id+npml)];
	         Q_bndr[it*(2*nx+2*nz)+id]=Q[npml-1+nnz*(id+npml)];
	      }else if(id>=nx&&id<(2*nx)){//down  (row npml+nz+1, below the interior)
	         P_bndr[it*(2*nx+2*nz)+id]=P[npml+nz+1+nnz*(id-nx+npml)];
	         Q_bndr[it*(2*nx+2*nz)+id]=Q[npml+nz+1+nnz*(id-nx+npml)];
	      }else if(id>=(2*nx)&&id<(2*nx+nz)){//left  (column npml-1)
	         P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx+npml+nnz*(npml-1)];
	         Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx+npml+nnz*(npml-1)];
	      }else if(id>=(2*nx+nz)){//right  (column npml+nx+1)
	         P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx-nz+npml+nnz*(npml+nx+1)];
	         Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx-nz+npml+nnz*(npml+nx+1)];
	      }
	   }else{/////////////////////////////add boundary
	      if(id<nx){//up
	         P[npml-1+nnz*(id+npml)]=P_bndr[it*(2*nx+2*nz)+id];
	         Q[npml-1+nnz*(id+npml)]=Q_bndr[it*(2*nx+2*nz)+id];
	      }else if(id>=nx&&id<(2*nx)){//down
	         P[npml+nz+1+nnz*(id-nx+npml)]=P_bndr[it*(2*nx+2*nz)+id];
	         Q[npml+nz+1+nnz*(id-nx+npml)]=Q_bndr[it*(2*nx+2*nz)+id];
	      }else if(id>=(2*nx)&&id<(2*nx+nz)){//left
	         P[id-2*nx+npml+nnz*(npml-1)]=P_bndr[it*(2*nx+2*nz)+id];
	         Q[id-2*nx+npml+nnz*(npml-1)]=Q_bndr[it*(2*nx+2*nz)+id];
	      }else if(id>=(2*nx+nz)){//right
	         P[id-2*nx-nz+npml+nnz*(npml+nx+1)]=P_bndr[it*(2*nx+2*nz)+id];
	         Q[id-2*nx-nz+npml+nnz*(npml+nx+1)]=Q_bndr[it*(2*nx+2*nz)+id];
	      }
	   }
	 }
}
/*************func**************/
__global__ void mute_directwave(int nx,int nt,float dt,float favg,
                 float dx,float dz,int fs,int ds,int zs,int is,
                 float *vp,float *epsilon,float *shot,int tt)
/*< zero a time window around the predicted direct-wave arrival in the shot
    gather (layout shot[it + nt*ix]).  The arrival time is the straight-ray
    distance to the shot divided by the horizontal anisotropic velocity
    vp*sqrt(1+2*epsilon); the window spans the source duration plus tt
    samples on each side.  NOTE(review): vp[1]/epsilon[1] sample a point in
    the PML pad - presumably the near-surface medium is homogeneous; confirm.
    One thread per (trace, sample) pair. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int mu_t,mu_nt;
	 float mu_x,mu_z,mu_t0;
	 int ix=id/nt;
	 int it=id%nt;
	 if(id<nx*nt)
	 {
	    mu_x=dx*abs(ix-fs-(is-1)*ds);               /* horizontal offset to the shot */
	    mu_z=dz*zs;                                 /* shot depth */
	    /* single-precision hypotenuse: the original called double-precision
	       pow(x,2.0) twice, which is needlessly slow in a float kernel */
	    mu_t0=sqrtf(mu_x*mu_x+mu_z*mu_z)/(vp[1]*sqrtf(1+2*epsilon[1]));
	    mu_t=(int)(2.0/(dt*favg));                  /* source duration in samples */
	    mu_nt=(int)(mu_t0/dt)+mu_t+tt;
	    if((it>(int)(mu_t0/dt)-tt)&&(it<mu_nt))
	         shot[id]=0.0;
	 }
}
/*************func**************/
__global__ void cal_illumination(int nnx, int nnz, int nz, int npml, float *illumination, float *P, float *Q)
/*< accumulate source illumination |P|^2+|Q|^2 over time into the nx*nz image
    grid (interior coordinates); zero samples are bumped to 1.0 so the later
    division by illumination never divides by zero.
    Bug fix: the original guard was id<nnx*nnz, which let threads beyond the
    nx*nz image write out of bounds of illumination[] (allocated nz*nx). >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int ix=id/nz;
	 int iz=id%nz;
	 int nx=nnx-2*npml;     /* interior width; illumination is nx*nz */
	 if(id<nx*nz)
	 {
	    illumination[id]+=P[iz+npml+nnz*(ix+npml)]*P[iz+npml+nnz*(ix+npml)]
	                     +Q[iz+npml+nnz*(ix+npml)]*Q[iz+npml+nnz*(ix+npml)];
	    if(illumination[id]==0)illumination[id]=1.0;
	 }
}
/*************func**************/
__global__ void cal_migration(int nnx, int nnz, int nz, int npml, float *migration, float *s, float *g)
/*< accumulate the zero-lag cross-correlation imaging condition s*g into the
    nx*nz migration image (interior coordinates).
    Bug fix: the original guard was id<nnx*nnz, which let threads beyond the
    nx*nz image write out of bounds of migration[] (allocated nz*nx). >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int ix=id/nz;
	 int iz=id%nz;
	 int nx=nnx-2*npml;     /* interior width; migration is nx*nz */
	 if(id<nx*nz)
	 {
	    migration[id]+=s[iz+npml+nnz*(ix+npml)]*g[iz+npml+nnz*(ix+npml)];
	 }
}
/*************func**************/
__global__ void migration_illum(int nx, int nz, int npml, float *migration, float *illumination)
/*< normalise the migration image by the source illumination; one thread per
    image sample (illumination was forced nonzero by cal_illumination). >*/
{
	 const int tid = threadIdx.x + blockDim.x*blockIdx.x;
	 if (tid >= nx*nz) return;
	 migration[tid] /= illumination[tid];//*illumination[tid];
}
/*************func**************/
__global__ void Poynting_Adcigs(int nnz, int nx, int nz, int npml, int na, int da, float *adcigs,
                    float *s_P, float *s_Q, float *s_u, float *s_w,
                    float *g_P, float *g_Q, float *g_u, float *g_w)
/*< accumulate angle-domain common-image gathers: the opening angle between
    the source and receiver Poynting vectors (energy-flux directions) is
    halved and binned into na bins of da degrees; adcigs layout [ix][ia][iz].
    Bug fix: the id<nx*nz bounds check now comes BEFORE the global-memory
    reads - the original dereferenced the wavefields for out-of-range ids. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int ix=id/nz;
	 int iz=id%nz;
	 int ia=0;
	 if(id<nx*nz)
	 {
	    float Ssx=-s_P[iz+npml+nnz*(ix+npml)]*s_u[iz+npml+nnz*(ix+npml)];
	    float Ssz=-s_Q[iz+npml+nnz*(ix+npml)]*s_w[iz+npml+nnz*(ix+npml)];
	    float Sgx= g_P[iz+npml+nnz*(ix+npml)]*g_u[iz+npml+nnz*(ix+npml)];
	    float Sgz= g_Q[iz+npml+nnz*(ix+npml)]*g_w[iz+npml+nnz*(ix+npml)];
	    float b1= Ssx*Ssx + Ssz*Ssz;
	    float b2= Sgx*Sgx + Sgz*Sgz;
	    /* if b1*b2 == 0 this is NaN/inf and fails the [-1,1] test below */
	    float a=(Ssx*Sgx + Ssz*Sgz)/(sqrtf(b1*b2)*(1 - 0.1));
	    if(a>=-1&&a<=1)
	    {
	       a=0.5*acosf(a)*180.0/pi;        /* half opening angle in degrees */
	       ia=(int)(a/(da*1.0));
	       if(ia<na)
	       {
	          adcigs[iz+nz*ia+nz*na*(id/nz)] += s_P[iz+npml+nnz*(ix+npml)]*g_P[iz+npml+nnz*(ix+npml)]
	                           *cosf(ia*pi/180.0)*cosf(ia*pi/180.0)*cosf(ia*pi/180.0);
	       }
	    }
	 }
}
/*************func**************/
__global__ void adcigs_illum(int nx, int nz, int na, int da, float *adcigs, float *illumination)
/*< normalise every angle bin of the ADCIGs by the illumination at the same
    (ix,iz) image point; one thread per adcigs sample, layout [ix][ia][iz]. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int ix=id/(nz*na);   // x position of this sample
	 int iz=id%nz;        // depth sample (the angle index drops out)
	 if(id<nx*nz*na)
	 {
	    adcigs[id]/=illumination[iz+nz*ix];//*illumination[iz+nz*ix];
	 }
}
/*************func**************/
void adcigs_laplace_filter(int nx,int nz,int na,float *adcigs,int flag)
/*< apply the adjoint Laplacian to every angle panel of the ADCIGs and
    accumulate across angles.  flag: 1 = forward angle sweep writes back,
    2 = reverse sweep writes back, 3 = both.
    Bug fixes vs. the original: temp is accumulated with '+=' so it must be
    zero-initialised (was malloc, i.e. read uninitialised); laplac2_lop only
    ADDS into out, so out is zeroed before each call; all three work buffers
    are now freed instead of leaked. >*/
{
	int ix,iz,ia,id,ido;
	float *in, *out, *temp;
	in  =(float*)malloc(nz*nx*sizeof(float));
	out =(float*)malloc(nz*nx*sizeof(float));
	temp=(float*)calloc((size_t)nz*nx*na,sizeof(float));  /* zeroed accumulator */
	if(flag==1||flag==3)
	for (ia=0; ia<na; ia++){
	   for (ix=0; ix<nx; ix++){
	      for (iz=0; iz<nz; iz++){
	         id=ix*na*nz+ia*nz+iz;
	         ido=ix*nz+iz;
	         in[ido]=adcigs[id];
	      }
	   }
	   memset(out, 0, nz*nx*sizeof(float));   /* laplac2_lop accumulates into out */
	   laplac2_lop( 1, nz, nx, in, out );
	   for (ix=0; ix<nx; ix++) {
	      for (iz=0; iz<nz; iz++) {
	         id=ix*na*nz+ia*nz+iz;
	         ido=ix*nz+iz;
	         temp[id]+=out[ido];
	         if(flag==1)adcigs[id]=temp[id];
	      }
	   }
	}
	if(flag!=1)
	for (ia=na-1; ia>=0; ia--) {
	   for (ix=0; ix<nx; ix++) {
	      for (iz=0; iz<nz; iz++) {
	         id=ix*na*nz+ia*nz+iz;
	         ido=ix*nz+iz;
	         in[ido]=adcigs[id];
	      }
	   }
	   memset(out, 0, nz*nx*sizeof(float));
	   laplac2_lop( 1, nz, nx, in, out );
	   for (ix=0; ix<nx; ix++) {
	      for (iz=0; iz<nz; iz++) {
	         id=ix*na*nz+ia*nz+iz;
	         ido=ix*nz+iz;
	         temp[id]+=out[ido];
	         if(flag==2||flag==3) adcigs[id]=temp[id];
	      }
	   }
	}
	free(in); free(out); free(temp);
}
/*************func**************/
void adcigs_chouxi(int nx,int nz,int na,int nxa,int dadcigs,float *adcigs)
/*< decimate the ADCIGs laterally: keep every dadcigs-th x position and pack
    the kept angle panels to the front of adcigs.  The packed index is always
    <= the source index, so the in-place copy is safe.
    Bug fixes vs. the original: temp was sized with nxa=(int)(nx/dadcigs),
    which undercounts the kept panels when nx%dadcigs!=0 (ix=0 is always
    kept) and overflowed the heap; temp was also leaked. >*/
{
	int ix,iz,ia,id,ido;
	float *temp;
	/* ceil(nx/dadcigs) panels are actually written below */
	temp=(float*)malloc((size_t)nz*na*((nx+dadcigs-1)/dadcigs)*sizeof(float));
	for (ix=0; ix<nx; ix++) {
	   for (ia=0; ia<na; ia++) {
	      for (iz=0; iz<nz; iz++) {
	         id=ix*na*nz+ia*nz+iz;
	         if(ix%dadcigs==0) {
	            ido=ix/dadcigs*na*nz+ia*nz+iz;
	            temp[ido]=adcigs[id];
	            adcigs[ido]=temp[ido];
	         }
	      }
	   }
	}
	free(temp);
}
/*************func**************/
void stk_adcigs(int nx,int nz,int na,float *adcigs,float *migration)
/*< stack the ADCIGs over angle: migration[ix*nz+iz] = sum over ia of
    adcigs[ix*na*nz + ia*nz + iz]. >*/
{
	int ix, iz, ia;
	for (ix = 0; ix < nx; ix++) {
	   float *panel = adcigs + (size_t)ix*na*nz;   /* angle panels of this x */
	   for (iz = 0; iz < nz; iz++) {
	      float total = 0.0f;
	      for (ia = 0; ia < na; ia++)
	         total += panel[ia*nz + iz];
	      migration[ix*nz + iz] = total;
	   }
	}
}
//a########################################################################
//a## Main Function ##
//a########################################################################
/* Driver: 2-D acoustic VTI RTM.  For each of ns shots: forward-propagate the
   source wavefield (saving the receiver line and the wavefield boundary),
   then march backward, reconstructing the source wavefield from the saved
   boundary while back-propagating the recorded data, and accumulate the
   cross-correlation image, illumination and Poynting-vector ADCIGs. */
int main(int argc,char *argv[])
{
	int is, it, nx, nz, nnx, nnz, nt, wtype, na, da, dadcigs, nxa;
	int ns, ds, fs, zs, npml;
	float dx, dz, dt, t, pfac, favg;
	float *coffx1,*coffx2,*coffz1,*coffz2,*acoffx1,*acoffx2,*acoffz1,*acoffz2;
	float *v, *e, *d;                    // host model: velocity, epsilon, delta
	float *vp, *epsilon, *delta;         // device copies of the model
	float *s_u0, *s_u1, *s_px0, *s_qx0, *s_px1, *s_qx1;
	float *s_w0, *s_w1, *s_pz0, *s_qz0, *s_pz1, *s_qz1;
	float *g_u0, *g_u1, *g_px0, *g_qx0, *g_px1, *g_qx1;
	float *g_w0, *g_w1, *g_pz0, *g_qz0, *g_pz1, *g_qz1;
	float *s_P, *s_Q, *g_P, *g_Q, *shot_Dev, *shot_Hos, *P_bndr, *Q_bndr;
	float *migration, *illumination, *adcigs;
	float *Atemp;
	clock_t start, end;
 /*************wavelet\boundary**************/
	wtype=1;npml=20;                     // Ricker source, 20-cell PML
 /********** dat document ***********/
	char FN1[250]={"waxian_vel_601_301.dat"};
	char FN4[250]={"55Hz_waxian_shot_obs.dat"};
	char FN5[250]={"55Hz_waxian_snap.dat"};
	char FN6[250]={"55Hz_waxian_migration.dat"};
	char FN7[250]={"55Hz_waxian_illumination.dat"};
	char FN8[250]={"55Hz_waxian_adcigs.dat"};
	char FN9[250]={"55Hz_waxian_adcigs_laplace.dat"};
	char FN10[250]={"55Hz_waxian_adcigs_dadcigs.dat"};
	char FN11[250]={"55Hz_waxian_migration_stkAdcigs.dat"};
 /********* parameters *************/
	nx=601;
	nz=301;	favg=55;  pfac=1000.0;
	dx=5.0;
	dz=5.0;
	nt=3001;
	dt=0.0005;
	ns=200;
	fs=nx/ns/2;          // first shot x, interior coords
	ds=nx/ns;            // shot spacing
	zs=1;                // shot depth
	na=70;               // number of angle bins
	da=1;                // bin width (degrees)
	dadcigs=25;          // lateral decimation for the output gathers
 /********aaa************/
	FILE *fpsnap, *fpshot, *fpmig, *fpillum, *fpadcigs, *fpadcigslaplace, *fpdadcigs,*fpstkadcigs;
	// NOTE(review): none of these fopen("wb") results are checked for NULL
	fpshot=fopen(FN4,"wb");
	fpsnap=fopen(FN5,"wb");
	fpmig=fopen(FN6,"wb");
	fpillum=fopen(FN7,"wb");
	fpadcigs=fopen(FN8,"wb");
	fpadcigslaplace=fopen(FN9,"wb");
	fpdadcigs=fopen(FN10,"wb");
	fpstkadcigs=fopen(FN11,"wb");
 /*************v***************/
	nnx=nx+2*npml;       // padded model size
	nnz=nz+2*npml;
	nxa=(int)(nx/dadcigs);
 /************a*************/
	Atemp=(float*)malloc(nz*nx*na*sizeof(float));
	v=(float*)malloc(nnz*nnx*sizeof(float));
	e=(float*)malloc(nnz*nnx*sizeof(float));
	d=(float*)malloc(nnz*nnx*sizeof(float));
	shot_Hos=(float*)malloc(nt*nx*sizeof(float));
	read_file(FN1,nx,nz,nnx,nnz,dx,dz,favg,dt,v,e,d,npml);
 /****************************/
	hipSetDevice(0);// initialize device, default device=0;
	check_gpu_error("Failed to initialize device!");
 /****************************/
	hipMalloc(&vp, nnz*nnx*sizeof(float));
	hipMalloc(&epsilon, nnz*nnx*sizeof(float));
	hipMalloc(&delta, nnz*nnx*sizeof(float));
	hipMemcpy(vp, v, nnz*nnx*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(epsilon, e, nnz*nnx*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(delta, d, nnz*nnx*sizeof(float), hipMemcpyHostToDevice);
 /****************************/
	hipMalloc(&s_u0, nnz*nnx*sizeof(float));	hipMalloc(&s_u1, nnz*nnx*sizeof(float));
	hipMalloc(&s_w0, nnz*nnx*sizeof(float));	hipMalloc(&s_w1, nnz*nnx*sizeof(float));
	hipMalloc(&s_P, nnz*nnx*sizeof(float));	hipMalloc(&s_Q, nnz*nnx*sizeof(float));
	hipMalloc(&s_px0, nnz*nnx*sizeof(float));	hipMalloc(&s_px1, nnz*nnx*sizeof(float));
	hipMalloc(&s_pz0, nnz*nnx*sizeof(float));	hipMalloc(&s_pz1, nnz*nnx*sizeof(float));
	hipMalloc(&s_qx0, nnz*nnx*sizeof(float));	hipMalloc(&s_qx1, nnz*nnx*sizeof(float));
	hipMalloc(&s_qz0, nnz*nnx*sizeof(float));	hipMalloc(&s_qz1, nnz*nnx*sizeof(float));
	hipMalloc(&g_u0, nnz*nnx*sizeof(float));	hipMalloc(&g_u1, nnz*nnx*sizeof(float));
	hipMalloc(&g_w0, nnz*nnx*sizeof(float));	hipMalloc(&g_w1, nnz*nnx*sizeof(float));
	hipMalloc(&g_P, nnz*nnx*sizeof(float));	hipMalloc(&g_Q, nnz*nnx*sizeof(float));
	hipMalloc(&g_px0, nnz*nnx*sizeof(float));	hipMalloc(&g_px1, nnz*nnx*sizeof(float));
	hipMalloc(&g_pz0, nnz*nnx*sizeof(float));	hipMalloc(&g_pz1, nnz*nnx*sizeof(float));
	hipMalloc(&g_qx0, nnz*nnx*sizeof(float));	hipMalloc(&g_qx1, nnz*nnx*sizeof(float));
	hipMalloc(&g_qz0, nnz*nnx*sizeof(float));	hipMalloc(&g_qz1, nnz*nnx*sizeof(float));
	hipMalloc(&coffx1, nnx*sizeof(float));	hipMalloc(&coffx2, nnx*sizeof(float));
	hipMalloc(&coffz1, nnz*sizeof(float));	hipMalloc(&coffz2, nnz*sizeof(float));
	hipMalloc(&acoffx1, nnx*sizeof(float));	hipMalloc(&acoffx2, nnx*sizeof(float));
	hipMalloc(&acoffz1, nnz*sizeof(float));	hipMalloc(&acoffz2, nnz*sizeof(float));
	hipMalloc(&shot_Dev, nx*nt*sizeof(float));
	hipMalloc(&P_bndr, nt*(2*nx+2*nz)*sizeof(float));
	hipMalloc(&Q_bndr, nt*(2*nx+2*nz)*sizeof(float));
	hipMalloc(&migration, nz*nx*sizeof(float));
	hipMalloc(&illumination, nz*nx*sizeof(float));
	hipMalloc(&adcigs, nz*na*nx*sizeof(float));
 /******************************/
	check_gpu_error("Failed to allocate memory for variables!");
	hipLaunchKernelGGL(( get_d0), dim3(1), dim3(1), 0, 0, dx, dz, nnx, nnz, npml, vp);
	hipLaunchKernelGGL(( initial_coffe), dim3((nnx+511)/512), dim3(512), 0, 0, dt,nx,coffx1,coffx2,acoffx1,acoffx2,npml);
	hipLaunchKernelGGL(( initial_coffe), dim3((nnz+511)/512), dim3(512), 0, 0, dt,nz,coffz1,coffz2,acoffz1,acoffz2,npml);
	hipMemset(migration, 0, nz*nx*sizeof(float));
	hipMemset(illumination, 0, nz*nx*sizeof(float));
	hipMemset(adcigs, 0, nz*na*nx*sizeof(float));
	printf("--------------------------------------------------------\n");
	printf("---");
	start = clock();
 /**********IS Loop start*******/
	for(is=1;is<=ns;is++)
	{
	   printf("\n--- IS=%3d  ",is);
	   hipMemset(s_u0, 0, nnz*nnx*sizeof(float));	hipMemset(s_u1, 0, nnz*nnx*sizeof(float));
	   hipMemset(s_w0, 0, nnz*nnx*sizeof(float));	hipMemset(s_w1, 0, nnz*nnx*sizeof(float));
	   hipMemset(s_P, 0, nnz*nnx*sizeof(float));	hipMemset(s_Q, 0, nnz*nnx*sizeof(float));
	   hipMemset(s_px0, 0, nnz*nnx*sizeof(float));	hipMemset(s_px1, 0, nnz*nnx*sizeof(float));
	   hipMemset(s_pz0, 0, nnz*nnx*sizeof(float));	hipMemset(s_pz1, 0, nnz*nnx*sizeof(float));
	   hipMemset(s_qx0, 0, nnz*nnx*sizeof(float));	hipMemset(s_qx1, 0, nnz*nnx*sizeof(float));
	   hipMemset(s_qz0, 0, nnz*nnx*sizeof(float));	hipMemset(s_qz1, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_u0, 0, nnz*nnx*sizeof(float));	hipMemset(g_u1, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_w0, 0, nnz*nnx*sizeof(float));	hipMemset(g_w1, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_P, 0, nnz*nnx*sizeof(float));	hipMemset(g_Q, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_px0, 0, nnz*nnx*sizeof(float));	hipMemset(g_px1, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_pz0, 0, nnz*nnx*sizeof(float));	hipMemset(g_pz1, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_qx0, 0, nnz*nnx*sizeof(float));	hipMemset(g_qx1, 0, nnz*nnx*sizeof(float));
	   hipMemset(g_qz0, 0, nnz*nnx*sizeof(float));	hipMemset(g_qz1, 0, nnz*nnx*sizeof(float));
	   hipMemset(shot_Dev, 0, nt*nx*sizeof(float));
	   hipMemset(P_bndr, 0, nt*(2*nx+2*nz)*sizeof(float));
	   hipMemset(Q_bndr, 0, nt*(2*nx+2*nz)*sizeof(float));
/*a***********************************Forward*******************************************/
	   for(it=0,t=dt;it<nt;it++,t+=dt)
	   {
	      //if(it==0)printf(" > F >",is,it);
          /*a#####################a*/
          /*a##     Forward     ##a*/
          /*a#####################a*/
	      hipLaunchKernelGGL(( add_source), dim3(1),dim3(1), 0, 0, pfac,fs,zs,nx,nz,nnx,nnz,dt,t,favg,wtype,npml,is,ds,s_P,s_Q);
	      hipLaunchKernelGGL(( update_vel), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npml,dt,dx,dz,
	                                       s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2);
	      hipLaunchKernelGGL(( update_stress), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml,
	                                       s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0,
	                                       acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,true);
	      // NOTE(review): these assignments alias the 0/1 buffers instead of
	      // ping-pong swapping: after the first step u0 and u1 point to the
	      // same memory (the update then works in place), the original *_0
	      // allocations are leaked, and the paired hipFree calls at the end
	      // free the same pointer twice - TODO confirm and swap instead.
	      s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1;
	      hipLaunchKernelGGL(( shot_record), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, s_P, shot_Dev, true);
	      hipLaunchKernelGGL(( wavefield_bndr), dim3(((2*nx+2*nz)+511)/512),dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, true);
	      hipLaunchKernelGGL(( cal_illumination), dim3((nx*nz+511)/512), dim3(512), 0, 0, nnx, nnz, nz, npml, illumination, s_P, s_Q);
	      if((is==1)&&(it%300==0))   // snapshots of the first shot only
	      {
	         hipMemcpy(e, s_P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost);
	         fwrite(e,4L,nnx*nnz,fpsnap);
	      }
	   }//it loop end
	   hipLaunchKernelGGL(( mute_directwave), dim3((nx*nt+511)/512), dim3(512), 0, 0, nx,nt,dt,favg,dx,dz,fs,ds,zs,is,vp,epsilon,shot_Dev,20);
	   hipMemcpy(shot_Hos, shot_Dev, nt*nx*sizeof(float), hipMemcpyDeviceToHost);
	   fseek(fpshot,(is-1)*nt*nx*sizeof(float),0);
	   fwrite(shot_Hos,sizeof(float),nt*nx,fpshot);
/*a***********************************Backward*******************************************/
	   for(it=nt-1;it>=0;it--)
	   {
	     // if(it==0)printf(" B ",is,it);
          /*a#####################a*/
          /*a## Reconstruction  ##a*/
          /*a#####################a*/
	      // re-inject the saved boundary, then step the source wavefield backward
	      hipLaunchKernelGGL(( wavefield_bndr), dim3(((2*nx+2*nz)+511)/512),dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, false);
	      hipLaunchKernelGGL(( update_vel), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npml,dt,dx,dz,
	                                       s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2);
	      hipLaunchKernelGGL(( update_stress), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml,
	                                       s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0,
	                                       acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false);
	      s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1;
/*	      if((is==1)&&(it%300==0))
	      {
	         hipMemcpy(e, s_P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost);
	         fwrite(e,4L,nnx*nnz,fpsnap);
	      }*/
          /*a#####################a*/
          /*a##    Backward     ##a*/
          /*a#####################a*/
	      // inject the recorded data and step the receiver wavefield
	      hipLaunchKernelGGL(( shot_record), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, g_P, shot_Dev, false);
	      hipLaunchKernelGGL(( shot_record), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, g_Q, shot_Dev, false);
	      hipLaunchKernelGGL(( update_vel), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npml,dt,dx,dz,
	                                       g_u0,g_w0,g_u1,g_w1,g_P,g_Q,coffx1,coffx2,coffz1,coffz2);
	      hipLaunchKernelGGL(( update_stress), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt,dx,dz,g_u1,g_w1,g_P,g_Q,vp,npml,
	                                       g_px1,g_px0,g_pz1,g_pz0,g_qx1,g_qx0,g_qz1,g_qz0,
	                                       acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false);
	      g_u0=g_u1; g_w0=g_w1; g_px0=g_px1; g_pz0=g_pz1; g_qx0=g_qx1; g_qz0=g_qz1;
/*	      if((is==1)&&(it%300==0))
	      {
	         hipMemcpy(e, g_P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost);
	         fwrite(e,4L,nnx*nnz,fpsnap);
	      }*/
	      // imaging condition + angle gathers at this time level
	      hipLaunchKernelGGL(( cal_migration), dim3((nx*nz+511)/512), dim3(512), 0, 0, nnx, nnz, nz, npml, migration, s_P, g_P);
	      hipLaunchKernelGGL(( Poynting_Adcigs), dim3((nx*nz+511)/512), dim3(512), 0, 0, nnz, nx, nz, npml, na, da, adcigs,
	                                       s_P, s_Q, s_u0, s_w0, g_P, g_Q, g_u0, g_w0);
	   }//it loop end
	}//is loop end
	hipLaunchKernelGGL(( migration_illum), dim3((nx*nz+511)/512), dim3(512), 0, 0, nx, nz, npml, migration, illumination);
	hipLaunchKernelGGL(( adcigs_illum), dim3((nx*nz*na+511)/512), dim3(512), 0, 0, nx, nz, na, da, adcigs, illumination);
	printf("\n->");
	hipMemcpy(e, migration, nz*nx*sizeof(float), hipMemcpyDeviceToHost);
	migraiton_laplace_filter(1,nz,nx,e,d);   // host arrays e/d reused as scratch here
	fwrite(d,sizeof(float),nx*nz,fpmig);
	printf("->");
	hipMemcpy(e, illumination, nz*nx*sizeof(float), hipMemcpyDeviceToHost);
	fwrite(e,sizeof(float),nx*nz,fpillum);
	printf("->");
	hipMemcpy(Atemp, adcigs, nz*nx*na*sizeof(float), hipMemcpyDeviceToHost);
	fwrite(Atemp,sizeof(float),nz*nx*na,fpadcigs);
	printf("->");
	adcigs_laplace_filter(nx,nz,na,Atemp,2);/*1:(0-na);2:(na-0);3:(0-na-0)*/
	fwrite(Atemp,sizeof(float),nz*nx*na,fpadcigslaplace);
	printf("->");
	stk_adcigs(nx,nz,na,Atemp,d);
	fwrite(d,sizeof(float),nx*nz,fpstkadcigs);
	printf("->");
	adcigs_chouxi(nx,nz,na,nxa,dadcigs,Atemp);
	fwrite(Atemp,sizeof(float),nz*nxa*na,fpdadcigs);
	printf(" done!\n");
	end = clock();
 /*********IS Loop end*********/
	printf("\n--- Complete!!!!!!!!! \n");
	printf("total %d shots: %f (min)\n", ns, ((float)(end-start))/60.0/CLOCKS_PER_SEC);
 /***********close************/
	fclose(fpsnap);  fclose(fpshot);  fclose(fpmig);
	fclose(fpillum); fclose(fpadcigs);fclose(fpadcigslaplace);
	fclose(fpdadcigs); fclose(fpstkadcigs);
 /***********free*************/
	hipFree(coffx1);		hipFree(coffx2);
	hipFree(coffz1);		hipFree(coffz2);
	hipFree(acoffx1);	hipFree(acoffx2);
	hipFree(acoffz1);	hipFree(acoffz2);
	// NOTE(review): because of the pointer aliasing flagged above, each of
	// the following 0/1 pairs frees the same device pointer twice.
	hipFree(s_u0);	hipFree(s_u1);
	hipFree(s_w0);	hipFree(s_w1);
	hipFree(s_P);	hipFree(s_Q);
	hipFree(s_px0);	hipFree(s_px1);
	hipFree(s_pz0);	hipFree(s_pz1);
	hipFree(s_qx0);	hipFree(s_qx1);
	hipFree(s_qz0);	hipFree(s_qz1);
	hipFree(g_u0);	hipFree(g_u1);
	hipFree(g_w0);	hipFree(g_w1);
	hipFree(g_P);	hipFree(g_Q);
	hipFree(g_px0);	hipFree(g_px1);
	hipFree(g_pz0);	hipFree(g_pz1);
	hipFree(g_qx0);	hipFree(g_qx1);
	hipFree(g_qz0);	hipFree(g_qz1);
	hipFree(shot_Dev);
	hipFree(P_bndr);	hipFree(Q_bndr);
	hipFree(migration);
	hipFree(illumination);
	hipFree(adcigs);
 /***************host free*****************/
	free(v);	free(e);	free(d);
	free(shot_Hos);	free(Atemp);
	exit(0);
}
| 09272041b489eaedeba8e87cae78422ca80a043b.cu | //a#########################################################
//a## 2D Acoustic ISO Medium RTM
//a## Ps : P + sv wave and get rid of sv
//a## GPU(CUDA) ,poynting adcigs
//a##
//a##/*a****************************************************/
//a##Function for ISO medium modeling,2017.10.12
//a##
//a## FD use VTI finite different
//a##
//a## Rong Tao
//a#########################################################
#include<stdio.h>
#include<malloc.h>
#include<math.h>
#include<stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#define pi 3.141592653
#define mm 4
//__constant__ float c[mm]={1.125,-0.04166667};/*mm==2*/
//__constant__ float c[mm]={1.1718750,-0.065104167,0.0046875};/*mm==3*/
__constant__ float c[mm]={1.196289,-0.0797526,0.009570313,-0.0006975447};/*mm==4*/
//__constant__ float c[mm]={1.211243,-0.08972168,0.01384277,-0.00176566,0.0001186795};/*mm==5*/
__device__ float d0;
//a################################################################################
void check_gpu_error (const char *msg)
/*< abort with a message if the CUDA runtime has a pending error >*/
{
    cudaError_t err = cudaGetLastError ();
    if (cudaSuccess == err) return;   /* nothing pending */
    printf("Cuda error: %s: %s\n", msg, cudaGetErrorString (err));
    exit(0);
}
/*************func**************/
void migraiton_laplace_filter(int adj, int nz, int nx, float *in, float *out)
/*< apply the 2-D Laplacian (adjoint when adj!=0) to in, overwriting out.
    Unlike laplac2_lop, out is zeroed here first.
    (Linear operator adapted from Madagascar Mlaplac2.) >*/
{
    int ix, iz, k;
    for (k = 0; k < nx*nz; k++) out[k] = 0.0;
    for (ix = 0; ix < nx; ix++) {
        for (iz = 0; iz < nz; iz++) {
            int j = iz + ix*nz;
            /* collect offsets of the neighbours that exist at this point */
            int off[4];
            int noff = 0;
            if (iz > 0)      off[noff++] = -1;
            if (iz < nz-1)   off[noff++] = +1;
            if (ix > 0)      off[noff++] = -nz;
            if (ix < nx-1)   off[noff++] = +nz;
            for (k = 0; k < noff; k++) {
                if (adj) {
                    out[j+off[k]] -= in[j];
                    out[j]        += in[j];
                } else {
                    out[j] += in[j] - in[j+off[k]];
                }
            }
        }
    }
}
void laplac2_lop(int adj, int nz, int nx, float *in, float *out)
/*< 2-D Laplacian as a linear operator (adjoint when adj!=0).
    Contributions are ADDED into out; the caller must zero out beforehand. >*/
{
    int ix, iz, k;
    for (ix = 0; ix < nx; ix++) {
        for (iz = 0; iz < nz; iz++) {
            int j = iz + ix*nz;
            /* collect offsets of the neighbours that exist at this point */
            int off[4];
            int noff = 0;
            if (iz > 0)      off[noff++] = -1;
            if (iz < nz-1)   off[noff++] = +1;
            if (ix > 0)      off[noff++] = -nz;
            if (ix < nx-1)   off[noff++] = +nz;
            for (k = 0; k < noff; k++) {
                if (adj) {
                    out[j+off[k]] -= in[j];
                    out[j]        += in[j];
                } else {
                    out[j] += in[j] - in[j+off[k]];
                }
            }
        }
    }
}
/*************func**************/
__global__ void add_source(float pfac,float xsn,float zsn,int nx,int nz,int nnx,int nnz,float dt,float t,
                  float favg,int wtype,int npml,int is,int ds,float *P,float *Q)
/*< inject the source wavelet (scaled by pfac) into P and Q at the location of
    shot number `is` (x = xsn+(is-1)*ds, z = zsn, shifted into the npml pad).
    wtype: 1 = Ricker, 2 = derivative of Gaussian, 3 = Gaussian; launched with
    a single thread per time step. >*/
{
       int ixs,izs;
       float x_,xx_,tdelay,ts,source=0.0,fs;
       tdelay=1.0/favg;           // delay so the wavelet starts near zero amplitude
       ts=t-tdelay;
       fs=xsn+(is-1)*ds;          // x grid index (interior coords) of this shot
          if(wtype==1)//ricker wavelet
          {
              x_=favg*ts;
              xx_=x_*x_;
              source=(1-2*pi*pi*(xx_))*exp(-(pi*pi*xx_));
          }else if(wtype==2){//derivative of gaussian
              x_=(-4)*favg*favg*pi*pi/log(0.1);
              source=(-2)*pi*pi*ts*exp(-x_*ts*ts);
          }else if(wtype==3){//gaussian
              x_=(-1)*favg*favg*pi*pi/log(0.1);
              source=exp(-x_*ts*ts);
          }
      if(t<=2*tdelay)             // inject only while the wavelet is non-negligible
      {
         ixs = (int)(fs+0.5)+npml-1;   // shift into the PML-padded grid
         izs = (int)(zsn+0.5)+npml-1;
         P[ixs*nnz+izs]+=pfac*source;
         Q[ixs*nnz+izs]+=pfac*source;
      }
}
/*******************func*********************/
__global__ void update_vel(int nx,int nz,int nnx,int nnz,int npml,float dt,float dx,float dz,
                 float *u0,float *w0,float *u1,float *w1,float *P,float *Q,
                 float *coffx1,float *coffx2,float *coffz1,float *coffz2)
/*< advance the particle-velocity fields u (x component) and w (z component)
    one time step with an mm-th order staggered-grid finite difference of the
    stresses P,Q.  coffx*/coffz* are the PML damping coefficients per x/z
    index (1.0 in the interior).  One thread per padded-grid cell. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int ix,iz,im;
	 float dtx,dtz,xx,zz;
	 ix=id/nnz;       // column (x) in the padded nnx*nnz grid
	 iz=id%nnz;       // row (z)
	 dtx=dt/dx;
	 dtz=dt/dz;
	 if(id>=mm&&id<nnx*nnz-mm)
	 {
	   if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))  // keep the stencil inside the grid
	   {
	      xx=0.0;
	      zz=0.0;
	      for(im=0;im<mm;im++)
	       {
	         xx+=c[im]*(P[id+(im+1)*nnz]-P[id-im*nnz]);  // staggered dP/dx
	         zz+=c[im]*(Q[id+im+1]      -Q[id-im]);      // staggered dQ/dz
	       }
	      u1[id]=coffx2[ix]*u0[id]-coffx1[ix]*dtx*xx;
	      w1[id]=coffz2[iz]*w0[id]-coffz1[iz]*dtz*zz;
	   }
	 }
}
/*******************func***********************/
__global__ void update_stress(int nx,int nz,int nnx,int nnz,float dt,float dx,float dz,
                   float *u1,float *w1,float *P,float *Q,float *vp,int npml,
                   float *px1,float *px0,float *pz1,float *pz0,float *qx1,float *qx0,float *qz1,float *qz0,
                   float *acoffx1,float *acoffx2,float *acoffz1,float *acoffz2,
                   float *delta,float *epsilon,int fs,int ds,int zs,int is,bool SV)
/*< advance the split-PML stress components (px,pz,qx,qz) one time step for a
    VTI medium with Thomsen parameters epsilon/delta, then assemble
    P = px+pz and Q = qx+qz.  When SV is true, epsilon/delta are tapered to
    zero inside a small circle (radii r..R) around the source of shot `is`,
    which suppresses the spurious SV wave of the acoustic VTI approximation.
    One thread per padded-grid cell. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 int im,ix,iz,rx,rz,R=15,r=5;     // R/r: outer/inner radius of the iso circle (cells)
	 float dtx,dtz, xx,zz,ee,dd;
	 ix=id/nnz;
	 iz=id%nnz;
	 dtx=dt/dx;
	 dtz=dt/dz;
	 if(id>=mm&&id<nnx*nnz-mm)
	 {
     /************************i****************************************/
     /************************iso circle start*************************/
	   rx=ix-(fs+(is-1)*ds+npml);     // offset from the source, x (cells)
	   rz=iz-(zs+npml);               // offset from the source, z (cells)
	   if(SV){
	      if((rx*rx+rz*rz)<=R*R){
	         if((rx*rx+rz*rz)<=r*r){
	            ee = 0.0;            // fully isotropic at the source
	            dd = 0.0;
	         }else{
	            // cosine taper from isotropic (at r) back to the model values
	            ee = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*epsilon[id];
	            dd = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*delta[id];
	         }
	      }else{
	         ee=epsilon[id];
	         dd=delta[id];
	      }
	   }else{
	      ee=epsilon[id];
	      dd=delta[id];
	   }
     /************************ iso circle end *************************/
     /************************i****************************************/
	   if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm))
	   {
	      xx=0.0;
	      zz=0.0;
	      for(im=0;im<mm;im++)
	      {
	         xx+=c[im]*(u1[id+im*nnz]-u1[id-(im+1)*nnz]);  // staggered du/dx
	         zz+=c[im]*(w1[id+im] -w1[id-im-1]);           // staggered dw/dz
	      }
	      // acoustic VTI update: (1+2*eps) on the horizontal P term,
	      // sqrt(1+2*delta) coupling the P/Q cross terms
	      px1[id]=acoffx2[ix]*px0[id]-acoffx1[ix]*vp[id]*vp[id]*(1+2*ee)*dtx*xx;
	      pz1[id]=acoffz2[iz]*pz0[id]-acoffz1[iz]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtz*zz;
	      qx1[id]=acoffx2[ix]*qx0[id]-acoffx1[ix]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtx*xx;
	      qz1[id]=acoffz2[iz]*qz0[id]-acoffz1[iz]*vp[id]*vp[id]*dtz*zz;
	      P[id]=px1[id]+pz1[id];
	      Q[id]=qx1[id]+qz1[id];
	   }
	 }
}
/********************func**********************/
__global__ void get_d0(float dx,float dz,int nnx,int nnz,int npml,float *vp)
/*< compute the PML reference damping d0 (device-global variable) from the
    velocity at the centre cell of the padded model; single-thread launch. >*/
{
       d0=10.0*vp[nnx*nnz/2]*log(100000.0)/(2.0*npml*((dx+dz)/2.0));
}
/*************func*******************/
void pad_vv(int nx,int nz,int nnx,int nnz,int npml,float *ee)
/*< replicate the interior edge values of the model into the npml-wide pad:
    pass 1 copies the outermost interior columns sideways, pass 2 copies the
    outermost interior rows vertically (which also fills the corners). >*/
{
   int ix, iz;
   /* pass 1: left / right pad columns */
   for (ix = 0; ix < nnx; ix++) {
      int src = -1;
      if (ix < npml)           src = npml;         /* left pad  */
      else if (ix >= nnx-npml) src = nnx-npml-1;   /* right pad */
      if (src < 0) continue;                       /* interior column: untouched */
      for (iz = 0; iz < nnz; iz++)
         ee[ix*nnz+iz] = ee[src*nnz+iz];
   }
   /* pass 2: top / bottom pad rows */
   for (ix = 0; ix < nnx; ix++) {
      for (iz = 0; iz < nnz; iz++) {
         if (iz < npml)           ee[ix*nnz+iz] = ee[ix*nnz+npml];
         else if (iz >= nnz-npml) ee[ix*nnz+iz] = ee[ix*nnz+nnz-npml-1];
      }
   }
}
/*************func*******************/
void read_file(char FN1[],int nx,int nz,int nnx,int nnz,float dx,float dz,float favg,float dt,
               float *v,float *e,float *d,int npml)
/*< read the nx*nz velocity model (raw 4-byte floats) from FN1 into the
    interior of the padded v array, zero epsilon/delta there, replicate all
    three into the PML pad, and exit(0) if the sampling violates the
    dispersion or stability conditions for the mm-th order scheme. >*/
{
	 int i,j,id;
	 float vmax, vmin, H_min, dt_max, dxz_max, C, tmp;
	 FILE *fp1;
	 if((fp1=fopen(FN1,"rb"))==NULL){printf("error open <%s>!\n",FN1);exit(0);}
	 vmin= 999999.9;
	 vmax=-999999.9;
	 for(i=npml;i<nx+npml;i++)
	 {
	    for(j=npml;j<nz+npml;j++)
	    {
	       id=i*nnz+j;
	       fread(&v[id],4L,1,fp1);   // raw little-endian float samples, column by column
	       e[id] = 0.0;              // epsilon/delta start at zero (isotropic model)
	       d[id] = 0.0;
	       if(vmax<v[id]) vmax = v[id];
	       if(vmin>v[id]) vmin = v[id];
	    }
	 }
	 fclose(fp1);
	 printf("------------------------------------\n---\n");
	 printf("--- Vmax=%.4f, Vmin=%.4f\n",vmax,vmin);
   /*********boundary*********/
	 pad_vv(nx,nz,nnx,nnz,npml,e);
	 pad_vv(nx,nz,nnx,nnz,npml,d);
	 pad_vv(nx,nz,nnx,nnz,npml,v);
	 H_min=dx<dz?dx:dz;
	 // NOTE(review): dt_max is derived from vmin here; stability bounds are
	 // usually driven by vmax - TODO confirm the intended criterion.
	 dt_max = 0.5*H_min/vmin;
	 dxz_max = vmax/favg*0.2;
	 if(dxz_max<dz||dxz_max<dx){printf("--- You need have to redefine DX and DZ ! \n");exit(0);}
	 if(dt_max<dt){printf("--- You need have to redefine DT ! \n");exit(0);}
	 // NOTE(review): both operands of this '||' are identical; the second was
	 // presumably meant to test the other spatial step - confirm.
	 if ( favg >= vmin/( 5.0*(dx>dz?dx:dz) ) || favg >= vmin/( 5.0*(dx>dz?dx:dz) ) )
	    {printf("--- Non-dispersion relation not satisfied! \n");exit(0);}
	 else if ( mm == 2 ) C = 0.857;   // Courant numbers per FD order (mm is fixed at 4)
	 else if ( mm == 3 ) C = 0.8;
	 else if ( mm == 4 ) C = 0.777;
	 else if ( mm == 5 ) C = 0.759;
	 tmp = dt*vmax*sqrtf( 1.0/(dx*dx)+1.0/(dz*dz) );
	 if ( tmp >= C){ printf("--- Stability condition not satisfied! tmp = %f, C = %f\n",tmp,C);exit(0);}
}
/*************func*******************/
__global__ void initial_coffe(float dt,int nn,float *coff1,float *coff2,float *acoff1,float *acoff2,int npml)
/*< fill the 1-D PML damping coefficient tables for one axis of length
    nn+2*npml: coff1/coff2 use half-cell offsets (velocity grid), while
    acoff1/acoff2 use whole-cell offsets (stress grid).  All four are 1.0 in
    the interior; d0 is the device-global set by get_d0. >*/
{
	 int id=threadIdx.x+blockDim.x*blockIdx.x;
	 if(id<nn+2*npml)
	 {
	   if(id<npml)
	   {
	      // leading PML strip: quadratic damping profile, half-cell staggered
	      coff1[id]=1.0/(1.0+(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0);
	      coff2[id]=coff1[id]*(1.0-(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0);
	      acoff1[id]=1.0/(1.0+(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0);
	      acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0);
	   }else if(id>=npml&&id<npml+nn){
	      // interior: no damping
	      coff1[id]=1.0;
	      coff2[id]=1.0;
	      acoff1[id]=1.0;
	      acoff2[id]=1.0;
	   }else{
	      // trailing PML strip (mirror of the leading one)
	      coff1[id]=1.0/(1.0+(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0);
	      coff2[id]=coff1[id]*(1.0-(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0);
	      acoff1[id]=1.0/(1.0+(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0);
	      acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0);
	   }
	 }
}
/*************func*******************/
/* Record (flag==true) or inject (flag==false) one time sample per surface
 * receiver. One thread per trace: P is the padded nnx*nnz wavefield, shot
 * is an nx-by-nt gather stored trace-major (shot[it + nt*ix]). The
 * recording depth is the first interior row, z = npml. */
__global__ void shot_record(int nnx, int nnz, int nx, int nz, int npml, int it, int nt, float *P, float *shot, bool flag)
{
    const int ix = threadIdx.x + blockDim.x * blockIdx.x;
    if (ix >= nx)
        return;
    const int pidx = npml + nnz * (ix + npml);  /* wavefield sample under receiver ix */
    const int sidx = it + nt * ix;              /* this trace's sample at time step it */
    if (flag)
        shot[sidx] = P[pidx];                   /* forward: record the wavefield */
    else
        P[pidx] = shot[sidx];                   /* backward: inject the data */
}
/*************func*******************/
/* Save (flag==true) or restore (flag==false) the P and Q wavefields on a
 * one-cell ring just outside the interior grid, so the source wavefield
 * can be reconstructed time-reversed during the backward pass.
 * One thread per boundary point: id in [0, 2*nx+2*nz), ordered
 * up row, down row, left column, right column. The per-step storage
 * stride in P_bndr/Q_bndr is (2*nx+2*nz).
 * NOTE(review): the saved rows/columns are npml-1 and npml+nz+1 (resp.
 * npml+nx+1) — the row npml+nz is skipped; presumably tied to the FD
 * stencil layout — confirm against update_vel/update_stress. */
__global__ void wavefield_bndr(int nnx, int nnz, int nx, int nz, int npml, int it, int nt,
                               float *P, float *Q, float *P_bndr, float *Q_bndr, bool flag)
{
    int id=threadIdx.x+blockDim.x*blockIdx.x;
    if(id<2*nx+2*nz)
    {
        if(flag)/////////////////////////////////save boundary
        {
            if(id<nx){//up: row z = npml-1, columns id+npml
                P_bndr[it*(2*nx+2*nz)+id]=P[npml-1+nnz*(id+npml)];
                Q_bndr[it*(2*nx+2*nz)+id]=Q[npml-1+nnz*(id+npml)];
            }else if(id>=nx&&id<(2*nx)){//down: row z = npml+nz+1
                P_bndr[it*(2*nx+2*nz)+id]=P[npml+nz+1+nnz*(id-nx+npml)];
                Q_bndr[it*(2*nx+2*nz)+id]=Q[npml+nz+1+nnz*(id-nx+npml)];
            }else if(id>=(2*nx)&&id<(2*nx+nz)){//left: column x = npml-1
                P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx+npml+nnz*(npml-1)];
                Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx+npml+nnz*(npml-1)];
            }else if(id>=(2*nx+nz)){//right: column x = npml+nx+1
                P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx-nz+npml+nnz*(npml+nx+1)];
                Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx-nz+npml+nnz*(npml+nx+1)];
            }
        }else{/////////////////////////////add boundary (inverse of the above)
            if(id<nx){//up
                P[npml-1+nnz*(id+npml)]=P_bndr[it*(2*nx+2*nz)+id];
                Q[npml-1+nnz*(id+npml)]=Q_bndr[it*(2*nx+2*nz)+id];
            }else if(id>=nx&&id<(2*nx)){//down
                P[npml+nz+1+nnz*(id-nx+npml)]=P_bndr[it*(2*nx+2*nz)+id];
                Q[npml+nz+1+nnz*(id-nx+npml)]=Q_bndr[it*(2*nx+2*nz)+id];
            }else if(id>=(2*nx)&&id<(2*nx+nz)){//left
                P[id-2*nx+npml+nnz*(npml-1)]=P_bndr[it*(2*nx+2*nz)+id];
                Q[id-2*nx+npml+nnz*(npml-1)]=Q_bndr[it*(2*nx+2*nz)+id];
            }else if(id>=(2*nx+nz)){//right
                P[id-2*nx-nz+npml+nnz*(npml+nx+1)]=P_bndr[it*(2*nx+2*nz)+id];
                Q[id-2*nx-nz+npml+nnz*(npml+nx+1)]=Q_bndr[it*(2*nx+2*nz)+id];
            }
        }
    }
}
/*************func**************/
/* Zero the direct-wave arrival in the shot gather. One thread per
 * (trace, time-sample) pair; shot is trace-major (shot[it + nt*ix]).
 * The first-arrival time is predicted from the source-receiver offset and
 * the anisotropic horizontal velocity vp*sqrt(1+2*epsilon) (sampled at
 * index 1), and a window of +-(2 periods + tt samples) around it is muted. */
__global__ void mute_directwave(int nx,int nt,float dt,float favg,
                                float dx,float dz,int fs,int ds,int zs,int is,
                                float *vp,float *epsilon,float *shot,int tt)
{
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= nx * nt)
        return;
    const int trace  = tid / nt;   /* receiver index */
    const int sample = tid % nt;   /* time-step index */
    /* offset from the current shot's source position, and source depth */
    const float off_x = dx * abs(trace - fs - (is - 1) * ds);
    const float depth = dz * zs;
    /* predicted direct-wave traveltime */
    const float t0 = sqrtf(pow(off_x, 2) + pow(depth, 2)) / (vp[1] * sqrtf(1 + 2 * epsilon[1]));
    const int half   = (int)(2.0 / (dt * favg));        /* two dominant periods */
    const int t_last = (int)(t0 / dt) + half + tt;      /* end of mute window */
    if (sample > (int)(t0 / dt) - tt && sample < t_last)
        shot[tid] = 0.0;
}
/*************func**************/
/* Accumulate source illumination (squared P and Q amplitudes) over the
 * interior grid. One thread per interior cell: ix = id/nz, iz = id%nz.
 * illumination is an (nnx-2*npml)*nz array; P/Q are padded nnx*nnz fields.
 * Zero samples are clamped to 1 so later normalization never divides by 0.
 *
 * Fix: the original guard was `id < nnx*nnz`, but `illumination[id]` and
 * the ix = id/nz mapping are only valid for id < nx*nz (the launch is
 * sized (nx*nz+511)/512 blocks); tail threads could write out of bounds.
 * nx is recovered as nnx - 2*npml so the signature is unchanged. */
__global__ void cal_illumination(int nnx, int nnz, int nz, int npml, float *illumination, float *P, float *Q)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    int nx = nnx - 2 * npml;          /* interior width */
    if (id >= nx * nz)
        return;                       /* guard matches the nx*nz domain */
    int ix = id / nz;
    int iz = id % nz;
    int ip = iz + npml + nnz * (ix + npml);   /* padded-grid index */
    illumination[id] += P[ip] * P[ip] + Q[ip] * Q[ip];
    if (illumination[id] == 0) illumination[id] = 1.0;   /* avoid later /0 */
}
/*************func**************/
/* Cross-correlation imaging condition: migration(ix,iz) += s*g at each
 * interior cell. One thread per cell: ix = id/nz, iz = id%nz; s/g are the
 * padded nnx*nnz source/receiver wavefields, migration is nx*nz.
 *
 * Fix: the original guard was `id < nnx*nnz`, allowing tail threads of the
 * (nx*nz+511)/512-block launch to write past the nx*nz `migration` buffer
 * and read out-of-range columns of s/g. nx is recovered as nnx - 2*npml
 * so the signature is unchanged. */
__global__ void cal_migration(int nnx, int nnz, int nz, int npml, float *migration, float *s, float *g)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    int nx = nnx - 2 * npml;          /* interior width */
    if (id >= nx * nz)
        return;                       /* guard matches the nx*nz domain */
    int ix = id / nz;
    int iz = id % nz;
    int ip = iz + npml + nnz * (ix + npml);   /* padded-grid index */
    migration[id] += s[ip] * g[ip];
}
/*************func**************/
/* Normalize the migrated image by the source illumination, one thread per
 * interior cell. npml is unused here but kept for signature symmetry with
 * the neighbouring kernels. */
__global__ void migration_illum(int nx, int nz, int npml, float *migration, float *illumination)
{
    const int cell = threadIdx.x + blockDim.x * blockIdx.x;
    if (cell >= nx * nz)
        return;
    migration[cell] = migration[cell] / illumination[cell];
}
/*************func**************/
/* Accumulate angle-domain common-image gathers using Poynting vectors.
 * One thread per interior cell (id in [0, nx*nz)). The source-side
 * direction is (-P*u, -Q*w) and the receiver-side (P*u, Q*w); the opening
 * half-angle is binned with width `da` into adcigs[ix][ia][iz] (iz fastest,
 * na bins per CDP), weighted by a cos^3 taper of the bin angle.
 *
 * Fixes vs. the original:
 *  - the wavefields were read BEFORE the `id < nx*nz` bounds check, so
 *    tail threads performed out-of-range loads; the guard now comes first;
 *  - a zero-length Poynting vector (b1*b2 == 0) previously produced an
 *    inf/NaN cosine that merely failed the [-1,1] test; it is now skipped
 *    explicitly, with identical output. */
__global__ void Poynting_Adcigs(int nnz, int nx, int nz, int npml, int na, int da, float *adcigs,
                                float *s_P, float *s_Q, float *s_u, float *s_w,
                                float *g_P, float *g_Q, float *g_u, float *g_w)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= nx * nz)
        return;                               /* bounds check before any load */
    int ix = id / nz;
    int iz = id % nz;
    int ip = iz + npml + nnz * (ix + npml);   /* padded-grid index */
    float Ssx = -s_P[ip] * s_u[ip];
    float Ssz = -s_Q[ip] * s_w[ip];
    float Sgx =  g_P[ip] * g_u[ip];
    float Sgz =  g_Q[ip] * g_w[ip];
    float b1 = Ssx * Ssx + Ssz * Ssz;
    float b2 = Sgx * Sgx + Sgz * Sgz;
    if (b1 * b2 == 0.0f)
        return;                               /* angle undefined at zero energy */
    /* cosine of the angle between the two Poynting vectors, slightly
     * inflated by 1/(1-0.1) to stabilize the acos near +-1 */
    float a = (Ssx * Sgx + Ssz * Sgz) / (sqrtf(b1 * b2) * (1 - 0.1));
    if (a >= -1 && a <= 1)
    {
        a = 0.5 * acosf(a) * 180.0 / pi;      /* half opening angle, degrees */
        int ia = (int)(a / (da * 1.0));
        if (ia < na)
        {
            adcigs[iz + nz * ia + nz * na * (id / nz)] += s_P[ip] * g_P[ip]
                * cosf(ia * pi / 180.0) * cosf(ia * pi / 180.0) * cosf(ia * pi / 180.0);
        }
    }
}
/*************func**************/
/* Normalize every angle-gather sample by the illumination of its (ix, iz)
 * cell. One thread per sample of the nx*na*nz volume (layout ix-major,
 * iz fastest). `da` is unused but kept for call-site compatibility. */
__global__ void adcigs_illum(int nx, int nz, int na, int da, float *adcigs, float *illumination)
{
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= nx * nz * na)
        return;
    const int ix = tid / (nz * na);   /* CDP index */
    const int iz = tid % nz;          /* depth index */
    adcigs[tid] = adcigs[tid] / illumination[iz + nz * ix];
}
/*************func**************/
/* Apply a 2-D Laplacian filter (laplac2_lop) to the ADCIG volume,
 * accumulating the filtered panels angle by angle:
 *   flag==1 : forward angle sweep (0..na-1), write the running sum back;
 *   flag==2 : backward sweep (na-1..0), write the running sum back;
 *   flag==3 : forward sweep accumulates only, backward sweep writes.
 * adcigs is laid out as [ix][ia][iz] (iz fastest).
 * Fixes vs. the original: `temp` was malloc'd and then read through `+=`
 * without ever being initialized (undefined behaviour) — it is now
 * zero-initialized with calloc; the three scratch buffers are freed. */
void adcigs_laplace_filter(int nx,int nz,int na,float *adcigs,int flag)
{
    int ix,iz,ia,id,ido;
    float *in, *out, *temp;
    in  =(float*)malloc(nz*nx*sizeof(float));
    out =(float*)malloc(nz*nx*sizeof(float));
    temp=(float*)calloc((size_t)nz*nx*na,sizeof(float)); /* must start at zero */
    if(flag==1||flag==3)
        for (ia=0; ia<na; ia++){
            /* gather panel ia into a contiguous nz*nx image */
            for (ix=0; ix<nx; ix++){
                for (iz=0; iz<nz; iz++){
                    id=ix*na*nz+ia*nz+iz;
                    ido=ix*nz+iz;
                    in[ido]=adcigs[id];
                }
            }
            laplac2_lop( 1, nz, nx, in, out );
            /* accumulate the filtered panel; write back only for flag==1 */
            for (ix=0; ix<nx; ix++) {
                for (iz=0; iz<nz; iz++) {
                    id=ix*na*nz+ia*nz+iz;
                    ido=ix*nz+iz;
                    temp[id]+=out[ido];
                    if(flag==1)adcigs[id]=temp[id];
                }
            }
        }
    if(flag!=1)
        for (ia=na-1; ia>=0; ia--) {
            for (ix=0; ix<nx; ix++) {
                for (iz=0; iz<nz; iz++) {
                    id=ix*na*nz+ia*nz+iz;
                    ido=ix*nz+iz;
                    in[ido]=adcigs[id];
                }
            }
            laplac2_lop( 1, nz, nx, in, out );
            for (ix=0; ix<nx; ix++) {
                for (iz=0; iz<nz; iz++) {
                    id=ix*na*nz+ia*nz+iz;
                    ido=ix*nz+iz;
                    temp[id]+=out[ido];
                    if(flag==2||flag==3) adcigs[id]=temp[id];
                }
            }
        }
    free(in); free(out); free(temp);   /* fix: scratch buffers were leaked */
}
/*************func**************/
/* Decimate the ADCIG volume laterally: keep every dadcigs-th CDP and
 * compact the kept gathers to the front of `adcigs` in place.
 * Layout is [ix][ia][iz] (iz fastest); destination index ido <= source
 * index id when iterating ix ascending, so in-place copying is safe.
 * `nxa` only sized the old scratch buffer and is unused, but is kept for
 * interface compatibility.
 * Fixes vs. the original: an nz*nxa*na scratch buffer was allocated,
 * never freed (leak) and served only as a pass-through; the copy is now
 * done directly. */
void adcigs_chouxi(int nx,int nz,int na,int nxa,int dadcigs,float *adcigs)
{
    int ix,iz,ia,id,ido;
    (void)nxa;   /* retained for signature compatibility */
    for (ix=0; ix<nx; ix++) {
        if (ix%dadcigs!=0) continue;           /* keep every dadcigs-th CDP */
        for (ia=0; ia<na; ia++) {
            for (iz=0; iz<nz; iz++) {
                id =ix*na*nz+ia*nz+iz;
                ido=ix/dadcigs*na*nz+ia*nz+iz;
                adcigs[ido]=adcigs[id];        /* ido <= id: safe in place */
            }
        }
    }
}
/*************func**************/
/* Stack each angle gather over all angles:
 * migration(ix,iz) = sum over ia of adcigs[ix][ia][iz].
 * adcigs is [ix][ia][iz] (iz fastest); migration is nx*nz. */
void stk_adcigs(int nx,int nz,int na,float *adcigs,float *migration)
{
    for (int ix = 0; ix < nx; ix++) {
        for (int iz = 0; iz < nz; iz++) {
            float acc = 0.0;
            const float *gather = adcigs + ix * na * nz + iz;
            for (int ia = 0; ia < na; ia++)
                acc += gather[ia * nz];     /* stride nz between angles */
            migration[ix * nz + iz] = acc;
        }
    }
}
//a########################################################################
//a## Main Function ##
//a########################################################################
/* Driver: 2-D VTI (epsilon/delta) reverse-time migration with PML
 * boundaries. For each shot: forward-propagate and record the surface
 * data (saving the wavefield boundary every step), mute the direct wave,
 * then reconstruct the source wavefield time-reversed while
 * back-propagating the receiver data, cross-correlating for the image,
 * illumination and Poynting-vector ADCIGs. Command-line arguments are
 * ignored; all parameters are hard-coded below. */
int main(int argc,char *argv[])
{
    int is, it, nx, nz, nnx, nnz, nt, wtype, na, da, dadcigs, nxa;
    int ns, ds, fs, zs, npml;
    float dx, dz, dt, t, pfac, favg;
    /* PML coefficient tables (device, one pair per axis per staggering) */
    float *coffx1,*coffx2,*coffz1,*coffz2,*acoffx1,*acoffx2,*acoffz1,*acoffz2;
    /* host model arrays; e and d are also reused as D2H scratch later */
    float *v, *e, *d;
    /* device model arrays */
    float *vp, *epsilon, *delta;
    /* source-side wavefields (device): velocities u/w, split stresses p/q */
    float *s_u0, *s_u1, *s_px0, *s_qx0, *s_px1, *s_qx1;
    float *s_w0, *s_w1, *s_pz0, *s_qz0, *s_pz1, *s_qz1;
    /* receiver-side wavefields (device) */
    float *g_u0, *g_u1, *g_px0, *g_qx0, *g_px1, *g_qx1;
    float *g_w0, *g_w1, *g_pz0, *g_qz0, *g_pz1, *g_qz1;
    float *s_P, *s_Q, *g_P, *g_Q, *shot_Dev, *shot_Hos, *P_bndr, *Q_bndr;
    /* imaging outputs (device) */
    float *migration, *illumination, *adcigs;
    float *Atemp;
    clock_t start, end;
    /*************wavelet\boundary**************/
    wtype=1;npml=20;
    /********** dat document ***********/
    char FN1[250]={"waxian_vel_601_301.dat"};
    char FN4[250]={"55Hz_waxian_shot_obs.dat"};
    char FN5[250]={"55Hz_waxian_snap.dat"};
    char FN6[250]={"55Hz_waxian_migration.dat"};
    char FN7[250]={"55Hz_waxian_illumination.dat"};
    char FN8[250]={"55Hz_waxian_adcigs.dat"};
    char FN9[250]={"55Hz_waxian_adcigs_laplace.dat"};
    char FN10[250]={"55Hz_waxian_adcigs_dadcigs.dat"};
    char FN11[250]={"55Hz_waxian_migration_stkAdcigs.dat"};
    /********* parameters *************/
    nx=601;
    nz=301; favg=55; pfac=1000.0;
    dx=5.0;
    dz=5.0;
    nt=3001;
    dt=0.0005;
    ns=200;
    fs=nx/ns/2;      /* first source x (grid points) */
    ds=nx/ns;        /* source spacing */
    zs=1;            /* source depth (grid points) */
    na=70;           /* number of angle bins */
    da=1;            /* angle-bin width (degrees) */
    dadcigs=25;      /* lateral ADCIG decimation */
    /********aaa************/
    /* NOTE(review): none of the fopen results below are checked for NULL */
    FILE *fpsnap, *fpshot, *fpmig, *fpillum, *fpadcigs, *fpadcigslaplace, *fpdadcigs,*fpstkadcigs;
    fpshot=fopen(FN4,"wb");
    fpsnap=fopen(FN5,"wb");
    fpmig=fopen(FN6,"wb");
    fpillum=fopen(FN7,"wb");
    fpadcigs=fopen(FN8,"wb");
    fpadcigslaplace=fopen(FN9,"wb");
    fpdadcigs=fopen(FN10,"wb");
    fpstkadcigs=fopen(FN11,"wb");
    /*************v***************/
    nnx=nx+2*npml;
    nnz=nz+2*npml;
    nxa=(int)(nx/dadcigs);
    /************a*************/
    Atemp=(float*)malloc(nz*nx*na*sizeof(float));
    v=(float*)malloc(nnz*nnx*sizeof(float));
    e=(float*)malloc(nnz*nnx*sizeof(float));
    d=(float*)malloc(nnz*nnx*sizeof(float));
    shot_Hos=(float*)malloc(nt*nx*sizeof(float));
    read_file(FN1,nx,nz,nnx,nnz,dx,dz,favg,dt,v,e,d,npml);
    /****************************/
    cudaSetDevice(0);// initialize device, default device=0;
    check_gpu_error("Failed to initialize device!");
    /****************************/
    /* model parameters to device */
    cudaMalloc(&vp, nnz*nnx*sizeof(float));
    cudaMalloc(&epsilon, nnz*nnx*sizeof(float));
    cudaMalloc(&delta, nnz*nnx*sizeof(float));
    cudaMemcpy(vp, v, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(epsilon, e, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(delta, d, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice);
    /****************************/
    /* wavefield / output buffers on the device */
    cudaMalloc(&s_u0, nnz*nnx*sizeof(float)); cudaMalloc(&s_u1, nnz*nnx*sizeof(float));
    cudaMalloc(&s_w0, nnz*nnx*sizeof(float)); cudaMalloc(&s_w1, nnz*nnx*sizeof(float));
    cudaMalloc(&s_P, nnz*nnx*sizeof(float)); cudaMalloc(&s_Q, nnz*nnx*sizeof(float));
    cudaMalloc(&s_px0, nnz*nnx*sizeof(float)); cudaMalloc(&s_px1, nnz*nnx*sizeof(float));
    cudaMalloc(&s_pz0, nnz*nnx*sizeof(float)); cudaMalloc(&s_pz1, nnz*nnx*sizeof(float));
    cudaMalloc(&s_qx0, nnz*nnx*sizeof(float)); cudaMalloc(&s_qx1, nnz*nnx*sizeof(float));
    cudaMalloc(&s_qz0, nnz*nnx*sizeof(float)); cudaMalloc(&s_qz1, nnz*nnx*sizeof(float));
    cudaMalloc(&g_u0, nnz*nnx*sizeof(float)); cudaMalloc(&g_u1, nnz*nnx*sizeof(float));
    cudaMalloc(&g_w0, nnz*nnx*sizeof(float)); cudaMalloc(&g_w1, nnz*nnx*sizeof(float));
    cudaMalloc(&g_P, nnz*nnx*sizeof(float)); cudaMalloc(&g_Q, nnz*nnx*sizeof(float));
    cudaMalloc(&g_px0, nnz*nnx*sizeof(float)); cudaMalloc(&g_px1, nnz*nnx*sizeof(float));
    cudaMalloc(&g_pz0, nnz*nnx*sizeof(float)); cudaMalloc(&g_pz1, nnz*nnx*sizeof(float));
    cudaMalloc(&g_qx0, nnz*nnx*sizeof(float)); cudaMalloc(&g_qx1, nnz*nnx*sizeof(float));
    cudaMalloc(&g_qz0, nnz*nnx*sizeof(float)); cudaMalloc(&g_qz1, nnz*nnx*sizeof(float));
    cudaMalloc(&coffx1, nnx*sizeof(float)); cudaMalloc(&coffx2, nnx*sizeof(float));
    cudaMalloc(&coffz1, nnz*sizeof(float)); cudaMalloc(&coffz2, nnz*sizeof(float));
    cudaMalloc(&acoffx1, nnx*sizeof(float)); cudaMalloc(&acoffx2, nnx*sizeof(float));
    cudaMalloc(&acoffz1, nnz*sizeof(float)); cudaMalloc(&acoffz2, nnz*sizeof(float));
    cudaMalloc(&shot_Dev, nx*nt*sizeof(float));
    cudaMalloc(&P_bndr, nt*(2*nx+2*nz)*sizeof(float));
    cudaMalloc(&Q_bndr, nt*(2*nx+2*nz)*sizeof(float));
    cudaMalloc(&migration, nz*nx*sizeof(float));
    cudaMalloc(&illumination, nz*nx*sizeof(float));
    cudaMalloc(&adcigs, nz*na*nx*sizeof(float));
    /******************************/
    check_gpu_error("Failed to allocate memory for variables!");
    /* initialize PML damping constant and per-axis coefficient tables */
    get_d0<<<1, 1>>>(dx, dz, nnx, nnz, npml, vp);
    initial_coffe<<<(nnx+511)/512, 512>>>(dt,nx,coffx1,coffx2,acoffx1,acoffx2,npml);
    initial_coffe<<<(nnz+511)/512, 512>>>(dt,nz,coffz1,coffz2,acoffz1,acoffz2,npml);
    cudaMemset(migration, 0, nz*nx*sizeof(float));
    cudaMemset(illumination, 0, nz*nx*sizeof(float));
    cudaMemset(adcigs, 0, nz*na*nx*sizeof(float));
    printf("--------------------------------------------------------\n");
    printf("---");
    start = clock();
    /**********IS Loop start*******/
    for(is=1;is<=ns;is++)
    {
        printf("\n--- IS=%3d ",is);
        /* zero every wavefield and per-shot buffer */
        cudaMemset(s_u0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_u1, 0, nnz*nnx*sizeof(float));
        cudaMemset(s_w0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_w1, 0, nnz*nnx*sizeof(float));
        cudaMemset(s_P, 0, nnz*nnx*sizeof(float)); cudaMemset(s_Q, 0, nnz*nnx*sizeof(float));
        cudaMemset(s_px0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_px1, 0, nnz*nnx*sizeof(float));
        cudaMemset(s_pz0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_pz1, 0, nnz*nnx*sizeof(float));
        cudaMemset(s_qx0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_qx1, 0, nnz*nnx*sizeof(float));
        cudaMemset(s_qz0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_qz1, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_u0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_u1, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_w0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_w1, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_P, 0, nnz*nnx*sizeof(float)); cudaMemset(g_Q, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_px0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_px1, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_pz0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_pz1, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_qx0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_qx1, 0, nnz*nnx*sizeof(float));
        cudaMemset(g_qz0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_qz1, 0, nnz*nnx*sizeof(float));
        cudaMemset(shot_Dev, 0, nt*nx*sizeof(float));
        cudaMemset(P_bndr, 0, nt*(2*nx+2*nz)*sizeof(float));
        cudaMemset(Q_bndr, 0, nt*(2*nx+2*nz)*sizeof(float));
        /*a***********************************Forward*******************************************/
        for(it=0,t=dt;it<nt;it++,t+=dt)
        {
            //if(it==0)printf(" > F >",is,it);
            /*a#####################a*/
            /*a## Forward ##a*/
            /*a#####################a*/
            add_source<<<1,1>>>(pfac,fs,zs,nx,nz,nnx,nnz,dt,t,favg,wtype,npml,is,ds,s_P,s_Q);
            update_vel<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npml,dt,dx,dz,
                    s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2);
            update_stress<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml,
                    s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0,
                    acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,true);
            /* NOTE(review): plain pointer assignment — from here on *0 and *1
             * alias the same buffers (and the original *0 allocations are
             * orphaned, then double-freed at cleanup). Looks like a
             * ping-pong swap was intended; confirm. */
            s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1;
            shot_record<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npml, it, nt, s_P, shot_Dev, true);
            wavefield_bndr<<<((2*nx+2*nz)+511)/512,512>>>(nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, true);
            cal_illumination<<<(nx*nz+511)/512, 512>>>(nnx, nnz, nz, npml, illumination, s_P, s_Q);
            /* snapshots of the first shot every 300 steps */
            if((is==1)&&(it%300==0))
            {
                cudaMemcpy(e, s_P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost);
                fwrite(e,4L,nnx*nnz,fpsnap);
            }
        }//it loop end
        mute_directwave<<<(nx*nt+511)/512, 512>>>(nx,nt,dt,favg,dx,dz,fs,ds,zs,is,vp,epsilon,shot_Dev,20);
        cudaMemcpy(shot_Hos, shot_Dev, nt*nx*sizeof(float), cudaMemcpyDeviceToHost);
        fseek(fpshot,(is-1)*nt*nx*sizeof(float),0);
        fwrite(shot_Hos,sizeof(float),nt*nx,fpshot);
        /*a***********************************Backward*******************************************/
        for(it=nt-1;it>=0;it--)
        {
            // if(it==0)printf(" B ",is,it);
            /*a#####################a*/
            /*a## Reconstruction ##a*/
            /*a#####################a*/
            /* re-inject the saved boundary, then step the source field backward */
            wavefield_bndr<<<((2*nx+2*nz)+511)/512,512>>>(nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, false);
            update_vel<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npml,dt,dx,dz,
                    s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2);
            update_stress<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml,
                    s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0,
                    acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false);
            s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1;
            /* if((is==1)&&(it%300==0))
            {
                cudaMemcpy(e, s_P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost);
                fwrite(e,4L,nnx*nnz,fpsnap);
            }*/
            /*a#####################a*/
            /*a## Backward ##a*/
            /*a#####################a*/
            /* inject the recorded data and step the receiver field backward */
            shot_record<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npml, it, nt, g_P, shot_Dev, false);
            shot_record<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npml, it, nt, g_Q, shot_Dev, false);
            update_vel<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npml,dt,dx,dz,
                    g_u0,g_w0,g_u1,g_w1,g_P,g_Q,coffx1,coffx2,coffz1,coffz2);
            update_stress<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt,dx,dz,g_u1,g_w1,g_P,g_Q,vp,npml,
                    g_px1,g_px0,g_pz1,g_pz0,g_qx1,g_qx0,g_qz1,g_qz0,
                    acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false);
            g_u0=g_u1; g_w0=g_w1; g_px0=g_px1; g_pz0=g_pz1; g_qx0=g_qx1; g_qz0=g_qz1;
            /* if((is==1)&&(it%300==0))
            {
                cudaMemcpy(e, g_P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost);
                fwrite(e,4L,nnx*nnz,fpsnap);
            }*/
            /* imaging conditions at this time step */
            cal_migration<<<(nx*nz+511)/512, 512>>>(nnx, nnz, nz, npml, migration, s_P, g_P);
            Poynting_Adcigs<<<(nx*nz+511)/512, 512>>>(nnz, nx, nz, npml, na, da, adcigs,
                    s_P, s_Q, s_u0, s_w0, g_P, g_Q, g_u0, g_w0);
        }//it loop end
    }//is loop end
    /* normalize image and gathers by illumination, then write all outputs */
    migration_illum<<<(nx*nz+511)/512, 512>>>(nx, nz, npml, migration, illumination);
    adcigs_illum<<<(nx*nz*na+511)/512, 512>>>(nx, nz, na, da, adcigs, illumination);
    printf("\n->");
    cudaMemcpy(e, migration, nz*nx*sizeof(float), cudaMemcpyDeviceToHost);
    migraiton_laplace_filter(1,nz,nx,e,d);
    fwrite(d,sizeof(float),nx*nz,fpmig);
    printf("->");
    cudaMemcpy(e, illumination, nz*nx*sizeof(float), cudaMemcpyDeviceToHost);
    fwrite(e,sizeof(float),nx*nz,fpillum);
    printf("->");
    cudaMemcpy(Atemp, adcigs, nz*nx*na*sizeof(float), cudaMemcpyDeviceToHost);
    fwrite(Atemp,sizeof(float),nz*nx*na,fpadcigs);
    printf("->");
    adcigs_laplace_filter(nx,nz,na,Atemp,2);/*1:(0-na);2:(na-0);3:(0-na-0)*/
    fwrite(Atemp,sizeof(float),nz*nx*na,fpadcigslaplace);
    printf("->");
    stk_adcigs(nx,nz,na,Atemp,d);
    fwrite(d,sizeof(float),nx*nz,fpstkadcigs);
    printf("->");
    adcigs_chouxi(nx,nz,na,nxa,dadcigs,Atemp);
    fwrite(Atemp,sizeof(float),nz*nxa*na,fpdadcigs);
    printf(" done!\n");
    end = clock();
    /*********IS Loop end*********/
    printf("\n--- Complete!!!!!!!!! \n");
    printf("total %d shots: %f (min)\n", ns, ((float)(end-start))/60.0/CLOCKS_PER_SEC);
    /***********close************/
    fclose(fpsnap); fclose(fpshot); fclose(fpmig);
    fclose(fpillum); fclose(fpadcigs);fclose(fpadcigslaplace);
    fclose(fpdadcigs); fclose(fpstkadcigs);
    /***********free*************/
    cudaFree(coffx1); cudaFree(coffx2);
    cudaFree(coffz1); cudaFree(coffz2);
    cudaFree(acoffx1); cudaFree(acoffx2);
    cudaFree(acoffz1); cudaFree(acoffz2);
    cudaFree(s_u0); cudaFree(s_u1);
    cudaFree(s_w0); cudaFree(s_w1);
    cudaFree(s_P); cudaFree(s_Q);
    cudaFree(s_px0); cudaFree(s_px1);
    cudaFree(s_pz0); cudaFree(s_pz1);
    cudaFree(s_qx0); cudaFree(s_qx1);
    cudaFree(s_qz0); cudaFree(s_qz1);
    cudaFree(g_u0); cudaFree(g_u1);
    cudaFree(g_w0); cudaFree(g_w1);
    cudaFree(g_P); cudaFree(g_Q);
    cudaFree(g_px0); cudaFree(g_px1);
    cudaFree(g_pz0); cudaFree(g_pz1);
    cudaFree(g_qx0); cudaFree(g_qx1);
    cudaFree(g_qz0); cudaFree(g_qz1);
    cudaFree(shot_Dev);
    cudaFree(P_bndr); cudaFree(Q_bndr);
    cudaFree(migration);
    cudaFree(illumination);
    cudaFree(adcigs);
    /***************host free*****************/
    free(v); free(e); free(d);
    free(shot_Hos); free(Atemp);
    exit(0);
}
|
7b009d8b376217b99fb93aeddaba1d47ffa0bcb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <E.Rozenberg@cwi.nl>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#ifdef __HIPCC__
#define inline __host__ __device__ inline
#include <vdt/sin.h>
#undef inline
#else
#include <vdt/sin.h>
#endif
#include "DataFormats/Math/interface/approx_log.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireCUDADevices.h"
#include "HeterogeneousCore/CUDAUtilities/interface/launch.h"
// Host-side RNG used to fill the input vectors (eng2 is unused here).
std::mt19937 eng;
std::mt19937 eng2;
std::uniform_real_distribution<float> rgen(0., 1.);
// Approximate math functions under test: degree-6 polynomial exp/log
// approximations, and vdt's fast sine (usable on host and device).
constexpr float myExp(float x) { return unsafe_expf<6>(x); }
constexpr float myLog(float x) { return unsafe_logf<6>(x); }
__host__ __device__ inline float mySin(float x) { return vdt::fast_sinf(x); }
// Selector values for the USE template parameter of testFunc.
constexpr int USEEXP = 0, USESIN = 1, USELOG = 2;
// Evaluate the approximate function selected by USE at x; when ADDY is
// set, also add y (exercises an extra operand in the kernel).
// The if-chain is resolved at compile time since USE is a template value.
template <int USE, bool ADDY = false>
// __host__ __device__
constexpr float testFunc(float x, float y) {
  float ret = 0;
  if (USE == USEEXP)
    ret = myExp(x);
  else if (USE == USESIN)
    ret = mySin(x);
  else
    ret = myLog(x);
  return ADDY ? ret + y : ret;
}
// Elementwise kernel: C[i] = testFunc<USE, ADDY>(A[i], B[i]).
// 1-D launch, one thread per element; tail threads are guarded.
template <int USE, bool ADDY>
__global__ void vectorOp(const float *A, const float *B, float *C, int numElements) {
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= numElements)
    return;
  C[idx] = testFunc<USE, ADDY>(A[idx], B[idx]);
}
// Host reference: the same elementwise computation as the vectorOp kernel.
template <int USE, bool ADDY>
void vectorOpH(const float *A, const float *B, float *C, int numElements) {
  int i = 0;
  while (i < numElements) {
    C[i] = testFunc<USE, ADDY>(A[i], B[i]);
    ++i;
  }
}
// Allocate host/device buffers, run the vectorOp kernel twice (the first
// launch absorbs warm-up cost), run the host reference twice, then compare
// device vs. host results both bit-wise (as int32) and in float magnitude.
// Phase timings are accumulated into `delta`.
template <int USE, bool ADDY = false>
void go() {
  auto start = std::chrono::high_resolution_clock::now();
  auto delta = start - start;  // zero-length duration accumulator
  int numElements = 200000;
  size_t size = numElements * sizeof(float);
  std::cout << "[Vector of " << numElements << " elements]\n";
  auto h_A = std::make_unique<float[]>(numElements);
  auto h_B = std::make_unique<float[]>(numElements);
  auto h_C = std::make_unique<float[]>(numElements);
  auto h_C2 = std::make_unique<float[]>(numElements);
  std::generate(h_A.get(), h_A.get() + numElements, [&]() { return rgen(eng); });
  std::generate(h_B.get(), h_B.get() + numElements, [&]() { return rgen(eng); });
  delta -= (std::chrono::high_resolution_clock::now() - start);
  // nullptr = default stream
  auto d_A = cudautils::make_device_unique<float[]>(numElements, nullptr);
  auto d_B = cudautils::make_device_unique<float[]>(numElements, nullptr);
  auto d_C = cudautils::make_device_unique<float[]>(numElements, nullptr);
  cudaCheck(hipMemcpy(d_A.get(), h_A.get(), size, hipMemcpyHostToDevice));
  cudaCheck(hipMemcpy(d_B.get(), h_B.get(), size, hipMemcpyHostToDevice));
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda alloc+copy took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // Launch the Vector OP CUDA Kernel
  int threadsPerBlock = 256;
  int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
  std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads\n";
  delta -= (std::chrono::high_resolution_clock::now() - start);
  cudautils::launch(
      vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // second launch: steady-state timing
  delta -= (std::chrono::high_resolution_clock::now() - start);
  cudautils::launch(
      vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  delta -= (std::chrono::high_resolution_clock::now() - start);
  cudaCheck(hipMemcpy(h_C.get(), d_C.get(), size, hipMemcpyDeviceToHost));
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda copy back took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // on host now...
  delta -= (std::chrono::high_resolution_clock::now() - start);
  vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  delta -= (std::chrono::high_resolution_clock::now() - start);
  vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // Verify that the result vector is correct: compare ULP-style integer
  // differences and absolute float differences against a host recompute.
  double ave = 0;
  int maxDiff = 0;
  long long ndiff = 0;
  double fave = 0;
  float fmaxDiff = 0;
  for (int i = 0; i < numElements; ++i) {
    approx_math::binary32 g, c;
    g.f = testFunc<USE, ADDY>(h_A[i], h_B[i]);
    c.f = h_C[i];
    auto diff = std::abs(g.i32 - c.i32);
    // NOTE(review): hipify rewrote std::max into ::max here (and below);
    // confirm the intended overload is the one found at global scope.
    maxDiff = ::max(diff, maxDiff);
    ave += diff;
    if (diff != 0)
      ++ndiff;
    auto fdiff = std::abs(g.f - c.f);
    fave += fdiff;
    fmaxDiff = ::max(fdiff, fmaxDiff);
    //  if (diff>7)
    //  std::cerr << "Large diff at element " << i << ' ' << diff << ' ' << std::hexfloat
    //            << g.f << "!=" << c.f << "\n";
  }
  std::cout << "ndiff ave, max " << ndiff << ' ' << ave / numElements << ' ' << maxDiff << std::endl;
  std::cout << "float ave, max " << fave / numElements << ' ' << fmaxDiff << std::endl;
  if (!ndiff) {
    std::cout << "Test PASSED\n";
    std::cout << "SUCCESS" << std::endl;
  }
  hipDeviceSynchronize();
}
// Run every variant (exp, sin, log, log+y); any CUDA/std runtime error
// aborts the process with EXIT_FAILURE.
int main() {
  requireCUDADevices();
  try {
    go<USEEXP>();
    go<USESIN>();
    go<USELOG>();
    go<USELOG, true>();  // exercises the ADDY path
  } catch (std::runtime_error &ex) {
    std::cerr << "CUDA or std runtime error: " << ex.what() << std::endl;
    exit(EXIT_FAILURE);
  } catch (...) {
    std::cerr << "A non-CUDA error occurred" << std::endl;
    exit(EXIT_FAILURE);
  }
  return EXIT_SUCCESS;
}
| 7b009d8b376217b99fb93aeddaba1d47ffa0bcb0.cu | /**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <E.Rozenberg@cwi.nl>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#ifdef __CUDACC__
#define inline __host__ __device__ inline
#include <vdt/sin.h>
#undef inline
#else
#include <vdt/sin.h>
#endif
#include "DataFormats/Math/interface/approx_log.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireCUDADevices.h"
#include "HeterogeneousCore/CUDAUtilities/interface/launch.h"
// Host-side RNG used to fill the input vectors (eng2 is unused here).
std::mt19937 eng;
std::mt19937 eng2;
std::uniform_real_distribution<float> rgen(0., 1.);
// Approximate math functions under test: degree-6 polynomial exp/log
// approximations, and vdt's fast sine (usable on host and device).
constexpr float myExp(float x) { return unsafe_expf<6>(x); }
constexpr float myLog(float x) { return unsafe_logf<6>(x); }
__host__ __device__ inline float mySin(float x) { return vdt::fast_sinf(x); }
// Selector values for the USE template parameter of testFunc.
constexpr int USEEXP = 0, USESIN = 1, USELOG = 2;
// Evaluate the approximate function selected by USE at x; when ADDY is
// set, also add y (exercises an extra operand in the kernel).
// The if-chain is resolved at compile time since USE is a template value.
template <int USE, bool ADDY = false>
// __host__ __device__
constexpr float testFunc(float x, float y) {
  float ret = 0;
  if (USE == USEEXP)
    ret = myExp(x);
  else if (USE == USESIN)
    ret = mySin(x);
  else
    ret = myLog(x);
  return ADDY ? ret + y : ret;
}
// Elementwise kernel: C[i] = testFunc<USE, ADDY>(A[i], B[i]).
// 1-D launch, one thread per element; tail threads are guarded.
template <int USE, bool ADDY>
__global__ void vectorOp(const float *A, const float *B, float *C, int numElements) {
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= numElements)
    return;
  C[idx] = testFunc<USE, ADDY>(A[idx], B[idx]);
}
// Host reference: the same elementwise computation as the vectorOp kernel.
template <int USE, bool ADDY>
void vectorOpH(const float *A, const float *B, float *C, int numElements) {
  int i = 0;
  while (i < numElements) {
    C[i] = testFunc<USE, ADDY>(A[i], B[i]);
    ++i;
  }
}
// Allocate host/device buffers, run the vectorOp kernel twice (the first
// launch absorbs warm-up cost), run the host reference twice, then compare
// device vs. host results both bit-wise (as int32) and in float magnitude.
// Phase timings are accumulated into `delta`.
template <int USE, bool ADDY = false>
void go() {
  auto start = std::chrono::high_resolution_clock::now();
  auto delta = start - start;  // zero-length duration accumulator
  int numElements = 200000;
  size_t size = numElements * sizeof(float);
  std::cout << "[Vector of " << numElements << " elements]\n";
  auto h_A = std::make_unique<float[]>(numElements);
  auto h_B = std::make_unique<float[]>(numElements);
  auto h_C = std::make_unique<float[]>(numElements);
  auto h_C2 = std::make_unique<float[]>(numElements);
  std::generate(h_A.get(), h_A.get() + numElements, [&]() { return rgen(eng); });
  std::generate(h_B.get(), h_B.get() + numElements, [&]() { return rgen(eng); });
  delta -= (std::chrono::high_resolution_clock::now() - start);
  // nullptr = default stream
  auto d_A = cudautils::make_device_unique<float[]>(numElements, nullptr);
  auto d_B = cudautils::make_device_unique<float[]>(numElements, nullptr);
  auto d_C = cudautils::make_device_unique<float[]>(numElements, nullptr);
  cudaCheck(cudaMemcpy(d_A.get(), h_A.get(), size, cudaMemcpyHostToDevice));
  cudaCheck(cudaMemcpy(d_B.get(), h_B.get(), size, cudaMemcpyHostToDevice));
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda alloc+copy took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // Launch the Vector OP CUDA Kernel
  int threadsPerBlock = 256;
  int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
  std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads\n";
  delta -= (std::chrono::high_resolution_clock::now() - start);
  cudautils::launch(
      vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // second launch: steady-state timing
  delta -= (std::chrono::high_resolution_clock::now() - start);
  cudautils::launch(
      vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  delta -= (std::chrono::high_resolution_clock::now() - start);
  cudaCheck(cudaMemcpy(h_C.get(), d_C.get(), size, cudaMemcpyDeviceToHost));
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "cuda copy back took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // on host now...
  delta -= (std::chrono::high_resolution_clock::now() - start);
  vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  delta -= (std::chrono::high_resolution_clock::now() - start);
  vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
  delta += (std::chrono::high_resolution_clock::now() - start);
  std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
            << std::endl;
  // Verify that the result vector is correct: compare ULP-style integer
  // differences and absolute float differences against a host recompute.
  double ave = 0;
  int maxDiff = 0;
  long long ndiff = 0;
  double fave = 0;
  float fmaxDiff = 0;
  for (int i = 0; i < numElements; ++i) {
    approx_math::binary32 g, c;
    g.f = testFunc<USE, ADDY>(h_A[i], h_B[i]);
    c.f = h_C[i];
    auto diff = std::abs(g.i32 - c.i32);
    maxDiff = std::max(diff, maxDiff);
    ave += diff;
    if (diff != 0)
      ++ndiff;
    auto fdiff = std::abs(g.f - c.f);
    fave += fdiff;
    fmaxDiff = std::max(fdiff, fmaxDiff);
    //  if (diff>7)
    //  std::cerr << "Large diff at element " << i << ' ' << diff << ' ' << std::hexfloat
    //            << g.f << "!=" << c.f << "\n";
  }
  std::cout << "ndiff ave, max " << ndiff << ' ' << ave / numElements << ' ' << maxDiff << std::endl;
  std::cout << "float ave, max " << fave / numElements << ' ' << fmaxDiff << std::endl;
  if (!ndiff) {
    std::cout << "Test PASSED\n";
    std::cout << "SUCCESS" << std::endl;
  }
  cudaDeviceSynchronize();
}
int main() {
  requireCUDADevices();
  // Run every kernel variant; any CUDA/runtime failure aborts the whole suite.
  auto runAll = [] {
    go<USEEXP>();
    go<USESIN>();
    go<USELOG>();
    go<USELOG, true>();
  };
  try {
    runAll();
  } catch (std::runtime_error &err) {
    std::cerr << "CUDA or std runtime error: " << err.what() << std::endl;
    exit(EXIT_FAILURE);
  } catch (...) {
    std::cerr << "A non-CUDA error occurred" << std::endl;
    exit(EXIT_FAILURE);
  }
  return EXIT_SUCCESS;
}
|
0334b6ec2c09cd06e9512abfacfcf305a66fecb0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Serialized trie implementation for C++/CUDA
* @file trie.cu
*/
#include "trie.cuh"
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/span.hpp>
#include <hip/hip_runtime.h>
#include <deque>
#include <string>
#include <vector>
namespace cudf {
namespace detail {
/**
 * @brief Flatten the key set into a BFS-ordered array of serial_trie_node
 * and upload it to device memory on `stream`.
 *
 * NOTE(review): `character` (a plain, possibly signed `char`) is used directly
 * as an array index below, and `alphabet_size` is only CHAR_MAX + 1 (128), so
 * keys containing bytes >= 0x80 would index out of bounds — this assumes
 * ASCII-only keys; confirm with callers.
 */
rmm::device_uvector<serial_trie_node> create_serialized_trie(const std::vector<std::string>& keys,
rmm::cuda_stream_view stream)
{
// Empty key set: return an empty (but stream-associated) device vector.
if (keys.empty()) { return rmm::device_uvector<serial_trie_node>{0, stream}; }
static constexpr int alphabet_size = std::numeric_limits<char>::max() + 1;
// Temporary pointer-based trie used only during construction.
struct TreeTrieNode {
using TrieNodePtr = std::unique_ptr<TreeTrieNode>;
std::array<TrieNodePtr, alphabet_size> children;
bool is_end_of_word = false;
};
// Construct a tree-structured trie
// The trie takes a lot of memory, but the lookup is fast:
// allows direct addressing of children nodes
TreeTrieNode tree_trie;
for (const auto& key : keys) {
auto* current_node = &tree_trie;
for (const char character : key) {
if (current_node->children[character] == nullptr)
current_node->children[character] = std::make_unique<TreeTrieNode>();
current_node = current_node->children[character].get();
}
// Node reached by the key's last character marks a complete key.
current_node->is_end_of_word = true;
}
// Pairs a tree node with its index in the serialized output (-1 for root).
// NOTE(review): idx is int16_t, so a serialized trie with more than 32767
// nodes would silently truncate — verify expected key-set sizes.
struct IndexedTrieNode {
TreeTrieNode const* const pnode;
int16_t const idx;
IndexedTrieNode(TreeTrieNode const* const node, int16_t index) : pnode(node), idx(index) {}
};
// Serialize the tree trie
std::deque<IndexedTrieNode> to_visit;
std::vector<serial_trie_node> nodes;
// If the Tree trie matches empty strings, the root node is marked as 'end of word'.
// The first node in the serialized trie is also used to match empty strings, so we're
// initializing it using the `is_end_of_word` value from the root node.
nodes.push_back(serial_trie_node(trie_terminating_character, tree_trie.is_end_of_word));
// Add root node to queue. this node is not included to the serialized trie
to_visit.emplace_back(&tree_trie, -1);
// Breadth-first traversal: each node's children land contiguously in `nodes`,
// delimited by a trailing terminating-character entry.
while (!to_visit.empty()) {
const auto node_and_idx = to_visit.front();
const auto node = node_and_idx.pnode;
const auto idx = node_and_idx.idx;
to_visit.pop_front();
bool has_children = false;
for (size_t i = 0; i < node->children.size(); ++i) {
if (node->children[i] != nullptr) {
// Update the children offset of the parent node, unless at the root
if (idx >= 0 && nodes[idx].children_offset < 0) {
nodes[idx].children_offset = static_cast<uint16_t>(nodes.size() - idx);
}
// Add node to the trie
nodes.push_back(serial_trie_node(static_cast<char>(i), node->children[i]->is_end_of_word));
// Add to the queue, with the index within the new trie
to_visit.emplace_back(node->children[i].get(), static_cast<uint16_t>(nodes.size()) - 1);
has_children = true;
}
}
// Only add the terminating character if any nodes were added
if (has_children) { nodes.push_back(serial_trie_node(trie_terminating_character)); }
}
// Blocking upload so `nodes` can safely go out of scope on return.
return cudf::detail::make_device_uvector_sync(
nodes, stream, rmm::mr::get_current_device_resource());
}
} // namespace detail
} // namespace cudf
| 0334b6ec2c09cd06e9512abfacfcf305a66fecb0.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Serialized trie implementation for C++/CUDA
* @file trie.cu
*/
#include "trie.cuh"
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/span.hpp>
#include <cuda_runtime.h>
#include <deque>
#include <string>
#include <vector>
namespace cudf {
namespace detail {
/**
 * @brief Flatten the key set into a BFS-ordered array of serial_trie_node
 * and upload it to device memory on `stream`.
 *
 * NOTE(review): `character` (a plain, possibly signed `char`) is used directly
 * as an array index below, and `alphabet_size` is only CHAR_MAX + 1 (128), so
 * keys containing bytes >= 0x80 would index out of bounds — this assumes
 * ASCII-only keys; confirm with callers.
 */
rmm::device_uvector<serial_trie_node> create_serialized_trie(const std::vector<std::string>& keys,
rmm::cuda_stream_view stream)
{
// Empty key set: return an empty (but stream-associated) device vector.
if (keys.empty()) { return rmm::device_uvector<serial_trie_node>{0, stream}; }
static constexpr int alphabet_size = std::numeric_limits<char>::max() + 1;
// Temporary pointer-based trie used only during construction.
struct TreeTrieNode {
using TrieNodePtr = std::unique_ptr<TreeTrieNode>;
std::array<TrieNodePtr, alphabet_size> children;
bool is_end_of_word = false;
};
// Construct a tree-structured trie
// The trie takes a lot of memory, but the lookup is fast:
// allows direct addressing of children nodes
TreeTrieNode tree_trie;
for (const auto& key : keys) {
auto* current_node = &tree_trie;
for (const char character : key) {
if (current_node->children[character] == nullptr)
current_node->children[character] = std::make_unique<TreeTrieNode>();
current_node = current_node->children[character].get();
}
// Node reached by the key's last character marks a complete key.
current_node->is_end_of_word = true;
}
// Pairs a tree node with its index in the serialized output (-1 for root).
// NOTE(review): idx is int16_t, so a serialized trie with more than 32767
// nodes would silently truncate — verify expected key-set sizes.
struct IndexedTrieNode {
TreeTrieNode const* const pnode;
int16_t const idx;
IndexedTrieNode(TreeTrieNode const* const node, int16_t index) : pnode(node), idx(index) {}
};
// Serialize the tree trie
std::deque<IndexedTrieNode> to_visit;
std::vector<serial_trie_node> nodes;
// If the Tree trie matches empty strings, the root node is marked as 'end of word'.
// The first node in the serialized trie is also used to match empty strings, so we're
// initializing it using the `is_end_of_word` value from the root node.
nodes.push_back(serial_trie_node(trie_terminating_character, tree_trie.is_end_of_word));
// Add root node to queue. this node is not included to the serialized trie
to_visit.emplace_back(&tree_trie, -1);
// Breadth-first traversal: each node's children land contiguously in `nodes`,
// delimited by a trailing terminating-character entry.
while (!to_visit.empty()) {
const auto node_and_idx = to_visit.front();
const auto node = node_and_idx.pnode;
const auto idx = node_and_idx.idx;
to_visit.pop_front();
bool has_children = false;
for (size_t i = 0; i < node->children.size(); ++i) {
if (node->children[i] != nullptr) {
// Update the children offset of the parent node, unless at the root
if (idx >= 0 && nodes[idx].children_offset < 0) {
nodes[idx].children_offset = static_cast<uint16_t>(nodes.size() - idx);
}
// Add node to the trie
nodes.push_back(serial_trie_node(static_cast<char>(i), node->children[i]->is_end_of_word));
// Add to the queue, with the index within the new trie
to_visit.emplace_back(node->children[i].get(), static_cast<uint16_t>(nodes.size()) - 1);
has_children = true;
}
}
// Only add the terminating character if any nodes were added
if (has_children) { nodes.push_back(serial_trie_node(trie_terminating_character)); }
}
// Blocking upload so `nodes` can safely go out of scope on return.
return cudf::detail::make_device_uvector_sync(
nodes, stream, rmm::mr::get_current_device_resource());
}
} // namespace detail
} // namespace cudf
|
ac0ba50dc50189717d4659ddc5585cbb8bbd7a23.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
#include <hip/hip_fp16.h>
#include "half_operator_overload.cuh"
#include "half2_operator_overload.cuh"
#include "newhalf.hpp"
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
/*------------------------------------------------------
** create_matrix() -- Fill m (size x size, row-major) with a symmetric
** Toeplitz matrix: entry (i,j) equals 10*exp(lamda*|i-j|), so values
** decay away from the diagonal. The caller creates the RHS separately.
**------------------------------------------------------
*/
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
/* Coefficient table indexed by (size-1 + i - j). Heap-allocated instead of
the original C99-style VLA `float coe[2*size-1]`, which is non-standard
in C++ and can overflow the stack for large matrices. */
float *coe = (float *) malloc((2*size-1) * sizeof(float));
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
/* Same coefficient on both sides of the diagonal (symmetry). */
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
free(coe);
}
/*------------------------------------------------------
** main() -- Parse the command line (-s N to generate a matrix, -f file to
** read one, -q for quiet), run the GPU forward elimination followed by the
** host back substitution, and report timings.
**------------------------------------------------------
*/
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
// NOTE(review): -s and -f consume argv[i+1] without checking it exists;
// a trailing flag with no argument reads past argv.
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
// Zero the multiplier matrix m before elimination.
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
// Wall-clock microseconds including host<->device transfers.
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
// Solve the resulting triangular system on the host.
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
// NOTE(review): reads 100 elements regardless of Size — out-of-bounds
// when Size < 100; confirm intended only for large benchmarks.
printf("100 elems of finalvec: \n");
PrintAry(finalVec,100);
// NOTE(review): finalVec (allocated in BackSub) is never freed.
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
/* Diagnostic helper: enumerate all HIP/CUDA devices and dump their
capabilities (memory sizes, limits, compute version, clock) to stdout. */
void PrintDeviceProperties(){
hipDeviceProp_t deviceProp;
int nDevCount = 0;
hipGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
// Clear the struct so stale fields are not printed if the query fails.
memset( &deviceProp, 0, sizeof(deviceProp));
if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
// Query failed: print the error string for this device index.
printf( "\n%s", hipGetErrorString(hipGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** Reads the dimension into the global Size, then fills the
** global matrix a and vector b from the file, and allocates
** the multiplier matrix m.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
fp = fopen(filename, "r");
/* Fail fast with a clear message instead of crashing on a NULL stream. */
if (fp == NULL) {
fprintf(stderr, "Error: cannot open input file %s\n", filename);
exit(EXIT_FAILURE);
}
/* The first token must be a positive matrix dimension. */
if (fscanf(fp, "%d", &Size) != 1 || Size <= 0) {
fprintf(stderr, "Error: invalid matrix dimension in %s\n", filename);
exit(EXIT_FAILURE);
}
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitPerRun() -- Reset every entry of the global Size x Size
** multiplier matrix m to zero before a run.
**------------------------------------------------------
*/
void InitPerRun()
{
for (int idx = 0; idx < Size * Size; idx++)
m[idx] = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
** of t which is defined on the ForwardSub().
**-------------------------------------------------------
*/
/*-------------------------------------------------------
** Fan1() -- Compute the multipliers for elimination step t on half2
** (two half values per thread) vectorized data:
** m[row][t] = a[row][t] / a[t][t] for rows t+1..Size-1.
** The data was transposed on upload (see ForwardSub), so a logical column
** is contiguous in memory here. The (t+1)%2 split handles half2 alignment:
** when the first column element falls on an odd half index, thread 0
** processes that one element as a scalar half in the else-branch.
** NOTE(review): semantics inferred from the index arithmetic — confirm
** against the float reference implementation.
**-------------------------------------------------------
*/
__global__ void Fan1(half2 *m_cuda, half2 *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
// Each thread covers two rows, hence the /2 bound; the (Size-1-t)==1 case
// is handled specially (single scalar element remains).
if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2 && (Size-1-t)!=1) return;
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t +!)/2 ) return; // +1 for boundary
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
// Scalar (half) views of the same buffers, for unaligned/odd elements.
half* a_cuda_half = (half*) a_cuda;
half* m_cuda_half = (half*) m_cuda;
//~ *(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
//~ m_cuda[(xidx+t+1)+Size/2*t] = a_cuda[(xidx+t+1)+Size/2*t] / __half2half2(a_cuda_half[Size*t+t]);
if ((t+1) %2 ==0 ){
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2+1) return;
//~ ((half2*)&(m_cuda_half[(t+1)+Size*t]))[xidx] = ((half2*)&(a_cuda_half[(t+1)+Size*t]))[xidx] / __half2half2(a_cuda_half[Size*t+t]);
// Even start index: the column segment is half2-aligned; divide two
// rows at once by the broadcast pivot a[t][t].
m_cuda[((t+1)+Size*t)/2 +xidx]= a_cuda[((t+1)+Size*t)/2 + xidx] / __half2half2(a_cuda_half[Size*t+t]);
}
else{
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2) return;
// Odd start index: vector part begins one element later (t+1+1)...
if((Size-1-t)!=1)
m_cuda[((t+1+1)+Size*t)/2 + xidx] = a_cuda[((t+1+1)+Size*t)/2 + xidx] / __half2half2(a_cuda_half[Size*t+t]);
// ...and thread 0 handles the leading unaligned element as a scalar.
if (threadIdx.x == 0){
m_cuda_half[(t+1+xidx)+Size*t] = a_cuda_half[(t+1+xidx)+Size*t] / a_cuda_half[Size*t+t];
}
}
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
/*-------------------------------------------------------
** Fan2() -- Apply the multipliers from Fan1 to the trailing submatrix:
** a[row][col] -= m[row][t] * a[t][col] (and likewise for b), on half2
** vectorized, transposed data. x indexes half2 pairs along a column,
** y indexes logical columns. As in Fan1, the (t+1)%2 branch handles
** half2 alignment, with the unaligned leading element updated as a
** scalar half by the xidx==0 threads.
** NOTE(review): semantics inferred from the index arithmetic — confirm
** against the float reference implementation.
**-------------------------------------------------------
*/
__global__ void Fan2(half2 *m_cuda, half2 *a_cuda, half2 *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2 && (Size-1-t)!=1) return;
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2 && (Size-1-t)!=1) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
// Scalar (half) views of the same buffers, for broadcasts and odd elements.
half* a_cuda_half = (half*) a_cuda;
half* b_cuda_half = (half*) b_cuda;
half* m_cuda_half = (half*) m_cuda;
int xidx = blockIdx.x * blockDim.x + threadIdx.x; //div-ed
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
if ((t+1) %2 ==0 ){
// Aligned case: update two rows of column (yidx+t) at once; the pivot-row
// value a[t][yidx+t] is broadcast into both lanes.
a_cuda[((1+t)+Size*(yidx+t))/2 + xidx] -= m_cuda[((1+t)+Size*t)/2 + xidx] * __half2half2(a_cuda_half[t+Size*(yidx+t)]);
if(yidx == 0){
// One column of threads also updates the right-hand side b.
b_cuda[(1+t)/2 + xidx] -= m_cuda[((1+t)+Size*(yidx+t))/2 + xidx] * __half2half2(b_cuda_half[t]);
}
}
else{
if((Size-1-t)!=1){
// Unaligned case: vector update starts one element later (1+t+1).
a_cuda[((1+t+1)+Size*(yidx+t))/2 + xidx] -= m_cuda[((1+t+1)+Size*t)/2 + xidx] * __half2half2(a_cuda_half[t+Size*(yidx+t)]);
//~ ((half2*)&(a_cuda_half[(1+t+1)+Size*(yidx+t)]))[xidx] -= ((half2*)&(m_cuda_half[(1+t+1)+Size*t]))[xidx] * __half2half2(a_cuda_half[t+Size*(yidx+t)]);
//~ ((half2*)&(a_cuda_half[(1+t+1)]))[xidx] -= ((half2*)&(m_cuda_half[(1+t+1)]))[xidx] * __half2half2(a_cuda_half[t]);
if(yidx == 0){
b_cuda[(1+t+1)/2 + xidx] -= m_cuda[((1+t+1)+Size*(yidx+t))/2 + xidx] * __half2half2(b_cuda_half[t]);
}
}
// Leading unaligned element handled as a scalar half.
if (xidx == 0){
a_cuda_half[(xidx+1+t)+Size*(yidx+t)] -= m_cuda_half[(xidx+1+t)+Size*t] * a_cuda_half[t+Size*(yidx+t)];
if(yidx == 0)
b_cuda_half[xidx+1+t] -= m_cuda_half[(xidx+1+t)+Size*(yidx+t)] * b_cuda_half[t];
}
}
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
//~ if(yidx == 0){
//~ //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//~ //printf("xidx:%d,yidx:%d\n",xidx,yidx);
//~ // b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
//~ b_cuda[xidx+1+t] -= m_cuda[(xidx+1+t)+Size/2*(yidx+t)] * __half2half2(b_cuda_half[t]);
//~ }
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian elimination.
** Converts the global float matrices/vector to half precision (transposing
** a and m so columns become contiguous for the kernels), uploads them,
** runs Fan1/Fan2 for each elimination step, and copies a and m back
** (transposing again) into the float globals.
**------------------------------------------------------
*/
void ForwardSub()
{
int t;
half2 *m_cuda,*a_cuda,*b_cuda;
half_float::half *m_half, *a_half,*b_half;
// Host-side half-precision staging buffers.
m_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half));
a_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half));
b_half = (half_float::half*)malloc( Size * sizeof(half_float::half));
// Convert to half AND transpose (note the swapped i/j on the right-hand
// side), so the kernels can walk a logical column contiguously.
for(int i =0;i<Size;i ++){
for(int j = 0; j< Size ; j ++){
m_half[i*Size + j] = half_float::half(m[j*Size + i]);
a_half[i*Size + j] = half_float::half(a[j*Size + i]);
}
}
for (int i=0; i<Size; i++){
b_half[i] = half_float::half(b[i]);
}
// allocate memory on GPU
// (Size*Size half elements; the half2 pointers just view pairs of them.)
hipMalloc((void **) &m_cuda, Size * Size * sizeof(half));
hipMalloc((void **) &a_cuda, Size * Size * sizeof(half));
hipMalloc((void **) &b_cuda, Size * sizeof(half));
// copy memory to GPU
hipMemcpy(m_cuda, m_half, Size * Size * sizeof(half),hipMemcpyHostToDevice );
hipMemcpy(a_cuda, a_half, Size * Size * sizeof(half),hipMemcpyHostToDevice );
hipMemcpy(b_cuda, b_half, Size * sizeof(half),hipMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
// Half-width blocks: each thread processes a half2 (two half values).
dim3 dimBlock(block_size/2);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY;
// NOTE(review): parenthesization differs from the 1-D computation above,
// but !(Size%blockSize2d ? 0 : 1) is still 1 exactly when Size is not
// divisible by blockSize2d, so this is the intended ceiling division.
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
dim3 dimBlockXY(blockSize2d/2,blockSize2d);
//~ dim3 dimBlockXY(blockSize2d/2,1);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
//~ t=(Size-2);
// One Fan1+Fan2 pair per elimination step t.
for (t=0; t<(Size-1); t++)
//for (t=0; t<2; t++)
{
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t);
hipDeviceSynchronize();
hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
hipDeviceSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
hipMemcpy(m_half, m_cuda, Size * Size * sizeof(half),hipMemcpyDeviceToHost );
hipMemcpy(a_half, a_cuda, Size * Size * sizeof(half),hipMemcpyDeviceToHost );
hipMemcpy(b_half, b_cuda, Size * sizeof(half),hipMemcpyDeviceToHost );
// Transpose back into the float globals.
// NOTE(review): b is copied back into b_half but never written to the
// float global b (the loop below is commented out) — BackSub therefore
// uses the ORIGINAL right-hand side; confirm this is intended.
for(int i =0;i<Size;i ++){
for(int j = 0; j< Size ; j ++){
m[i*Size +j] = float(m_half[j*Size + i]);
a[i*Size +j] = float(a_half[j*Size + i]);
//~ printf ("%f, ",float(a_half[i*Size + j]));
}
//~ printf("\n");
}
//~ for (int i=0; i<Size; i++){
//~ b[i] = float(b_half[i]);
//~ printf ("%f, ",float(b_half[i]));
//~ }
//~ printf("\n");
free(m_half);
free(a_half);
free(b_half);
hipFree(m_cuda);
hipFree(a_cuda);
hipFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
/*------------------------------------------------------
** BackSub() -- Backward substitution: solve the upper-triangular
** system left in the globals a and b, storing the answer in the
** global finalVec (allocated here).
**------------------------------------------------------
*/
void BackSub()
{
finalVec = (float *) malloc(Size * sizeof(float));
/* Solve bottom-up: row Size-1 first, then each row above it. */
for (int step = 0; step < Size; step++) {
int row = Size - step - 1;
finalVec[row] = b[row];
/* Subtract contributions of the unknowns already solved. */
for (int k = 0; k < step; k++) {
int col = Size - k - 1;
finalVec[row] -= a[Size * row + col] * finalVec[col];
}
finalVec[row] = finalVec[row] / a[Size * row + row];
}
}
/*------------------------------------------------------
** InitMat() -- Read an nrow x ncol matrix (row-major) from the
** already-open global file fp into ary.
**------------------------------------------------------
*/
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
/* Use the ncol parameter (not the global Size) as the row stride so
the routine is correct for any shape; identical for all current
callers, which always pass ncol == Size. Also fail loudly on a
truncated/malformed file instead of leaving garbage in ary. */
if (fscanf(fp, "%f", ary+ncol*i+j) != 1) {
fprintf(stderr, "Error: malformed matrix data in input file\n");
exit(EXIT_FAILURE);
}
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintMat() -- Print an nrow x ncol matrix (row-major), one row
** per line, 2 decimal places per entry.
** Uses the ncol parameter (not the global Size) as the row stride,
** so the routine is correct for any shape; identical for all current
** callers, which always pass ncol == Size.
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
printf("%8.2f ", *(ary+ncol*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitAry() -- Read ary_size whitespace-separated floats from the
** already-open global file fp into ary.
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
for (int k = 0; k < ary_size; k++)
fscanf(fp, "%f", ary + k);
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintAry() -- Print ary_size values with 2 decimal places,
** space-separated, followed by a blank line.
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
for (int k = 0; k < ary_size; k++)
printf("%.2f ", ary[k]);
printf("\n\n");
}
/* Abort with a descriptive message (prefixed by msg) if any prior
HIP/CUDA call or kernel launch has recorded an error. */
void checkCUDAError(const char *msg)
{
hipError_t status = hipGetLastError();
if (status != hipSuccess) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString(status));
exit(EXIT_FAILURE);
}
}
| ac0ba50dc50189717d4659ddc5585cbb8bbd7a23.cu | /*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>
#include <math.h>
#include <cuda_fp16.h>
#include "half_operator_overload.cuh"
#include "half2_operator_overload.cuh"
#include "newhalf.hpp"
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
/*------------------------------------------------------
** create_matrix() -- Fill m (size x size, row-major) with a symmetric
** Toeplitz matrix: entry (i,j) equals 10*exp(lamda*|i-j|), so values
** decay away from the diagonal. The caller creates the RHS separately.
**------------------------------------------------------
*/
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
/* Coefficient table indexed by (size-1 + i - j). Heap-allocated instead of
the original C99-style VLA `float coe[2*size-1]`, which is non-standard
in C++ and can overflow the stack for large matrices. */
float *coe = (float *) malloc((2*size-1) * sizeof(float));
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
/* Same coefficient on both sides of the diagonal (symmetry). */
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
free(coe);
}
/*------------------------------------------------------
** main() -- Parse the command line (-s N to generate a matrix, -f file to
** read one, -q for quiet), run the GPU forward elimination followed by the
** host back substitution, and report timings.
**------------------------------------------------------
*/
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
// NOTE(review): -s and -f consume argv[i+1] without checking it exists;
// a trailing flag with no argument reads past argv.
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
// Zero the multiplier matrix m before elimination.
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
// Wall-clock microseconds including host<->device transfers.
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
// Solve the resulting triangular system on the host.
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
// NOTE(review): reads 100 elements regardless of Size — out-of-bounds
// when Size < 100; confirm intended only for large benchmarks.
printf("100 elems of finalvec: \n");
PrintAry(finalVec,100);
// NOTE(review): finalVec (allocated in BackSub) is never freed.
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
/* Diagnostic helper: enumerate all CUDA devices and dump their
capabilities (memory sizes, limits, compute version, clock) to stdout. */
void PrintDeviceProperties(){
cudaDeviceProp deviceProp;
int nDevCount = 0;
cudaGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
// Clear the struct so stale fields are not printed if the query fails.
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
// Query failed: print the error string for this device index.
printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
	// Open the data file; fail loudly instead of crashing on a NULL stream.
	fp = fopen(filename, "r");
	if (fp == NULL) {
		fprintf(stderr, "Cannot open data file: %s\n", filename);
		exit(EXIT_FAILURE);
	}

	// First token in the file is the matrix dimension.
	if (fscanf(fp, "%d", &Size) != 1) {
		fprintf(stderr, "Failed to read matrix size from %s\n", filename);
		exit(EXIT_FAILURE);
	}

	// Coefficient matrix a (Size x Size), read from the file.
	a = (float *) malloc(Size * Size * sizeof(float));
	InitMat(a, Size, Size);
	//printf("The input matrix a is:\n");
	//PrintMat(a, Size, Size);

	// Right-hand-side vector b (Size entries), read from the file.
	b = (float *) malloc(Size * sizeof(float));
	InitAry(b, Size);
	//printf("The input array b is:\n");
	//PrintAry(b, Size);

	// Multiplier matrix m is only allocated here; InitPerRun() zeroes it.
	m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
	/* Zero every entry of the Size x Size multiplier matrix m
	   before a new elimination run. */
	int idx;
	for (idx = 0; idx < Size * Size; idx++)
		m[idx] = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
** of t which is defined on the ForwardSub().
**-------------------------------------------------------
*/
// Fan1: for elimination step t, compute the column of multipliers
//   m[.] = a[.] / a(t,t)
// processed two half elements at a time through half2 loads.  The branch on
// (t+1)%2 handles half2 alignment: when the first element to update is not
// 2-aligned, one scalar half is peeled off by thread 0 of the grid.
// NOTE(review): ForwardSub() stores the matrices transposed relative to the
// host float arrays, so the flat indices here address the transposed layout.
__global__ void Fan1(half2 *m_cuda, half2 *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
// Grid-tail guard (work is vectorized by 2).  The (Size-1-t)!=1 clause keeps
// one thread alive for the final step where only a single element remains.
if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2 && (Size-1-t)!=1) return;
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t +!)/2 ) return; // +1 for boundary
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
// Scalar (half) views of the same device buffers, used for the pivot value
// and for odd-aligned single-element accesses.
half* a_cuda_half = (half*) a_cuda;
half* m_cuda_half = (half*) m_cuda;
//~ *(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
//~ m_cuda[(xidx+t+1)+Size/2*t] = a_cuda[(xidx+t+1)+Size/2*t] / __half2half2(a_cuda_half[Size*t+t]);
if ((t+1) %2 ==0 ){
// Even-aligned case: the first element (t+1) starts on a half2 boundary, so
// every update is a full half2 divide by the broadcast pivot a(t,t).
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2+1) return;
//~ ((half2*)&(m_cuda_half[(t+1)+Size*t]))[xidx] = ((half2*)&(a_cuda_half[(t+1)+Size*t]))[xidx] / __half2half2(a_cuda_half[Size*t+t]);
m_cuda[((t+1)+Size*t)/2 +xidx]= a_cuda[((t+1)+Size*t)/2 + xidx] / __half2half2(a_cuda_half[Size*t+t]);
}
else{
// Odd-aligned case: start the vectorized updates one element later...
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2) return;
if((Size-1-t)!=1)
m_cuda[((t+1+1)+Size*t)/2 + xidx] = a_cuda[((t+1+1)+Size*t)/2 + xidx] / __half2half2(a_cuda_half[Size*t+t]);
// ...and let thread 0 handle the single unaligned leading element as a half.
if (threadIdx.x == 0){
m_cuda_half[(t+1+xidx)+Size*t] = a_cuda_half[(t+1+xidx)+Size*t] / a_cuda_half[Size*t+t];
}
}
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
// Fan2: rank-1 update of the trailing submatrix and RHS for step t:
//   a(.) -= m(.) * a(t,.)  and  b(.) -= m(.) * b(t)
// vectorized over half2 pairs in the x dimension.  j1 is passed by the
// caller as Size-t but is not referenced here.  Like Fan1, the (t+1)%2
// branch handles half2 alignment, with the unaligned leading element peeled
// off as a scalar by xidx==0 threads.
// NOTE(review): indices address the transposed storage set up by ForwardSub().
__global__ void Fan2(half2 *m_cuda, half2 *a_cuda, half2 *b_cuda,int Size, int j1, int t)
{
// Grid-tail guards for the vectorized x range and scalar y range.
if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2 && (Size-1-t)!=1) return;
//~ if(threadIdx.x + blockIdx.x * blockDim.x >= (Size-1-t)/2 && (Size-1-t)!=1) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
// Scalar half views for pivot-row/pivot-element broadcasts.
half* a_cuda_half = (half*) a_cuda;
half* b_cuda_half = (half*) b_cuda;
half* m_cuda_half = (half*) m_cuda;
int xidx = blockIdx.x * blockDim.x + threadIdx.x; //div-ed
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
if ((t+1) %2 ==0 ){
// Even-aligned: full half2 updates; yidx==0 threads also update the RHS.
a_cuda[((1+t)+Size*(yidx+t))/2 + xidx] -= m_cuda[((1+t)+Size*t)/2 + xidx] * __half2half2(a_cuda_half[t+Size*(yidx+t)]);
if(yidx == 0){
b_cuda[(1+t)/2 + xidx] -= m_cuda[((1+t)+Size*(yidx+t))/2 + xidx] * __half2half2(b_cuda_half[t]);
}
}
else{
// Odd-aligned: vectorized updates start one element later...
if((Size-1-t)!=1){
a_cuda[((1+t+1)+Size*(yidx+t))/2 + xidx] -= m_cuda[((1+t+1)+Size*t)/2 + xidx] * __half2half2(a_cuda_half[t+Size*(yidx+t)]);
//~ ((half2*)&(a_cuda_half[(1+t+1)+Size*(yidx+t)]))[xidx] -= ((half2*)&(m_cuda_half[(1+t+1)+Size*t]))[xidx] * __half2half2(a_cuda_half[t+Size*(yidx+t)]);
//~ ((half2*)&(a_cuda_half[(1+t+1)]))[xidx] -= ((half2*)&(m_cuda_half[(1+t+1)]))[xidx] * __half2half2(a_cuda_half[t]);
if(yidx == 0){
b_cuda[(1+t+1)/2 + xidx] -= m_cuda[((1+t+1)+Size*(yidx+t))/2 + xidx] * __half2half2(b_cuda_half[t]);
}
}
// ...and xidx==0 threads handle the single unaligned element as a scalar.
if (xidx == 0){
a_cuda_half[(xidx+1+t)+Size*(yidx+t)] -= m_cuda_half[(xidx+1+t)+Size*t] * a_cuda_half[t+Size*(yidx+t)];
if(yidx == 0)
b_cuda_half[xidx+1+t] -= m_cuda_half[(xidx+1+t)+Size*(yidx+t)] * b_cuda_half[t];
}
}
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
//~ if(yidx == 0){
//~ //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//~ //printf("xidx:%d,yidx:%d\n",xidx,yidx);
//~ // b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
//~ b_cuda[xidx+1+t] -= m_cuda[(xidx+1+t)+Size/2*(yidx+t)] * __half2half2(b_cuda_half[t]);
//~ }
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
// ForwardSub() -- forward elimination of Gaussian elimination, run on the
// GPU in half precision.  Converts the global float matrices a/m and the
// vector b to half, runs Size-1 Fan1/Fan2 elimination steps, then copies
// the (still half precision) results back into a and m.  Kernel wall time
// is accumulated into the global totalKernelTime (microseconds).
void ForwardSub()
{
	int t;
	half2 *m_cuda, *a_cuda, *b_cuda;
	half_float::half *m_half, *a_half, *b_half;

	// Host-side half staging buffers.  The matrices are stored TRANSPOSED
	// relative to the float arrays so the kernels can treat consecutive
	// elements as half2 pairs.
	m_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half));
	a_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half));
	b_half = (half_float::half*)malloc( Size * sizeof(half_float::half));
	for(int i = 0; i < Size; i++){
		for(int j = 0; j < Size; j++){
			m_half[i*Size + j] = half_float::half(m[j*Size + i]);
			a_half[i*Size + j] = half_float::half(a[j*Size + i]);
		}
	}
	for (int i = 0; i < Size; i++){
		b_half[i] = half_float::half(b[i]);
	}

	// Device buffers: sized in units of half even though the pointers are
	// half2 -- the element count, not the pointer type, sets the footprint.
	cudaMalloc((void **) &m_cuda, Size * Size * sizeof(half));
	cudaMalloc((void **) &a_cuda, Size * Size * sizeof(half));
	cudaMalloc((void **) &b_cuda, Size * sizeof(half));

	// Stage the half-precision data on the device.
	cudaMemcpy(m_cuda, m_half, Size * Size * sizeof(half), cudaMemcpyHostToDevice );
	cudaMemcpy(a_cuda, a_half, Size * Size * sizeof(half), cudaMemcpyHostToDevice );
	cudaMemcpy(b_cuda, b_half, Size * sizeof(half), cudaMemcpyHostToDevice );

	// 1-D launch geometry for Fan1; block width is halved because each
	// thread processes a half2 (two elements).
	int block_size, grid_size;
	block_size = MAXBLOCKSIZE;
	grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
	//printf("1d grid size: %d\n",grid_size);
	dim3 dimBlock(block_size/2);
	dim3 dimGrid(grid_size);

	// 2-D launch geometry for Fan2; x dimension likewise halved.
	int blockSize2d, gridSize2d;
	blockSize2d = BLOCK_SIZE_XY;
	gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
	dim3 dimBlockXY(blockSize2d/2, blockSize2d);
	dim3 dimGridXY(gridSize2d, gridSize2d);

	// begin timing kernels
	struct timeval time_start;
	gettimeofday(&time_start, NULL);

	// One elimination step per pivot column t.
	for (t = 0; t < (Size-1); t++)
	{
		Fan1<<<dimGrid,dimBlock>>>(m_cuda, a_cuda, Size, t);
		// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
		// is the supported equivalent.
		cudaDeviceSynchronize();
		checkCUDAError("Fan1");
		Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda, a_cuda, b_cuda, Size, Size-t, t);
		cudaDeviceSynchronize();
		checkCUDAError("Fan2");
	}

	// end timing kernels
	struct timeval time_end;
	gettimeofday(&time_end, NULL);
	totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);

	// Copy results back and transpose into the row-major float arrays.
	cudaMemcpy(m_half, m_cuda, Size * Size * sizeof(half), cudaMemcpyDeviceToHost );
	cudaMemcpy(a_half, a_cuda, Size * Size * sizeof(half), cudaMemcpyDeviceToHost );
	cudaMemcpy(b_half, b_cuda, Size * sizeof(half), cudaMemcpyDeviceToHost );
	for(int i = 0; i < Size; i++){
		for(int j = 0; j < Size; j++){
			m[i*Size + j] = float(m_half[j*Size + i]);
			a[i*Size + j] = float(a_half[j*Size + i]);
		}
	}
	// NOTE(review): b is intentionally NOT copied back into the float array
	// (the original code had this commented out), so BackSub() consumes the
	// pre-elimination b -- confirm this is the intended behavior.

	free(m_half);
	free(a_half);
	free(b_half);
	cudaFree(m_cuda);
	cudaFree(a_cuda);
	cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
	/* Allocate the solution vector. */
	finalVec = (float *) malloc(Size * sizeof(float));
	/* Back-substitute through the upper-triangular system, last row first.
	   Columns are subtracted in descending order, matching the original
	   accumulation order exactly. */
	int row, col;
	for (row = Size - 1; row >= 0; row--) {
		finalVec[row] = b[row];
		for (col = Size - 1; col > row; col--) {
			finalVec[row] -= *(a + Size * row + col) * finalVec[col];
		}
		finalVec[row] = finalVec[row] / *(a + Size * row + row);
	}
}
// InitMat() -- read nrow*ncol floats from the already-open data file fp into
// ary (row-major).  Uses the ncol parameter as the row stride -- the original
// used the global Size, which silently assumed a Size x Size matrix; with
// ncol the function works for any shape while behaving identically for the
// existing InitMat(a, Size, Size) caller.
void InitMat(float *ary, int nrow, int ncol)
{
	int i, j;

	for (i=0; i<nrow; i++) {
		for (j=0; j<ncol; j++) {
			fscanf(fp, "%f", ary + ncol*i + j);
		}
	}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
// PrintMat() -- print an nrow x ncol matrix, one row per line.  Uses the
// ncol parameter as the row stride instead of the global Size (the original
// implicitly required nrow == ncol == Size); identical output for the
// existing Size x Size callers.
void PrintMat(float *ary, int nrow, int ncol)
{
	int i, j;

	for (i=0; i<nrow; i++) {
		for (j=0; j<ncol; j++) {
			printf("%8.2f ", *(ary + ncol*i + j));
		}
		printf("\n");
	}
	printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
	/* Fill ary with ary_size floats read from the open data file fp. */
	int idx = 0;
	while (idx < ary_size) {
		fscanf(fp, "%f", ary + idx);
		idx++;
	}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
	/* Print the vector on a single line, two decimals per entry,
	   followed by a blank line. */
	int idx;
	for (idx = 0; idx < ary_size; idx++)
		printf("%.2f ", ary[idx]);
	printf("\n\n");
}
void checkCUDAError(const char *msg)
{
	/* Abort with a descriptive message if any prior CUDA call or kernel
	   launch has recorded an error.  cudaGetLastError() also clears the
	   sticky error state. */
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Cuda error: %s: %s.\n", msg,
			cudaGetErrorString( err) );
		exit(EXIT_FAILURE);
	}
}
|
b51f0a92c999d702836e464b99ceb020f6514494.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudaPrint/cuPrintf.cu"
// Smith-Waterman-style local-alignment scoring kernel (affine gaps).
// Each block scores one Length-sized chunk of the main sequence against the
// whole query; each thread owns one query row (myRow = threadIdx.x) and the
// computation sweeps anti-diagonals of the DP matrix, so only two previous
// diagonals of H plus the F column need to be kept (in shared memory).
// Dynamic shared memory: 4 * blockDim.x ints (maxScores, F, H_prev_prev=A,
// H_prev=B).  The block maximum is written to maxScoresPerBlock[blockIdx.x].
// NOTE(review): the final reduction reads maxScores[0..querySequenceLength-1],
// which assumes blockDim.x >= querySequenceLength -- confirm launch config.
__global__ void diagonalComputation(int mainSequenceLength, int querySequenceLength, int *mainSeqAll, int *querySeq,
int *maxScoresPerBlock,
int Length,
int matchScore, int mismatchScore, int gapStartScore, int gapExtensionScore)
{
int hVal = 0, eVal = 0, fVal = 0; //Score of current cell being handled by this thread
extern __shared__ int buffer[];
//H_score_A -- for main scores of previous of previous diagonal
//H_score_B -- for scores of previous diagonal
int myRow = threadIdx.x;
int myBlock = blockIdx.x;
int *mainSeq = &mainSeqAll[myBlock * Length]; //This block should compute a portion of whole mainSequence
//Postion following buffers through shared memory
int *maxScores = &buffer[0*blockDim.x]; //Score keeper for threads of this Block
int *F_score = &buffer[1*blockDim.x];
int *H_score_A = &buffer[2*blockDim.x];
int *H_score_B = &buffer[3*blockDim.x];
maxScores[myRow] = 0;
H_score_B[myRow] = 0;
__syncthreads();
//Begin computing scores
int tidPos = 0; //current column position of tid
int myQueryChar = querySeq[myRow];
// One iteration per anti-diagonal; thread myRow joins in once the wavefront
// reaches its row and stops after its last column.
for (int i=1; i<= mainSequenceLength + querySequenceLength -1; i++)
{
if (tidPos + myRow < i && tidPos < mainSequenceLength) //Should this thread compute?
{
//Horizontal dependency
eVal = max(eVal - gapExtensionScore, H_score_B[myRow] - gapStartScore);
//Vertical Dependency
int upperFValue = (myRow==0)? 0 : F_score[myRow-1];
int upperHValue = (myRow==0)? 0 : H_score_B[myRow -1];
fVal = max(upperFValue - gapExtensionScore, upperHValue - gapStartScore);
//Diagonal Dependency
int simScore = (mainSeq[tidPos] == myQueryChar)? matchScore : mismatchScore;
int diagonalHValue = (myRow==0)? 0 : H_score_A[myRow -1];
hVal = diagonalHValue + simScore;
//Maxima (clamped at 0 -- local alignment)
hVal = max(max(eVal, fVal), max(hVal, 0));
tidPos++;
}
__syncthreads(); //To make sure this diagonal is computed
//Save the values: rotate H buffers (B becomes A), then publish this
//diagonal's F and H and fold the score into the per-thread maximum.
H_score_A[myRow] = H_score_B[myRow];
__syncthreads(); //To make sure A is saved before editing B
F_score[myRow] = fVal;
H_score_B[myRow] = hVal;
maxScores[myRow] = max(maxScores[myRow], hVal);
__syncthreads(); //To make sure values are saved before the computation of next diagonal
}
//Save the maximum score of this block (serial scan by thread 0)
if(myRow==0)
{
maxScoresPerBlock[myBlock] = 0;
for (int i=0; i< querySequenceLength; i++)
maxScoresPerBlock[myBlock] = max( maxScoresPerBlock[myBlock], maxScores[i]);
}
}
| b51f0a92c999d702836e464b99ceb020f6514494.cu | #include "../include/cudaPrint/cuPrintf.cu"
// Smith-Waterman-style local-alignment scoring kernel (affine gaps).
// Each block scores one Length-sized chunk of the main sequence against the
// whole query; each thread owns one query row (myRow = threadIdx.x) and the
// computation sweeps anti-diagonals of the DP matrix, so only two previous
// diagonals of H plus the F column need to be kept (in shared memory).
// Dynamic shared memory: 4 * blockDim.x ints (maxScores, F, H_prev_prev=A,
// H_prev=B).  The block maximum is written to maxScoresPerBlock[blockIdx.x].
// NOTE(review): the final reduction reads maxScores[0..querySequenceLength-1],
// which assumes blockDim.x >= querySequenceLength -- confirm launch config.
__global__ void diagonalComputation(int mainSequenceLength, int querySequenceLength, int *mainSeqAll, int *querySeq,
int *maxScoresPerBlock,
int Length,
int matchScore, int mismatchScore, int gapStartScore, int gapExtensionScore)
{
int hVal = 0, eVal = 0, fVal = 0; //Score of current cell being handled by this thread
extern __shared__ int buffer[];
//H_score_A -- for main scores of previous of previous diagonal
//H_score_B -- for scores of previous diagonal
int myRow = threadIdx.x;
int myBlock = blockIdx.x;
int *mainSeq = &mainSeqAll[myBlock * Length]; //This block should compute a portion of whole mainSequence
//Postion following buffers through shared memory
int *maxScores = &buffer[0*blockDim.x]; //Score keeper for threads of this Block
int *F_score = &buffer[1*blockDim.x];
int *H_score_A = &buffer[2*blockDim.x];
int *H_score_B = &buffer[3*blockDim.x];
maxScores[myRow] = 0;
H_score_B[myRow] = 0;
__syncthreads();
//Begin computing scores
int tidPos = 0; //current column position of tid
int myQueryChar = querySeq[myRow];
// One iteration per anti-diagonal; thread myRow joins in once the wavefront
// reaches its row and stops after its last column.
for (int i=1; i<= mainSequenceLength + querySequenceLength -1; i++)
{
if (tidPos + myRow < i && tidPos < mainSequenceLength) //Should this thread compute?
{
//Horizontal dependency
eVal = max(eVal - gapExtensionScore, H_score_B[myRow] - gapStartScore);
//Vertical Dependency
int upperFValue = (myRow==0)? 0 : F_score[myRow-1];
int upperHValue = (myRow==0)? 0 : H_score_B[myRow -1];
fVal = max(upperFValue - gapExtensionScore, upperHValue - gapStartScore);
//Diagonal Dependency
int simScore = (mainSeq[tidPos] == myQueryChar)? matchScore : mismatchScore;
int diagonalHValue = (myRow==0)? 0 : H_score_A[myRow -1];
hVal = diagonalHValue + simScore;
//Maxima (clamped at 0 -- local alignment)
hVal = max(max(eVal, fVal), max(hVal, 0));
tidPos++;
}
__syncthreads(); //To make sure this diagonal is computed
//Save the values: rotate H buffers (B becomes A), then publish this
//diagonal's F and H and fold the score into the per-thread maximum.
H_score_A[myRow] = H_score_B[myRow];
__syncthreads(); //To make sure A is saved before editing B
F_score[myRow] = fVal;
H_score_B[myRow] = hVal;
maxScores[myRow] = max(maxScores[myRow], hVal);
__syncthreads(); //To make sure values are saved before the computation of next diagonal
}
//Save the maximum score of this block (serial scan by thread 0)
if(myRow==0)
{
maxScoresPerBlock[myBlock] = 0;
for (int i=0; i< querySequenceLength; i++)
maxScoresPerBlock[myBlock] = max( maxScoresPerBlock[myBlock], maxScores[i]);
}
}
|
fcac34a0128a0ca65aec407500265170712d2e22.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// exp.cpp
#include <nbla/cuda/function/exp.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
// Elementwise exponential: forward y = exp(x), backward gradient dy * exp(x).
// NOTE(review): the trailing `false` flag's semantics are defined by the
// macro in base_transform_unary.cuh -- confirm (presumably whether the
// backward pass needs the forward output buffer).
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(Exp, ::exp(x), dy *exp(x), false);
}
| fcac34a0128a0ca65aec407500265170712d2e22.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// exp.cpp
#include <nbla/cuda/function/exp.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
// Elementwise exponential: forward y = exp(x), backward gradient dy * exp(x).
// NOTE(review): the trailing `false` flag's semantics are defined by the
// macro in base_transform_unary.cuh -- confirm (presumably whether the
// backward pass needs the forward output buffer).
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(Exp, std::exp(x), dy *exp(x), false);
}
|
4f534ab126dcd0c7606bc707089bcf86b44b4e95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by lidan on 2020/9/20.
//
#include "book.cuh"
#define N 10
// Elementwise vector addition: c[i] = a[i] + b[i] for i < N.
// Uses the standard global-index idiom so the kernel is correct for any
// <<<blocks, threads>>> shape; with the caller's <<<N,1>>> launch this
// reduces to blockIdx.x, matching the original behavior exactly.  The
// bound check guards the grid tail.
__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}
int main(void)
{
    // Host inputs/output and their device mirrors.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    const size_t bytes = N * sizeof(int);

    // Reserve device buffers for both operands and the result.
    HANDLE_ERROR( hipMalloc( (void**)&dev_a, bytes ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_b, bytes ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_c, bytes ) );

    // Prepare the operands on the CPU: a[i] = -i, b[i] = i*i.
    for (int idx = 0; idx < N; idx++) {
        a[idx] = -idx;
        b[idx] = idx * idx;
    }

    // Stage both operands on the device.
    HANDLE_ERROR( hipMemcpy( dev_a, a, bytes,
                             hipMemcpyHostToDevice ) );
    HANDLE_ERROR( hipMemcpy( dev_b, b, bytes,
                             hipMemcpyHostToDevice ) );

    // One block per element, one thread per block.
    hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0,  dev_a, dev_b, dev_c );

    // Fetch the result vector back to the host.
    HANDLE_ERROR( hipMemcpy( c, dev_c, bytes,
                             hipMemcpyDeviceToHost ) );

    // Report every sum.
    for (int idx = 0; idx < N; idx++) {
        printf( "%d + %d = %d\n", a[idx], b[idx], c[idx] );
    }

    // Release the device buffers.
    HANDLE_ERROR( hipFree( dev_a ) );
    HANDLE_ERROR( hipFree( dev_b ) );
    HANDLE_ERROR( hipFree( dev_c ) );
    return 0;
}
| 4f534ab126dcd0c7606bc707089bcf86b44b4e95.cu | //
// Created by lidan on 2020/9/20.
//
#include "book.cuh"
#define N 10
// Elementwise vector addition: c[i] = a[i] + b[i] for i < N.
// Uses the standard global-index idiom so the kernel is correct for any
// <<<blocks, threads>>> shape; with the caller's <<<N,1>>> launch this
// reduces to blockIdx.x, matching the original behavior exactly.  The
// bound check guards the grid tail.
__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}
int main(void)
{
    // Host inputs/output and their device mirrors.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    const size_t bytes = N * sizeof(int);

    // Reserve device buffers for both operands and the result.
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, bytes ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, bytes ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, bytes ) );

    // Prepare the operands on the CPU: a[i] = -i, b[i] = i*i.
    for (int idx = 0; idx < N; idx++) {
        a[idx] = -idx;
        b[idx] = idx * idx;
    }

    // Stage both operands on the device.
    HANDLE_ERROR( cudaMemcpy( dev_a, a, bytes,
                              cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, bytes,
                              cudaMemcpyHostToDevice ) );

    // One block per element, one thread per block.
    add<<<N,1>>>( dev_a, dev_b, dev_c );

    // Fetch the result vector back to the host.
    HANDLE_ERROR( cudaMemcpy( c, dev_c, bytes,
                              cudaMemcpyDeviceToHost ) );

    // Report every sum.
    for (int idx = 0; idx < N; idx++) {
        printf( "%d + %d = %d\n", a[idx], b[idx], c[idx] );
    }

    // Release the device buffers.
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_c ) );
    return 0;
}
|
c2df7488fe884bfcc3c366b8cf1d7643ac6c9a3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#define MODID pre
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
#include "gradops_cdf.cuh"
#include "dervfields_cdf.cuh"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
// Recompute the derived velocity field (wd) from the conserved variables
// (wmod) at Runge-Kutta substep `order`, for the sweep direction `dir`.
// Each thread owns a tile of npgp[0] x npgp[1] (x npgp[2] in 3-D) grid
// cells; the guards exclude the two-cell halo in the directions transverse
// to `dir`.  computevel3_cdf is defined in dervfields_cdf.cuh -- presumably
// it derives velocity from momentum/density; confirm there.
__global__ void computevels_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];
// dimp: cells per variable; wmod holds NVAR variables per RK order slot.
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread index into per-thread tile coordinates.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
// Only the transverse directions exclude the 2-cell halo; the sweep
// direction itself covers the full extent.
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
//if(i<(ni) && j >1 && j<(nj-1))
computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
//if(i>1 && i<(ni-1) && j<(nj))
computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
//if(i>1 && i<(ni-1) && j<(nj))
computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
break;
#endif
}
}
}
__syncthreads();
}
// Recompute derived pressure fields (wd) from the conserved variables
// (wmod) at RK substep `order`.  computepk3_cdf / computept3_cdf are defined
// in dervfields_cdf.cuh -- presumably kinetic and total pressure; confirm
// there.  Unlike computevels_parallel, pressures are recomputed over the
// whole grid (the direction-dependent halo logic is retained below as a
// commented-out block) and `dir` is currently unused.
__global__ void computepres_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];
// dimp: cells per variable; wmod holds NVAR variables per RK order slot.
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread index into per-thread tile coordinates.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
// Both branches are currently identical; the #ifdef is kept for parity
// with the other kernels in this file.
#ifdef ADIABHYDRO
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
/* switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
{
#ifdef ADIABHYDRO
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
}
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
{
#ifdef ADIABHYDRO
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
}
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
{
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
}
break;
#endif
}*/
}
}
__syncthreads();
}
// Determine the global maximum wave speed (p->cmax) used for the CFL time
// step.  The per-cell parallel phase is commented out; what remains is a
// fully serial scan of the grid by thread 0 (iindex==0), calling
// computecmax3_cdf per cell (defined elsewhere -- presumably updates
// p->cmax from the per-cell fast speed; confirm).
// NOTE(review): under USE_SAC_3D the innermost loop is written as
// `for(ii[2]>1;ii[2]<((p->n[2])-2);ii[2]++)` -- the init expression does not
// initialize ii[2], so it is read uninitialized.  Likely intended:
// `for(ii[2]=2; ...)`.  Confirm before relying on the 3-D path.
__global__ void computemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
/* for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//p->cmax=0.0;
}
}*/
__syncthreads();
// Serial reduction over the whole grid performed by a single thread.
if(iindex==0)
{
// for(ipg=0;ipg<(p->npgp[0]);ipg++)
// for(jpg=0;jpg<(p->npgp[1]);jpg++)
// {
// i=ip*(p->npgp[0])+ipg;
// j=jp*(p->npgp[1])+jpg;
//if( i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
//p->cmax=0.0;
for(ii[0]=0;ii[0]<((p->n[0]));ii[0]++)
for(ii[1]=0;ii[1]<((p->n[1]));ii[1]++)
#ifdef USE_SAC_3D
for(ii[2]>1;ii[2]<((p->n[2])-2);ii[2]++)
#endif
{
computecmax3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
}
// }
}
__syncthreads();
}
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory
*/
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory
*/
// Max-reduce the per-cell fast speed wd[cfast] into p->cmax using shared
// memory.  Each thread folds its tile of cells into sdata[tid], then a
// tree reduction (assumes blockDim.x is a power of two) leaves the block
// maximum in sdata[0]; tid 0 publishes it to p->cmax.  A second identical
// pass then re-folds cells exceeding the published p->cmax.
// NOTE(review): p->cmax is written by tid 0 of EVERY block with no
// inter-block ordering, and the second pass reads it back without a grid
// sync and without resetting sdata -- correctness across multiple blocks
// looks doubtful; confirm this kernel is launched with one block, or
// treat the result as approximate.
__global__ void fastcomputemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp,ipg,jpg;
// Dynamic shared memory: blockDim.x reals for the block-local maxima.
extern __shared__ real sdata[];
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// perform first level of reduction,
// reading from global memory, writing to shared memory
sdata[tid]=0.0;
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>sdata[tid])
sdata[tid]=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
// do reduction in shared mem (tree reduction; assumes power-of-two blockDim.x)
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
//sdata[tid] += sdata[tid + s];
//if(sdata[tid]>sdata[0])
// sdata[0]=sdata[tid];
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid+s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];
// Second pass: re-scan this thread's cells against the published p->cmax.
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
sdata[tid]=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
//sdata[tid] += sdata[tid + s];
//if(sdata[tid]>sdata[0])
// sdata[0]=sdata[tid];
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid+s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];
}
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
// Variant of the cmax reduction using the interleaved (modulo) scheme.
// NOTE(review): the brace structure places the shared-memory reduction --
// including its __syncthreads() calls -- INSIDE the per-cell bounds-checked
// `if` within the per-thread tile loops.  __syncthreads() inside divergent
// control flow is undefined behavior (threads that fail the guard never
// reach the barrier) and can deadlock; this also repeats the full reduction
// once per cell.  This kernel looks structurally broken -- confirm it is
// unused or fix the nesting before relying on it.
__global__ void reduction0computemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp,ipg,jpg;
// Dynamic shared memory: blockDim.x reals for the block-local maxima.
extern __shared__ real sdata[];
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// perform first level of reduction,
// reading from global memory, writing to shared memory
sdata[tid]=0.0;
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
// if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
sdata[tid]=wd[fencode3_cdf(p,ii,cfast)];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0) {
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];
// Serial follow-up scan against the published p->cmax.
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
p->cmax=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
}
}
}
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
// Variant of the interleaved-modulo cmax reduction that reads wd[cfast]
// directly via the flat index (one cell per thread) for the first level,
// tree-reduces in shared memory, publishes the block result to p->cmax,
// then re-scans this thread's tile updating p->cmax in place.
// NOTE(review): the in-place `p->cmax = ...` updates in the second phase
// are unsynchronized read-modify-writes shared by all threads/blocks -- a
// data race; the result is only a best-effort maximum unless launched with
// a single thread doing the scan.  Also the tree reduction reads
// sdata[tid+s] without guarding against tid+s >= blockDim.x active data
// when dimp is not a multiple of blockDim.x (those slots are zero-filled,
// which is safe for a max of non-negative speeds).
__global__ void reductiona0computemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp,ipg,jpg;
// Dynamic shared memory: blockDim.x reals for the block-local maxima.
extern __shared__ real sdata[];
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// perform first level of reduction,
// reading from global memory, writing to shared memory
sdata[tid]=0.0;
if(iindex<dimp)
sdata[tid]=wd[iindex+(dimp*cfast)];
/* if(iindex<dimp)
if(wd[iindex+(dimp*cfast)]>(p->cmax))
sdata[tid]=wd[iindex+(dimp*cfast)];*/
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0) {
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];
// Second phase: per-thread tile scan updating p->cmax in place (racy; see
// the review note above).
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
p->cmax=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
}
// Kernel (HIP): compute the characteristic/wave-speed field for every grid point
// by delegating to computec3_cdf (defined in the project headers).
// Grid/block layout: 1D; each thread covers an npgp[0] x npgp[1] (x npgp[2]) tile.
// NOTE(review): every thread stores p->cmax=0.0 below with no synchronisation.
// All writers store the same constant, but nothing orders the store against other
// blocks -- confirm callers do not read p->cmax straight after this kernel.
__global__ void computec_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
p->cmax=0.0;  // reset global maximum (racy; see kernel header note)
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
// wmod+(order*dimp*NVAR) selects the order-th copy of the NVAR state fields.
computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
}
}
__syncthreads();
}
// Kernel (HIP): prepare derived-field storage and compute derived diagnostics.
// When order==0: zeroes the derived fields in wd (vel1..pkb, or vel1..NDERV-1 for
// ADIABHYDRO builds) and copies the NVAR state fields of wmod into the second
// w-array (offset dimp*NVAR). For all orders it then calls computebdotv3_cdf
// (presumably B-dot-v; see dervfields_cdf.cuh -- other diagnostics are commented out).
// Grid/block layout: 1D; each thread covers an npgp[0] x npgp[1] (x npgp[2]) tile.
__global__ void computedervfields_parallel(struct params *p, real *wmod, real *wd, int order)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// Pass 1 (order 0 only): zero derived fields and duplicate the state fields.
if(order == 0)
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
#ifdef ADIABHYDRO
for(int f=vel1; f<NDERV; f++)
#else
for(int f=vel1; f<=pkb; f++)
#endif
wd[fencode3_cdf(p,ii,f)]=0;
// Both branches are identical today; kept separate to allow the 3D copy
// to diverge from the 2D one without touching the other build.
#ifdef USE_SAC_3D
for(int f=rho; f<NVAR; f++)
wmod[fencode3_cdf(p,ii,f)+dimp*NVAR]=wmod[fencode3_cdf(p,ii,f)];
#else
for(int f=rho; f<NVAR; f++)
wmod[fencode3_cdf(p,ii,f)+dimp*NVAR]=wmod[fencode3_cdf(p,ii,f)];
#endif
}
}
__syncthreads();
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
// Pass 2 (all orders): compute the active derived diagnostic per point.
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if( ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
#ifdef ADIABHYDRO
//computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
//computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computej3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computebdotv3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computedivb3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
}
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
// Report any pending HIP/CUDA error to stderr, tagged with a caller-supplied label.
// Synchronises the device first so that errors from asynchronous operations
// (kernel launches, async copies) are surfaced rather than silently dropped,
// then drains the sticky last-error state.
// NOTE: label is not modified; it stays char* only to keep the existing signature.
void checkErrors_cdf(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
// hipGetErrorString returns const char*; do not cast away const.
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = hipGetLastError();  // also clears the sticky error state
if (err != hipSuccess)
{
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
// Host wrapper (HIP): launch computedervfields_parallel over every grid point,
// then copy the parameter struct back to the host (the kernel may update it).
// Returns 0 on completion; API errors are not checked here -- call
// checkErrors_cdf() afterwards if diagnostics are needed.
int cucomputedervfields(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipLaunchKernelGGL(( computedervfields_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order);
hipDeviceSynchronize();
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (HIP): launch computevels_parallel (velocity derived fields) for
// sweep direction dir. No host copy-back is performed (the kernel does not
// update anything the host reads here).
// Returns 0 on completion; errors are left to checkErrors_cdf().
int cucomputevels(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipLaunchKernelGGL(( computevels_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, dir);
hipDeviceSynchronize();
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (HIP): determine the maximum wave speed by launching
// computemaxc_parallel (the serial single-thread scan variant; the reduction
// variants remain commented out below), then copy the parameter struct -- which
// carries the updated cmax -- back to the host.
// Returns 0 on completion; errors are left to checkErrors_cdf().
int cucomputemaxc(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
int nit=1;  // repeat count (kept at 1; >1 was used for timing experiments)
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
//reductiona0computemaxc_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
// fastcomputemaxc_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
for(int i=0; i<nit;i++)
{
hipLaunchKernelGGL(( computemaxc_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, dir);
hipDeviceSynchronize();
}
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (HIP): launch computec_parallel (per-point wave-speed field) for
// sweep direction dir, then copy the parameter struct back to the host.
// Returns 0 on completion; errors are left to checkErrors_cdf().
int cucomputec(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipLaunchKernelGGL(( computec_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, dir);
hipDeviceSynchronize();
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (HIP): launch computepres_parallel (pressure derived fields) for
// sweep direction dir. No copy-back is performed (the memcpy is commented out).
// NOTE(review): declared int but has no return statement -- callers must not use
// the return value until a `return 0;` is added.
int cucomputepres(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
////hipSetDevice(selectedDevice);
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
//dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
// dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipLaunchKernelGGL(( computepres_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, dir);
hipDeviceSynchronize();
// hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
| c2df7488fe884bfcc3c366b8cf1d7643ac6c9a3a.cu | //#define MODID pre
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
#include "gradops_cdf.cuh"
#include "dervfields_cdf.cuh"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
// Kernel (CUDA): compute the velocity derived fields via computevel3_cdf for every
// interior grid point of the sweep direction dir (0=x, 1=y, 2=z in 3D builds).
// The transverse directions are restricted to [2, n-3]; the sweep direction spans
// the full range.
// Grid/block layout: 1D; each thread covers an npgp[0] x npgp[1] (x npgp[2]) tile.
__global__ void computevels_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
// Per-direction interior check: full range along dir, [2, n-3] transverse.
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
//if(i<(ni) && j >1 && j<(nj-1))
computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
//if(i>1 && i<(ni-1) && j<(nj))
computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
//if(i>1 && i<(ni-1) && j<(nj))
computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
break;
#endif
}
}
}
__syncthreads();
}
// Kernel (CUDA): compute the pressure derived fields (computepk3_cdf and
// computept3_cdf) for every in-range grid point. The direction-dependent variant
// is retained below in a commented block; the dir parameter is currently unused.
// Grid/block layout: 1D; each thread covers an npgp[0] x npgp[1] (x npgp[2]) tile.
__global__ void computepres_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
// Both build variants currently run the same pair of routines; the split is
// kept so one branch can diverge later without touching the other.
#ifdef ADIABHYDRO
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
/* switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
{
#ifdef ADIABHYDRO
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
}
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
{
#ifdef ADIABHYDRO
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
}
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
{
computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
}
break;
#endif
}*/
}
}
__syncthreads();
}
// Kernel (CUDA): determine the maximum wave speed (p->cmax).
// Only global thread 0 does the work, scanning every grid point serially and
// folding each into p->cmax via computecmax3_cdf. This is deliberately serial;
// the reduction* kernels in this file are experimental parallel replacements.
// Note: grid/block shape is irrelevant beyond supplying thread (0,0).
__global__ void computemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int ii[NDIM];  // multi-dimensional index of the current grid point
// Total grid-point count; (order*dimp*NVAR) selects the order-th copy of the
// NVAR state fields inside wmod.
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
__syncthreads();
if(iindex==0)
{
// Serial scan over the whole grid by the single thread (0,0).
for(ii[0]=0;ii[0]<((p->n[0]));ii[0]++)
for(ii[1]=0;ii[1]<((p->n[1]));ii[1]++)
#ifdef USE_SAC_3D
// Fixed: was "for(ii[2]>1; ...)" -- a comparison instead of an
// initialisation, leaving ii[2] uninitialised in 3D builds. The original
// condition suggests the intended interior start index is 2.
for(ii[2]=2;ii[2]<((p->n[2])-2);ii[2]++)
#endif
{
computecmax3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
}
}
__syncthreads();
}
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory
*/
// Kernel (CUDA): two-pass shared-memory max-reduction of the fast wave speed
// (cfast slice of wd) into p->cmax, using the sequential-addressing scheme from
// the NVIDIA reduction sample.
// Requires dynamic shared memory of blockDim.x*sizeof(real); the s=blockDim.x/2
// loops assume blockDim.x is a power of two.
// NOTE(review): thread 0 of EVERY block writes p->cmax with no inter-block
// ordering, and the second pass conditionally refreshes sdata[tid] while leaving
// stale first-pass values in unwritten slots -- only reliable for a single-block
// launch; confirm intended usage (the call site in cucomputemaxc is commented out).
__global__ void fastcomputemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
unsigned int tid = threadIdx.x;                      // index into shared sdata[]
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
extern __shared__ real sdata[];  // dynamic shared memory, one slot per thread
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// perform first level of reduction,
// reading from global memory, writing to shared memory
sdata[tid]=0.0;
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>sdata[tid])
sdata[tid]=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
// do reduction in shared mem
// (sequential addressing: no bank conflicts, no modulo; power-of-two blockDim)
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
//sdata[tid] += sdata[tid + s];
//if(sdata[tid]>sdata[0])
// sdata[0]=sdata[tid];
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid+s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];  // publish this block's maximum
// Second pass: refresh sdata with any values still exceeding the published
// cmax, then reduce again (see the staleness note in the kernel header).
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
sdata[tid]=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
//sdata[tid] += sdata[tid + s];
//if(sdata[tid]>sdata[0])
// sdata[0]=sdata[tid];
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid+s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];
}
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
// Kernel (CUDA): experimental interleaved-addressing max-reduction of the fast
// wave speed into p->cmax (modulo-based scheme from the NVIDIA reduction sample).
// Requires dynamic shared memory of blockDim.x*sizeof(real).
// WARNING(review): the reduction -- including its __syncthreads() calls -- is
// nested INSIDE the per-point loop and its bounds-check `if`, so threads that
// fail the check never reach the barrier. __syncthreads() inside divergent
// control flow is undefined behaviour. All call sites of this kernel are
// commented out; treat it as dead/experimental code and restructure before use.
__global__ void reduction0computemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
unsigned int tid = threadIdx.x;                      // index into shared sdata[]
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
extern __shared__ real sdata[];  // dynamic shared memory, one slot per thread
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// perform first level of reduction,
// reading from global memory, writing to shared memory
sdata[tid]=0.0;
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
// if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
sdata[tid]=wd[fencode3_cdf(p,ii,cfast)];
__syncthreads();   // WARNING: divergent barrier -- see kernel header
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0) {
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
p->cmax=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
}
}
}
//from http://www.nvidia.com/object/cuda_sample_data-parallel.html#reduction
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
// Kernel (CUDA): shared-memory max-reduction of the fast wave speed (cfast slice
// of wd) into p->cmax (interleaved-addressing scheme from the NVIDIA sample).
// Grid/block layout: 1D; requires dynamic shared memory of blockDim.x*sizeof(real).
// NOTE(review): thread 0 of EVERY block writes p->cmax, and the second pass below
// performs an unsynchronised read-modify-write of p->cmax from many threads; the
// result is only well-defined for a single-block launch -- confirm against callers
// (the call site in cucomputemaxc is commented out).
__global__ void reductiona0computemaxc_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
unsigned int tid = threadIdx.x;                      // index into shared sdata[]
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
extern __shared__ real sdata[];  // dynamic shared memory, one slot per thread
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// perform first level of reduction,
// reading from global memory, writing to shared memory
sdata[tid]=0.0;
if(iindex<dimp)
sdata[tid]=wd[iindex+(dimp*cfast)];  // wd is laid out field-major: [field][point]
/* if(iindex<dimp)
if(wd[iindex+(dimp*cfast)]>(p->cmax))
sdata[tid]=wd[iindex+(dimp*cfast)];*/
__syncthreads();
// do reduction in shared mem
// (interleaved addressing; assumes blockDim.x is a power of two)
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0) {
if(sdata[tid+s]>sdata[tid])
sdata[tid]=sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) p->cmax = sdata[0];  // publish this block's maximum (see note above)
// Second, per-tile serial pass folding every covered point into p->cmax
// (unsynchronised -- see the race note in the kernel header).
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
//computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
if(wd[fencode3_cdf(p,ii,cfast)]>(p->cmax))
p->cmax=wd[fencode3_cdf(p,ii,cfast)];
}
}
__syncthreads();
}
// Kernel (CUDA): compute the characteristic/wave-speed field for every grid point
// by delegating to computec3_cdf (defined in the project headers).
// Grid/block layout: 1D; each thread covers an npgp[0] x npgp[1] (x npgp[2]) tile.
// NOTE(review): every thread stores p->cmax=0.0 below with no synchronisation.
// All writers store the same constant, but nothing orders the store against other
// blocks -- confirm callers do not read p->cmax straight after this kernel.
__global__ void computec_parallel(struct params *p, real *wmod, real *wd, int order, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
p->cmax=0.0;  // reset global maximum (racy; see kernel header note)
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
//determin cmax
// wmod+(order*dimp*NVAR) selects the order-th copy of the NVAR state fields.
computec3_cdf(wmod+(order*dimp*NVAR),wd,p,ii,dir);
//p->cmax=0.0;
}
}
__syncthreads();
}
// Kernel (CUDA): prepare derived-field storage and compute derived diagnostics.
// When order==0: zeroes the derived fields in wd (vel1..pkb, or vel1..NDERV-1 for
// ADIABHYDRO builds) and copies the NVAR state fields of wmod into the second
// w-array (offset dimp*NVAR). For all orders it then calls computebdotv3_cdf
// (presumably B-dot-v; see dervfields_cdf.cuh -- other diagnostics are commented out).
// Grid/block layout: 1D; each thread covers an npgp[0] x npgp[1] (x npgp[2]) tile.
__global__ void computedervfields_parallel(struct params *p, real *wmod, real *wd, int order)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;  // flat global thread id
int i,j;          // unused here
int index,k;      // unused here
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;    // unused here
real dy=p->dx[1]; // unused here
real dx=p->dx[0]; // unused here
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ii[NDIM];     // multi-dimensional index of the current grid point
int dimp=((p->n[0]))*((p->n[1]));  // total grid-point count (2D)
#ifdef USE_SAC_3D
int kp,kpg;
real dz=p->dx[2]; // unused here
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));  // 3D point count
#endif
int ip,jp,ipg,jpg;
// Decompose the flat thread id into per-direction tile indices.
#ifdef USE_SAC_3D
kp=iindex/(nj*ni/((p->npgp[1])*(p->npgp[0])));
jp=(iindex-(kp*(nj*ni/((p->npgp[1])*(p->npgp[0])))))/(ni/(p->npgp[0]));
ip=iindex-(kp*nj*ni/((p->npgp[1])*(p->npgp[0])))-(jp*(ni/(p->npgp[0])));
#endif
#if defined USE_SAC || defined ADIABHYDRO
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
#endif
// Pass 1 (order 0 only): zero derived fields and duplicate the state fields.
if(order == 0)
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
#ifdef ADIABHYDRO
for(int f=vel1; f<NDERV; f++)
#else
for(int f=vel1; f<=pkb; f++)
#endif
wd[fencode3_cdf(p,ii,f)]=0;
// Both branches are identical today; kept separate to allow the 3D copy
// to diverge from the 2D one without touching the other build.
#ifdef USE_SAC_3D
for(int f=rho; f<NVAR; f++)
wmod[fencode3_cdf(p,ii,f)+dimp*NVAR]=wmod[fencode3_cdf(p,ii,f)];
#else
for(int f=rho; f<NVAR; f++)
wmod[fencode3_cdf(p,ii,f)+dimp*NVAR]=wmod[fencode3_cdf(p,ii,f)];
#endif
}
}
__syncthreads();
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
// Pass 2 (all orders): compute the active derived diagnostic per point.
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
#ifdef USE_SAC_3D
for(kpg=0;kpg<(p->npgp[2]);kpg++)
#endif
{
ii[0]=ip*(p->npgp[0])+ipg;
ii[1]=jp*(p->npgp[1])+jpg;
#ifdef USE_SAC_3D
ii[2]=kp*(p->npgp[2])+kpg;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if( ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
#ifdef ADIABHYDRO
//computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#else
//computevel3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computej3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computepk3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computept3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
computebdotv3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
//computedivb3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
#endif
}
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
// Report any pending CUDA error to stderr, tagged with a caller-supplied label.
// Synchronises the device first so that errors from asynchronous operations
// (kernel launches, async copies) are surfaced rather than silently dropped,
// then drains the sticky last-error state.
// NOTE: label is not modified; it stays char* only to keep the existing signature.
void checkErrors_cdf(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
if (err != cudaSuccess)
{
// cudaGetErrorString returns const char*; do not cast away const.
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();  // also clears the sticky error state
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
// Host wrapper (CUDA): launch computedervfields_parallel over every grid point,
// then copy the parameter struct back to the host (the kernel may update it).
// Returns 0 on completion; API errors are not checked here -- call
// checkErrors_cdf() afterwards if diagnostics are needed.
int cucomputedervfields(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
computedervfields_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order);
cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (CUDA): launch computevels_parallel (velocity derived fields) for
// sweep direction dir. No host copy-back is performed.
// Returns 0 on completion; errors are left to checkErrors_cdf().
int cucomputevels(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
computevels_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (CUDA): determine the maximum wave speed by launching
// computemaxc_parallel (the serial single-thread scan variant; the reduction
// variants remain commented out below), then copy the parameter struct -- which
// carries the updated cmax -- back to the host.
// Returns 0 on completion; errors are left to checkErrors_cdf().
int cucomputemaxc(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
int nit=1;  // repeat count (kept at 1; >1 was used for timing experiments)
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
//reductiona0computemaxc_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
// fastcomputemaxc_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
for(int i=0; i<nit;i++)
{
computemaxc_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
}
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
// Host wrapper (CUDA): launch computec_parallel (per-point wave-speed field) for
// sweep direction dir, then copy the parameter struct back to the host.
// Returns 0 on completion; errors are left to checkErrors_cdf().
int cucomputec(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
// Total number of grid points to cover (2D, or 3D when USE_SAC_3D is set).
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// One thread per grid point, rounded up to whole blocks.
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
computec_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);
cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
return 0;  // fixed: function is declared int but previously fell off the end (UB)
}
/**
 * Launch the pressure-computation kernel for one (order, dir) sweep.
 * Unlike cucomputec/cucomputemaxc, no params copy-back is performed
 * (the original had it commented out).
 *
 * p      host-side params struct (supplies the mesh dimensions n[]).
 * d_p    device-resident copy of the params struct.
 * d_wmod device field data (modified state).
 * d_wd   device derived/work data.
 * order, dir  sweep selectors forwarded unchanged to the kernel.
 *
 * Returns 0 on completion.  (The original fell off the end of a
 * non-void function, which is undefined behaviour in C++.)
 */
int cucomputepres(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, int dir)
{
  // Total number of mesh points: 2D product by default, 3D when USE_SAC_3D is set.
  int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
  dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif

  // One thread per mesh point, rounded up to a whole number of blocks.
  int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
  computepres_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dir);

  // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
  // supported equivalent and also surfaces asynchronous kernel errors.
  cudaDeviceSynchronize();

  return 0;
}
|
639d66d06d2800d9c0832e6be86e09e3f5dee41c.hip | // !!! This is a file automatically generated by hipify!!!
/** CUDA code demo, modified by Jiuzhou Tang
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
/** Usage: ./vectorAdd 500000 256 (Number of elements, number of threads per block)
**/
#include <stdio.h>
#include <time.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
# include <rocblas.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Element-wise vector addition kernel: C[i] = A[i] + B[i].
 *
 * Expects a 1D launch with at least numElements threads in total; threads
 * whose global index falls past the end exit immediately (guard for the
 * ragged last block).
 */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
// Serial CPU reference implementation: C[i] = A[i] + B[i] for every element.
// Used to time the host path and as the baseline for verifying GPU results.
__host__ void vectorAdd_host(const float *A, const float *B, float *C, int numElements)
{
    for (int idx = 0; idx < numElements; ++idx)
    {
        C[idx] = A[idx] + B[idx];
    }
}
/**
* Host main routine
*/
/**
 * Host main routine.
 *
 * Usage: ./vectorAdd <numElements> <threadsPerBlock>
 *
 * Computes C = A + B three ways -- a serial CPU loop, the hand-written
 * vectorAdd kernel, and a hipBLAS saxpy -- timing each stage with HIP
 * events and verifying both GPU results against the host inputs.
 */
int main(int argc, char * argv[])
{
    // Error code to check return values for HIP runtime calls
    hipError_t err = hipSuccess;
    float time_elapsed = 0;

    // Validate the command line before touching argv[1]/argv[2]; the
    // original dereferenced both unconditionally and crashed when run
    // with fewer than two arguments.
    if (argc < 3)
    {
        fprintf(stderr, "Usage: %s <numElements> <threadsPerBlock>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    int numElements = atoi(argv[1]);
    int threadsPerBlock = atoi(argv[2]);
    if (numElements <= 0 || threadsPerBlock <= 0)
    {
        fprintf(stderr, "numElements and threadsPerBlock must be positive\n");
        exit(EXIT_FAILURE);
    }

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);

    // Allocate the host vectors and verify the allocations succeeded.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors with pseudo-random values in [0, 1].
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    // ---- Serial CPU reference, timed with events ----
    hipEventRecord(start, 0);
    vectorAdd_host(h_A, h_B, h_C, numElements);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);  // wait until the stop event has completed
    hipEventElapsedTime(&time_elapsed, start, stop);
    printf("cpu serial computing time:%f(ms)\n", time_elapsed);

    // ---- Device allocations ----
    float *d_A = NULL;
    err = hipMalloc((void **)&d_A, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_B = NULL;
    err = hipMalloc((void **)&d_B, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_C = NULL;
    err = hipMalloc((void **)&d_C, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // ---- H2D copies + kernel + D2H copy, timed as one region ----
    hipEventRecord(start, 0);
    printf("Copy input data from the host memory to the CUDA device\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the kernel with enough blocks to cover every element (ceil-div).
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the result back; the blocking memcpy also synchronizes with the kernel.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time_elapsed, start, stop);
    printf("cuda vectoradd event time:%f(ms)\n", time_elapsed);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Verify the kernel result element by element against the host inputs.
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    // ---- hipBLAS saxpy: d_B := alpha*d_A + d_B, read back into h_C ----
    hipblasHandle_t handle;
    hipblasCreate (&handle);
    float alpha = 1.0f;  // float literal (the original used a double 1.0)
    hipEventRecord(start, 0);
    hipblasSetVector(numElements, sizeof(float), h_A, 1, d_A, 1);
    hipblasSetVector(numElements, sizeof(float), h_B, 1, d_B, 1);
    hipblasSaxpy(handle, numElements, &alpha, d_A, 1, d_B, 1);
    hipblasGetVector(numElements, sizeof(float), d_B, 1, h_C, 1);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time_elapsed, start, stop);
    printf("cuda blas event time:%f(ms)\n", time_elapsed);

    // Verify the BLAS result the same way.
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    hipblasDestroy(handle);  // the original leaked the BLAS handle

    // ---- Cleanup ----
    err = hipFree(d_A);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipFree(d_B);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipFree(d_C);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    free(h_A);
    free(h_B);
    free(h_C);
    hipEventDestroy(start);  // the original leaked both timing events
    hipEventDestroy(stop);

    // hipDeviceReset cleans up all device state and flushes profile data
    // before the application exits.
    err = hipDeviceReset();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Done\n");
    return 0;
}
| 639d66d06d2800d9c0832e6be86e09e3f5dee41c.cu | /** CUDA code demo, modified by Jiuzhou Tang
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
/** Usage: ./vectorAdd 500000 256 (Number of elements, number of threads per block)
**/
#include <stdio.h>
#include <time.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
# include <cublas_v2.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Element-wise vector addition kernel: C[i] = A[i] + B[i].
 *
 * Expects a 1D launch with at least numElements threads in total; threads
 * whose global index falls past the end exit immediately (guard for the
 * ragged last block).
 */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
// Serial CPU reference implementation: C[i] = A[i] + B[i] for every element.
// Used to time the host path and as the baseline for verifying GPU results.
__host__ void vectorAdd_host(const float *A, const float *B, float *C, int numElements)
{
    for (int idx = 0; idx < numElements; ++idx)
    {
        C[idx] = A[idx] + B[idx];
    }
}
/**
* Host main routine
*/
/**
 * Host main routine.
 *
 * Usage: ./vectorAdd <numElements> <threadsPerBlock>
 *
 * Computes C = A + B three ways -- a serial CPU loop, the hand-written
 * vectorAdd kernel, and a cuBLAS saxpy -- timing each stage with CUDA
 * events and verifying both GPU results against the host inputs.
 */
int main(int argc, char * argv[])
{
    // Error code to check return values for CUDA runtime calls
    cudaError_t err = cudaSuccess;
    float time_elapsed = 0;

    // Validate the command line before touching argv[1]/argv[2]; the
    // original dereferenced both unconditionally and crashed when run
    // with fewer than two arguments.
    if (argc < 3)
    {
        fprintf(stderr, "Usage: %s <numElements> <threadsPerBlock>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    int numElements = atoi(argv[1]);
    int threadsPerBlock = atoi(argv[2]);
    if (numElements <= 0 || threadsPerBlock <= 0)
    {
        fprintf(stderr, "numElements and threadsPerBlock must be positive\n");
        exit(EXIT_FAILURE);
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);  // create the timing events
    cudaEventCreate(&stop);

    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);

    // Allocate the host vectors and verify the allocations succeeded.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors with pseudo-random values in [0, 1].
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    // ---- Serial CPU reference, timed with events ----
    cudaEventRecord(start, 0);  // record the start time
    vectorAdd_host(h_A, h_B, h_C, numElements);
    cudaEventRecord(stop, 0);   // record the stop time
    cudaEventSynchronize(stop); // wait until the stop event has completed
    cudaEventElapsedTime(&time_elapsed, start, stop);  // elapsed ms
    printf("cpu serial computing time:%f(ms)\n", time_elapsed);

    // ---- Device allocations ----
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // ---- H2D copies + kernel + D2H copy, timed as one region ----
    cudaEventRecord(start, 0);
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the kernel with enough blocks to cover every element (ceil-div).
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the result back; the blocking memcpy also synchronizes with the kernel.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_elapsed, start, stop);
    printf("cuda vectoradd event time:%f(ms)\n", time_elapsed);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Verify the kernel result element by element against the host inputs.
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    // ---- cuBLAS saxpy: d_B := alpha*d_A + d_B, read back into h_C ----
    cublasHandle_t handle;
    cublasCreate (&handle);
    float alpha = 1.0f;  // float literal (the original used a double 1.0)
    cudaEventRecord(start, 0);
    cublasSetVector(numElements, sizeof(float), h_A, 1, d_A, 1);
    cublasSetVector(numElements, sizeof(float), h_B, 1, d_B, 1);
    cublasSaxpy_v2(handle, numElements, &alpha, d_A, 1, d_B, 1);
    cublasGetVector(numElements, sizeof(float), d_B, 1, h_C, 1);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_elapsed, start, stop);
    printf("cuda blas event time:%f(ms)\n", time_elapsed);

    // Verify the BLAS result the same way.
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    cublasDestroy(handle);  // the original leaked the BLAS handle

    // ---- Cleanup ----
    err = cudaFree(d_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_B);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_C);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    free(h_A);
    free(h_B);
    free(h_C);
    cudaEventDestroy(start);  // the original leaked both timing events
    cudaEventDestroy(stop);

    // cudaDeviceReset causes the driver to clean up all state and flushes
    // profile data before the application exits.
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Done\n");
    return 0;
}
|
ecc5dcab911759cbdd1a190c3167a361ba81598e.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
using namespace cooperative_groups;
// GPUVerify benchmark kernel (the "//pass" annotation above indicates the
// checker is expected to prove this race-free for --blockDim=32 --gridDim=2).
// Each thread copies its right neighbour's value into its own slot; the
// grid-wide synchronize(g) separates every read of A from every write,
// so no read/write pair can overlap.
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
// Grid-scope group; this_grid() is only meaningful under a cooperative
// launch -- presumably the verifier models that; confirm for real runs.
grid_group g = this_grid();
int idx = blockDim.x * bid + tid;
// NOTE(review): the highest-indexed thread reads A[idx + 1], one past the
// last slot any thread writes -- A must hold at least
// gridDim.x*blockDim.x + 1 ints; verify against the allocating harness.
int temp = A[idx + 1];
synchronize(g);
A[idx] = temp;
}
| ecc5dcab911759cbdd1a190c3167a361ba81598e.cu | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
using namespace cooperative_groups;
// GPUVerify benchmark kernel (the "//pass" annotation above indicates the
// checker is expected to prove this race-free for --blockDim=32 --gridDim=2).
// Each thread copies its right neighbour's value into its own slot; the
// grid-wide synchronize(g) separates every read of A from every write,
// so no read/write pair can overlap.
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
// Grid-scope group; this_grid() is only meaningful under a cooperative
// launch -- presumably the verifier models that; confirm for real runs.
grid_group g = this_grid();
int idx = blockDim.x * bid + tid;
// NOTE(review): the highest-indexed thread reads A[idx + 1], one past the
// last slot any thread writes -- A must hold at least
// gridDim.x*blockDim.x + 1 ints; verify against the allocating harness.
int temp = A[idx + 1];
synchronize(g);
A[idx] = temp;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.