hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
766c0657cf5822007804e1bec71157da8112ff94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
//printf("nx: %d, ny: %d, ix: %d, iy: %d, idx: %d\n", nx, ny, ix, iy, idx);
if (ix<nx && iy<ny)
{
MatC[idx] = MatA[idx] + MatB[idx];
//printf("GPU Add: %f + %f = %f.\n", MatA[idx], MatB[idx], MatC[idx]);
}
} | 766c0657cf5822007804e1bec71157da8112ff94.cu | #include "includes.h"
__global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
//printf("nx: %d, ny: %d, ix: %d, iy: %d, idx: %d\n", nx, ny, ix, iy, idx);
if (ix<nx && iy<ny)
{
MatC[idx] = MatA[idx] + MatB[idx];
//printf("GPU Add: %f + %f = %f.\n", MatA[idx], MatB[idx], MatC[idx]);
}
} |
5ba9242b724772a995ca99953b29c5e55091ef49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#include "zgemm_fermi_kernels.h"
/***************************************************************************//**
Purpose
-------
ZGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or
op( X ) = X**T or
op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA magma_trans_t.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( A ) = A.
- = MagmaTrans: op( A ) = A**T.
- = MagmaConjTrans: op( A ) = A**H.
@param[in]
transB magma_trans_t.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( B ) = B.
- = MagmaTrans: op( B ) = B**T.
- = MagmaConjTrans: op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha COMPLEX_16
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB COMPLEX_16 array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX_16 array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_zgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dB, magma_int_t lddb,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = (!TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_zgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc, queue );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_Amagma_z.normalized = false;
tex_ref_Amagma_z.filterMode = hipFilterModePoint;
tex_ref_Amagma_z.addressMode[0] = hipAddressModeClamp;
tex_ref_Bmagma_z.normalized = false;
tex_ref_Bmagma_z.filterMode = hipFilterModePoint;
tex_ref_Bmagma_z.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_Amagma_z, dA, sizeA*sizeof(magmaDoubleComplex));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_Bmagma_z, dB, sizeB*sizeof(magmaDoubleComplex));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_Amagma_z );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ),
magma_ceildiv( n, BLK_N_nn ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ),
magma_ceildiv( n, BLK_N_nt ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ),
magma_ceildiv( n, BLK_N_nc ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ),
magma_ceildiv( n, BLK_N_tn ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ),
magma_ceildiv( n, BLK_N_tt ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ),
magma_ceildiv( n, BLK_N_tc ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ),
magma_ceildiv( n, BLK_N_cn ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ),
magma_ceildiv( n, BLK_N_ct ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ),
magma_ceildiv( n, BLK_N_cc ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_Amagma_z );
hipUnbindTexture( tex_ref_Bmagma_z );
#endif
}
| 5ba9242b724772a995ca99953b29c5e55091ef49.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#include "zgemm_fermi_kernels.h"
/***************************************************************************//**
Purpose
-------
ZGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or
op( X ) = X**T or
op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA magma_trans_t.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( A ) = A.
- = MagmaTrans: op( A ) = A**T.
- = MagmaConjTrans: op( A ) = A**H.
@param[in]
transB magma_trans_t.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( B ) = B.
- = MagmaTrans: op( B ) = B**T.
- = MagmaConjTrans: op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha COMPLEX_16
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB COMPLEX_16 array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX_16 array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_zgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dB, magma_int_t lddb,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = (!TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_zgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc, queue );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_Amagma_z.normalized = false;
tex_ref_Amagma_z.filterMode = cudaFilterModePoint;
tex_ref_Amagma_z.addressMode[0] = cudaAddressModeClamp;
tex_ref_Bmagma_z.normalized = false;
tex_ref_Bmagma_z.filterMode = cudaFilterModePoint;
tex_ref_Bmagma_z.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_Amagma_z, dA, sizeA*sizeof(magmaDoubleComplex));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
err = cudaBindTexture(&offsetB, tex_ref_Bmagma_z, dB, sizeB*sizeof(magmaDoubleComplex));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
cudaUnbindTexture( tex_ref_Amagma_z );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ),
magma_ceildiv( n, BLK_N_nn ) );
zgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ),
magma_ceildiv( n, BLK_N_nt ) );
zgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ),
magma_ceildiv( n, BLK_N_nc ) );
zgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ),
magma_ceildiv( n, BLK_N_tn ) );
zgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ),
magma_ceildiv( n, BLK_N_tt ) );
zgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ),
magma_ceildiv( n, BLK_N_tc ) );
zgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ),
magma_ceildiv( n, BLK_N_cn ) );
zgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ),
magma_ceildiv( n, BLK_N_ct ) );
zgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ),
magma_ceildiv( n, BLK_N_cc ) );
zgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_Amagma_z );
cudaUnbindTexture( tex_ref_Bmagma_z );
#endif
}
|
722d057401ed6fbf4f135c65c0c77c4902efd125.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#define NUM_ELEM 128
using namespace std;
// TODO
// workers will compute sum on first N elements
__global__ void worker(int *data, int *result)
{
// TODO, compute sum and store in result
for(int i = 0; i != data[threadIdx.x]; ++i)
result[threadIdx.x] += data[i];
}
// TODO
// master will launch threads to compute sum on first N elements
__global__ void master(int N, int *data, int *result)
{
// TODO, schedule worker threads
hipLaunchKernelGGL(( worker), dim3(1), dim3(N), 0, 0, data, result);
}
void generateData(int *data, int num) {
srand(time(0));
for(int i = 0; i < num; i++) {
data[i] = rand() % 8 + 2;
}
}
void print(int *data, int num) {
for(int i = 0; i < num; i++) {
cout << data[i] << " ";
}
cout << endl;
}
// TASK check
// each element result[i] should be sum of first data[i] elements of data[i]
bool checkResult(int *data, int num, int *result) {
for(int i = 0; i < num; i++) {
int sum = 0;
for(int j = 0; j < data[i]; j++) {
sum += data[j];
}
if(result[i] != sum) {
cout << "Error at " << i << ", requested sum of first "
<< data[i] << " elem, got " << result[i] << endl;
return false;
}
}
return true;
}
int main(int argc, char *argv[])
{
int *data = NULL;
hipMallocManaged(&data, NUM_ELEM * sizeof(int));
int *result = NULL;
hipMallocManaged(&result, NUM_ELEM * sizeof(int));
generateData(data, NUM_ELEM);
// TODO schedule master threads and pass data/result/num
hipLaunchKernelGGL(( master), dim3(1), dim3(1) , 0, 0, NUM_ELEM, data, result);
hipDeviceSynchronize();
print(data, NUM_ELEM);
print(result, NUM_ELEM);
if(checkResult(data, NUM_ELEM, result)) {
cout << "Result OK" << endl;
} else {
cout << "Result ERR" << endl;
}
hipFree(data);
hipFree(result);
return 0;
}
| 722d057401ed6fbf4f135c65c0c77c4902efd125.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#define NUM_ELEM 128
using namespace std;
// TODO
// workers will compute sum on first N elements
__global__ void worker(int *data, int *result)
{
// TODO, compute sum and store in result
for(int i = 0; i != data[threadIdx.x]; ++i)
result[threadIdx.x] += data[i];
}
// TODO
// master will launch threads to compute sum on first N elements
__global__ void master(int N, int *data, int *result)
{
// TODO, schedule worker threads
worker<<<1, N>>>(data, result);
}
void generateData(int *data, int num) {
srand(time(0));
for(int i = 0; i < num; i++) {
data[i] = rand() % 8 + 2;
}
}
void print(int *data, int num) {
for(int i = 0; i < num; i++) {
cout << data[i] << " ";
}
cout << endl;
}
// TASK check
// each element result[i] should be sum of first data[i] elements of data[i]
bool checkResult(int *data, int num, int *result) {
for(int i = 0; i < num; i++) {
int sum = 0;
for(int j = 0; j < data[i]; j++) {
sum += data[j];
}
if(result[i] != sum) {
cout << "Error at " << i << ", requested sum of first "
<< data[i] << " elem, got " << result[i] << endl;
return false;
}
}
return true;
}
int main(int argc, char *argv[])
{
int *data = NULL;
cudaMallocManaged(&data, NUM_ELEM * sizeof(int));
int *result = NULL;
cudaMallocManaged(&result, NUM_ELEM * sizeof(int));
generateData(data, NUM_ELEM);
// TODO schedule master threads and pass data/result/num
master<<< 1, 1 >>>(NUM_ELEM, data, result);
cudaDeviceSynchronize();
print(data, NUM_ELEM);
print(result, NUM_ELEM);
if(checkResult(data, NUM_ELEM, result)) {
cout << "Result OK" << endl;
} else {
cout << "Result ERR" << endl;
}
cudaFree(data);
cudaFree(result);
return 0;
}
|
06096e11e12c9d7e52a02e047d1b784ad9cbd07b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
hipLaunchKernelGGL(( GammaKernel) , dim3(gridSize), dim3(blockSize) , 0, 0, d_rgbaImage, d_outputImage, numRows, numCols, gamma);
hipDeviceSynchronize();
} | 06096e11e12c9d7e52a02e047d1b784ad9cbd07b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
cudaDeviceSynchronize();
} |
c52e7b302e01816c9471afb6d60b2af78a126282.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/AccumulateType.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHGeneral.h>
#include <THH/THHTensorSort.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int z = blockIdx.z; z < outer_dim; z += gridDim.z){
int idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) sorted_indices[idx]) * stride + z * stride_before;
const int grad_row = ((int) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
AT_INDEX_ERROR("index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
AT_INDEX_ERROR("index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& backend = src.type().backend();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) {
if (indices[i].defined()) {
// Cast index to the longType matching src's backend
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes the advanced indices `orig` against `self` and flattens them:
// expands bool/byte masks into long index tensors, broadcasts all index
// tensors to a common shape, pads with undefined tensors up to self.dim(),
// moves the indexed dimensions to the front if they are not already adjacent,
// and finally computes a single linear index (see computeLinearIndex).
// Returns (linearIndex, possibly-transposed self, nElemBefore, strideBefore,
// nElemAfter, inversePerm that undoes the transpose).
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, TensorList orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
namespace {
// Accumulating index_put_ (self[indices] += value) for HIP/CUDA tensors.
// Flattens the advanced indices into linear slice indices, sorts them so that
// duplicate destinations become adjacent, then launches
// indexing_backward_kernel, which lets exactly one warp own each distinct
// destination row - avoiding atomic write conflicts between duplicates.
void index_put_accum_kernel(Tensor & self, TensorList indices, const Tensor & value, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
AT_INDEX_ERROR("too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.view(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Convert element offsets into slice indices (each index addresses a
// contiguous slice of sliceSize elements).
linearIndex.div_(sliceSize);
{
sorted_indices.copy_(linearIndex);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
const auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
// Sort; a stable sort is not required
// NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>());
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
// The kernel narrows row offsets to int, so refuse tensors with >= 2^31 elements.
TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "index_put_ with accumulation is not supported on large tensors, number of source elements =", self.numel(), "file a support request on github");
TORCH_CHECK(value.numel() < std::numeric_limits<int>::max(), "index_put_ with accumulation is not supported on large tensors, number of source elements =", value.numel(), "file a support request on github");
const int UNROLL = 4;
const int indices_per_block = 4;
// Grid: x covers the sorted indices, y covers the slice (UNROLL elements
// per thread per pass), z covers the unindexed outer dimensions.
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool,
value_.scalar_type(), "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore);
});
THCudaCheck(hipGetLastError());
// If src was materialized as a contiguous copy, write the result back into
// self with the transpose (from makeLinearIndex) undone.
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel);
} //anonymous
} //at
} //native
| c52e7b302e01816c9471afb6d60b2af78a126282.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/AccumulateType.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCGeneral.h>
#include <THC/THCTensorSort.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <c10/macros/Macros.h>
namespace {
// Accumulates rows of grad_output into rows of grad_weight:
// grad_weight[sorted_indices[i]] += grad_output[indices[i]] (scale is 1.0).
// One warp (blockDim.x = warp size, blockDim.y warps per block) owns each
// distinct destination row and serially walks all duplicates of that row,
// so no two warps ever write the same row concurrently.
// Row offsets are narrowed to int; the host-side caller guards
// self.numel()/value.numel() < INT_MAX before launching.
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int z = blockIdx.z; z < outer_dim; z += gridDim.z){
int idx = blockIdx.x * blockDim.y + threadIdx.y;
// Only the warp that sees the FIRST occurrence of a destination proceeds.
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) sorted_indices[idx]) * stride + z * stride_before;
const int grad_row = ((int) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
// Strided sweep over the slice: each pass handles SZ elements per lane.
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
// Walk every duplicate of this destination before giving up the row.
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
// Wraps negative entries of `index` into [0, dim_size) via remainder, after
// optionally validating that every entry lies in [-dim_size, dim_size).
// Range checking is skipped in backward - if there were out of bounds
// indices, forward should already have errored out.
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  const bool validate = index.numel() != 0 && check_range;
  if (validate) {
    auto hi = index.max().item<int64_t>();
    auto lo = index.min().item<int64_t>();
    if (hi >= dim_size) {
      AT_INDEX_ERROR("index ", hi, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
    if (lo < -dim_size) {
      AT_INDEX_ERROR("index ", lo, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
  }
  return index.remainder(dim_size);
}
// Computes the strides `tensor` would have if it were contiguous (row-major),
// without reading the tensor's actual strides.
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  auto sizes = tensor.sizes();
  const auto ndim = tensor.dim();
  std::vector<int64_t> stride(ndim);
  // Innermost dimension is densely packed; each outer stride is the product
  // of all inner extents.
  stride[ndim - 1] = 1;
  for (int64_t d = ndim - 2; d >= 0; d--) {
    stride[d] = stride[d + 1] * sizes[d + 1];
  }
  return stride;
}
// Collapses all defined index tensors into one linear index tensor:
// sum_i indices[i] * linearStride(src)[i], with negative entries wrapped by
// wrapIndexOnce. Assumes the defined indices form a contiguous block of
// dimensions (makeLinearIndex transposes src to guarantee this). Also returns,
// for the dimensions that are NOT indexed: the element count before the
// indexed block (nElemBefore), the stride of the dimension immediately
// preceding it (strideBefore), and the element count after it (nElemAfter,
// i.e. the slice size addressed by each linear index).
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& backend = src.type().backend();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being indexed.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) {
if (indices[i].defined()) {
// Cast index to the longType matching src's backend
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
// Undefined index after the indexed block: contributes to the slice size.
emptyAfter++;
nElemAfter *= src.size(i);
} else {
// Undefined index before the first indexed dimension.
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes the advanced indices `orig` against `self` and flattens them:
// expands bool/byte masks into long index tensors, broadcasts all index
// tensors to a common shape, pads with undefined tensors up to self.dim(),
// moves the indexed dimensions to the front if they are not already adjacent,
// and finally computes a single linear index (see computeLinearIndex).
// Returns (linearIndex, possibly-transposed self, nElemBefore, strideBefore,
// nElemAfter, inversePerm that undoes the transpose).
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, TensorList orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
namespace {
// Accumulating index_put_ (self[indices] += value) for CUDA tensors.
// Flattens the advanced indices into linear slice indices, sorts them so that
// duplicate destinations become adjacent, then launches
// indexing_backward_kernel, which lets exactly one warp own each distinct
// destination row - avoiding atomic write conflicts between duplicates.
void index_put_accum_kernel(Tensor & self, TensorList indices, const Tensor & value, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
AT_INDEX_ERROR("too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.view(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Convert element offsets into slice indices (each index addresses a
// contiguous slice of sliceSize elements).
linearIndex.div_(sliceSize);
{
sorted_indices.copy_(linearIndex);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
const auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
// Sort; a stable sort is not required
// NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>());
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
// The kernel narrows row offsets to int, so refuse tensors with >= 2^31 elements.
TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "index_put_ with accumulation is not supported on large tensors, number of source elements =", self.numel(), "file a support request on github");
TORCH_CHECK(value.numel() < std::numeric_limits<int>::max(), "index_put_ with accumulation is not supported on large tensors, number of source elements =", value.numel(), "file a support request on github");
const int UNROLL = 4;
const int indices_per_block = 4;
// Grid: x covers the sorted indices, y covers the slice (UNROLL elements
// per thread per pass), z covers the unindexed outer dimensions.
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool,
value_.scalar_type(), "indexing_backward", [&] {
indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore);
});
THCudaCheck(cudaGetLastError());
// If src was materialized as a contiguous copy, write the result back into
// self with the transpose (from makeLinearIndex) undone.
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel);
} //anonymous
} //at
} //native
|
53798010c205e509ced86132d00cb0211579b815.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Element-wise product z[i] = x[i] * y[i]; one thread per element, with a
// bounds guard so the ragged final block never writes past n.
__global__
void mul_kernel(int n, const float *x, const float *y, float *z)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    z[idx] = x[idx] * y[idx];
}
// Host launcher: 256 threads per block, grid sized by ceiling division so
// every one of the n elements is covered. Pointers must be device pointers.
void mul(int n, const float *x, const float *y, float *z) {
hipLaunchKernelGGL(( mul_kernel), dim3((n+255)/256), dim3(256), 0, 0, n, x, y, z);
}
| 53798010c205e509ced86132d00cb0211579b815.cu | __global__
// Element-wise product z[i] = x[i] * y[i]; one thread per element, with a
// bounds guard so the ragged final block never writes past n.
void mul_kernel(int n, const float *x, const float *y, float *z)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) z[i] = x[i] * y[i];
}
// Host launcher: 256 threads per block, grid sized by ceiling division so
// every one of the n elements is covered. Pointers must be device pointers.
void mul(int n, const float *x, const float *y, float *z) {
mul_kernel<<<(n+255)/256, 256>>>(n, x, y, z);
}
|
5c3749adc95988f073ee721e235260641bb699e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size);
// Queries device 0 and returns its maximum threads-per-block; also prints
// the maximum grid dimensions for reference.
int getNum() {
hipDeviceProp_t prop;
// NOTE(review): `num` receives the device count but is never used, and the
// return codes of these runtime calls are not checked.
int num;
hipGetDeviceCount(&num);
hipGetDeviceProperties(&prop, 0);
printf("thread num = %d\n", prop.maxThreadsPerBlock);
printf("thread num = %d , %d,%d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
//printf("thread num = %d", prop.maxThreadsPerBlock);
return prop.maxThreadsPerBlock;
}
// Element-wise vector add; written for a single-block launch, so
// threadIdx.x alone identifies the element. Unused in this program's main.
__global__ void addKernel(int* c, const int* a, const int* b)
{
int i = threadIdx.x;//single block launch: threadIdx.x alone indexes the element (textbook p. 31)
c[i] = a[i] + b[i];
}
// One thread per output pixel: computes a kernelSize x kernelSize
// cross-correlation of `img` (width x height, row-major) centered on the
// pixel, treating out-of-bounds neighbors as zero (zero padding).
// Fixes vs. the original: the loop bounds use kernelSize instead of a
// hard-coded 3, and the sum is accumulated in a register and stored once,
// so the output no longer depends on the previous (possibly uninitialized)
// contents of `result`.
__global__ void conv(float *img, float *kernel,
float *result, int width, int height, int kernelSize) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= width * height) {
        return;
    }
    int row = id / width;
    int col = id % width;
    float sum = 0.0f;
    for (int i = 0; i < kernelSize; ++i) {
        for (int j = 0; j < kernelSize; ++j) {
            int curRow = row - kernelSize / 2 + i;
            int curCol = col - kernelSize / 2 + j;
            // Zero padding: skip neighbors that fall outside the image.
            if (curRow >= 0 && curCol >= 0 && curRow < height && curCol < width) {
                sum += kernel[i * kernelSize + j] * img[curRow * width + curCol];
            }
        }
    }
    result[id] = sum; // overwrite instead of += into uninitialized memory
}
// Builds a deterministic test image and a 3x3 filter, runs the conv kernel
// on the device, and prints slices of the input, filter, and result.
int main() {
    int width = 1920;
    int height = 1080;
    // Host image: deterministic pattern in [0, 255].
    float* img = new float[width * height];
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++) {
            img[row * width + col] = (col + row) % 256;
        }
    }
    // 3x3 filter whose columns are {-1, 0, 1}.
    int kernelSize = 3;
    float* kernel = new float[kernelSize * kernelSize];
    for (int i = 0; i < kernelSize * kernelSize; i++)
    {
        kernel[i] = i % kernelSize - 1;
    }
    float *d_img;
    float *d_kernel;
    float *d_result;
    hipMalloc((void**)&d_img, width * height * sizeof(float));
    hipMalloc((void**)&d_kernel, kernelSize * kernelSize * sizeof(float));
    hipMalloc((void**)&d_result, width * height * sizeof(float));
    hipMemcpy(d_img, img, width * height * sizeof(float), hipMemcpyHostToDevice);
    // BUGFIX: upload the filter weights; the original copied `img` into
    // d_kernel, so the device convolved the image with image data.
    hipMemcpy(d_kernel, kernel, kernelSize * kernelSize * sizeof(float), hipMemcpyHostToDevice);
    // Zero the output buffer: the kernel accumulates into result with +=.
    hipMemset(d_result, 0, width * height * sizeof(float));
    int threadNum = getNum(); // max threads per block reported by device 0
    // Integer ceiling division replaces the fragile float expression.
    int blockNum = (width * height + threadNum - 1) / threadNum;
    conv << <blockNum, threadNum >> >
        (d_img, d_kernel, d_result, width, height, kernelSize);
    float* result = new float[width * height];
    hipMemcpy(result, d_result,
        width * height * sizeof(float), hipMemcpyDeviceToHost);
    // Visualization: top-left corners of input, filter, and output.
    printf("img\n");
    for (int row = 0; row < 10; row++)
    {
        for (int col = 0; col < 10; col++) {
            printf("%2.0f ", img[col + row * width]);
        }
        printf("\n");
    }
    printf("kernel\n");
    for (int row = 0; row < kernelSize; row++)
    {
        for (int col = 0; col < kernelSize; col++) {
            printf("%2.0f ", kernel[col + row * kernelSize]);
        }
        printf("\n");
    }
    printf("result\n");
    for (int row = 0; row < 5; row++)
    {
        for (int col = 0; col < 5; col++) {
            printf("%2.0f ", result[col + row * width]);
        }
        printf("\n");
    }
    printf("result: %2.0f", result[0]);
    // Release device and host buffers (the original leaked all of them).
    hipFree(d_img);
    hipFree(d_kernel);
    hipFree(d_result);
    delete[] img;
    delete[] kernel;
    delete[] result;
    return 0;
} | 5c3749adc95988f073ee721e235260641bb699e1.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size);
// Queries device 0 and returns its maximum threads-per-block; also prints
// the maximum grid dimensions for reference.
int getNum() {
cudaDeviceProp prop;
// NOTE(review): `num` receives the device count but is never used, and the
// return codes of these runtime calls are not checked.
int num;
cudaGetDeviceCount(&num);
cudaGetDeviceProperties(&prop, 0);
printf("thread num = %d\n", prop.maxThreadsPerBlock);
printf("thread num = %d , %d,%d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
//printf("thread num = %d", prop.maxThreadsPerBlock);
return prop.maxThreadsPerBlock;
}
// Element-wise vector add; written for a single-block launch, so
// threadIdx.x alone identifies the element. Unused in this program's main.
__global__ void addKernel(int* c, const int* a, const int* b)
{
int i = threadIdx.x;//single block launch: threadIdx.x alone indexes the element (textbook p. 31)
c[i] = a[i] + b[i];
}
// One thread per output pixel: computes a kernelSize x kernelSize
// cross-correlation of `img` (width x height, row-major) centered on the
// pixel, treating out-of-bounds neighbors as zero (zero padding).
// Fixes vs. the original: the loop bounds use kernelSize instead of a
// hard-coded 3, and the sum is accumulated in a register and stored once,
// so the output no longer depends on the previous (possibly uninitialized)
// contents of `result`.
__global__ void conv(float *img, float *kernel,
float *result, int width, int height, int kernelSize) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= width * height) {
        return;
    }
    int row = id / width;
    int col = id % width;
    float sum = 0.0f;
    for (int i = 0; i < kernelSize; ++i) {
        for (int j = 0; j < kernelSize; ++j) {
            int curRow = row - kernelSize / 2 + i;
            int curCol = col - kernelSize / 2 + j;
            // Zero padding: skip neighbors that fall outside the image.
            if (curRow >= 0 && curCol >= 0 && curRow < height && curCol < width) {
                sum += kernel[i * kernelSize + j] * img[curRow * width + curCol];
            }
        }
    }
    result[id] = sum; // overwrite instead of += into uninitialized memory
}
// Builds a deterministic test image and a 3x3 filter, runs the conv kernel
// on the device, and prints slices of the input, filter, and result.
int main() {
    int width = 1920;
    int height = 1080;
    // Host image: deterministic pattern in [0, 255].
    float* img = new float[width * height];
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++) {
            img[row * width + col] = (col + row) % 256;
        }
    }
    // 3x3 filter whose columns are {-1, 0, 1}.
    int kernelSize = 3;
    float* kernel = new float[kernelSize * kernelSize];
    for (int i = 0; i < kernelSize * kernelSize; i++)
    {
        kernel[i] = i % kernelSize - 1;
    }
    float *d_img;
    float *d_kernel;
    float *d_result;
    cudaMalloc((void**)&d_img, width * height * sizeof(float));
    cudaMalloc((void**)&d_kernel, kernelSize * kernelSize * sizeof(float));
    cudaMalloc((void**)&d_result, width * height * sizeof(float));
    cudaMemcpy(d_img, img, width * height * sizeof(float), cudaMemcpyHostToDevice);
    // BUGFIX: upload the filter weights; the original copied `img` into
    // d_kernel, so the device convolved the image with image data.
    cudaMemcpy(d_kernel, kernel, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
    // Zero the output buffer: the kernel accumulates into result with +=.
    cudaMemset(d_result, 0, width * height * sizeof(float));
    int threadNum = getNum(); // max threads per block reported by device 0
    // Integer ceiling division replaces the fragile float expression.
    int blockNum = (width * height + threadNum - 1) / threadNum;
    conv << <blockNum, threadNum >> >
        (d_img, d_kernel, d_result, width, height, kernelSize);
    float* result = new float[width * height];
    cudaMemcpy(result, d_result,
        width * height * sizeof(float), cudaMemcpyDeviceToHost);
    // Visualization: top-left corners of input, filter, and output.
    printf("img\n");
    for (int row = 0; row < 10; row++)
    {
        for (int col = 0; col < 10; col++) {
            printf("%2.0f ", img[col + row * width]);
        }
        printf("\n");
    }
    printf("kernel\n");
    for (int row = 0; row < kernelSize; row++)
    {
        for (int col = 0; col < kernelSize; col++) {
            printf("%2.0f ", kernel[col + row * kernelSize]);
        }
        printf("\n");
    }
    printf("result\n");
    for (int row = 0; row < 5; row++)
    {
        for (int col = 0; col < 5; col++) {
            printf("%2.0f ", result[col + row * width]);
        }
        printf("\n");
    }
    printf("result: %2.0f", result[0]);
    // Release device and host buffers (the original leaked all of them).
    cudaFree(d_img);
    cudaFree(d_kernel);
    cudaFree(d_result);
    delete[] img;
    delete[] kernel;
    delete[] result;
    return 0;
} |
651aca16bc6bb5db48bce5d62a36121c9ed96492.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<iostream>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
// Dimensions of a dense 2D matrix stored in row-major order.
typedef struct {
int dimension1; // number of rows
int dimension2; // number of columns
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void computeGpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(hipError_t e);
//----------------------------------- CUDA function definitions -----------------------------------------
//-------------------------------------------------------------------------------------------------------
// Reads matrix dimensions from argv (defaults: 100-square), initializes A
// and B, then times a CPU multiply and a "GPU" multiply and compares them.
// NOTE(review): computeGpuMMM currently runs entirely on the host (see its
// definition), and copyMatricesToGPU/copyResultFromGPU are never called, so
// no device work or transfer is actually timed here.
int main(int argc, char **argv) {
A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
C_MD.dimension1 = A_MD.dimension1;
C_MD.dimension2 = B_MD.dimension2;
printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
allocateAndInitializeAB();
// matrix matrix multiplication in the CPU
clock_t start = clock();
computeCpuMMM();
clock_t end = clock();
double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the CPU: %f seconds\n", elapsed);
start = clock();
computeGpuMMM();
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the GPU: %f seconds\n", elapsed);
compareHostAndGpuOutput();
return 0;
}
// allocate and initialize A and B using a random number generator
// Allocates A and B (plus the A_GPU/B_GPU mirrors) and fills both copies
// with identical random values in [0, 0.999].
// NOTE(review): despite their names, A_GPU and B_GPU are plain host buffers
// here (malloc); copyMatricesToGPU later overwrites these pointers with
// device allocations, leaking this memory if both are used.
void allocateAndInitializeAB() {
float val;
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
A = (float*) malloc(sizeofA);
A_GPU = (float*) malloc(sizeofA);
srand(time(NULL));
for (int i = 0; i < A_MD.dimension1; i++) {
for (int j = 0; j < A_MD.dimension2; j++) {
int index = i * A_MD.dimension2 + j;
val = (rand() % 1000) * 0.001;
A[index] = val;
A_GPU[index] = val;
}
}
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
B = (float*) malloc(sizeofB);
B_GPU = (float*) malloc(sizeofB);
for (int i = 0; i < B_MD.dimension1; i++) {
for (int j = 0; j < B_MD.dimension2; j++) {
int index = i * B_MD.dimension2 + j;
val = (rand() % 1000) * 0.001;
B[index] = val;
B_GPU[index] = val;
}
}
}
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
// Allocates device memory for A, B and C and uploads A and B.
// NOTE(review): never called by main in this file; it also overwrites the
// host buffers that allocateAndInitializeAB stored in A_GPU/B_GPU, leaking
// them.
void copyMatricesToGPU() {
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
check_error(hipMalloc((void **) &A_GPU, sizeofA));
check_error(hipMemcpy(A_GPU, A, sizeofA, hipMemcpyHostToDevice));
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
check_error(hipMalloc((void **) &B_GPU, sizeofB));
check_error(hipMemcpy(B_GPU, B, sizeofB, hipMemcpyHostToDevice));
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
check_error(hipMalloc((void **) &C_GPU, sizeofC));
}
// copy results from C_GPU which is in GPU card memory to C_CPU which is in the host CPU for result comparison
// Allocates C_CPU on the host and downloads the device result C_GPU into it.
// NOTE(review): never called by main in this file, so C_CPU stays unset
// elsewhere unless this runs.
void copyResultFromGPU() {
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_CPU = (float*) malloc(sizeofC);
check_error(hipMemcpy(C_CPU, C_GPU, sizeofC, hipMemcpyDeviceToHost));
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// Reference host implementation: allocates the global result matrix C and
// computes C = A * B with a plain triple loop (row-major, no blocking).
void computeCpuMMM() {
    size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
    C = (float*) malloc(sizeofC);
    for (int row = 0; row < A_MD.dimension1; row++) {
        const int aRowStart = row * A_MD.dimension2;
        const int cRowStart = row * C_MD.dimension2;
        for (int col = 0; col < B_MD.dimension2; col++) {
            // C[row][col] = sum over k of A[row][k] * B[k][col]
            float acc = 0;
            for (int k = 0; k < B_MD.dimension1; k++) {
                acc += A[aRowStart + k] * B[k * B_MD.dimension2 + col];
            }
            C[cRowStart + col] = acc;
        }
    }
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// NOTE(review): despite its name, this runs entirely on the host - it
// allocates C_GPU with malloc and multiplies the host mirrors A_GPU/B_GPU
// in a plain triple loop. Presumably a placeholder for a real kernel launch;
// it also repoints the global C_GPU away from any prior allocation.
void computeGpuMMM() {
// allocate the result matrix for the CPU computation
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_GPU = (float*) malloc(sizeofC);
// compute C[i][j] as the sum of A[i][k] * B[k][j] for all columns k of A
for (int i = 0; i < A_MD.dimension1; i++) {
int a_i = i * A_MD.dimension2;
int c_i = i * C_MD.dimension2;
for (int j = 0; j < B_MD.dimension2; j++) {
int c_index = c_i + j;
C_GPU[c_index] = 0;
for (int k = 0; k < B_MD.dimension1; k++) {
int a_index = a_i + k;
int b_index = k * B_MD.dimension2 + j;
C_GPU[c_index] += A_GPU[a_index] * B_GPU[b_index];
}
}
}
}
// function to determine if the GPU computation is done correctly by comparing the output from the GPU with that
// from the CPU
// Compares the CPU result (C) against the "GPU" result (C_GPU) element-wise
// with an absolute tolerance of 0.01 and reports the number of mismatches.
// Fixes vs. the original: the mismatch message now prints C_GPU[i] (the
// value actually compared) instead of C_CPU[i], which this program never
// fills in; the per-element index printf (debug spam over every element)
// is removed.
void compareHostAndGpuOutput() {
    int totalElements = C_MD.dimension1 * C_MD.dimension2;
    int missmatchCount = 0;
    for (int i = 0; i < totalElements; i++) {
        if (fabs(C[i] - C_GPU[i]) > 0.01) {
            missmatchCount++;
            printf("mismatch at index %i: %f\t%f\n", i, C[i], C_GPU[i]);
        }
    }
    if (missmatchCount > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
// Prints the specified error message and then exits
// Prints the specified error message and then exits with a failure status.
// Fix: the message goes to stderr (the original wrote it to stdout, where it
// could be lost among the program's normal output or buffered on exit).
void die(const char *error) {
    fprintf(stderr, "%s", error);
    exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
// If the specified error code refers to a real error, report it and quit
// the program; a success code is a no-op.
void check_error(hipError_t e) {
    if (e == hipSuccess) {
        return;
    }
    printf("\nCUDA error: %s\n", hipGetErrorString(e));
    exit(1);
}
| 651aca16bc6bb5db48bce5d62a36121c9ed96492.cu | #include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<iostream>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
// Dimensions of a dense 2D matrix stored in row-major order.
typedef struct {
int dimension1; // number of rows
int dimension2; // number of columns
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void computeGpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);
//----------------------------------- CUDA function definitions -----------------------------------------
//-------------------------------------------------------------------------------------------------------
// Reads matrix dimensions from argv (defaults: 100-square), initializes A
// and B, then times a CPU multiply and a "GPU" multiply and compares them.
// NOTE(review): copyMatricesToGPU/copyResultFromGPU are never called here,
// so no device transfer happens before the "GPU" timing section.
int main(int argc, char **argv) {
A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
C_MD.dimension1 = A_MD.dimension1;
C_MD.dimension2 = B_MD.dimension2;
printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
allocateAndInitializeAB();
// matrix matrix multiplication in the CPU
clock_t start = clock();
computeCpuMMM();
clock_t end = clock();
double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the CPU: %f seconds\n", elapsed);
start = clock();
computeGpuMMM();
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the GPU: %f seconds\n", elapsed);
compareHostAndGpuOutput();
return 0;
}
// allocate and initialize A and B using a random number generator
// Allocates A and B (plus the A_GPU/B_GPU mirrors) and fills both copies
// with identical random values in [0, 0.999].
// NOTE(review): despite their names, A_GPU and B_GPU are plain host buffers
// here (malloc); copyMatricesToGPU later overwrites these pointers with
// device allocations, leaking this memory if both are used.
void allocateAndInitializeAB() {
float val;
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
A = (float*) malloc(sizeofA);
A_GPU = (float*) malloc(sizeofA);
srand(time(NULL));
for (int i = 0; i < A_MD.dimension1; i++) {
for (int j = 0; j < A_MD.dimension2; j++) {
int index = i * A_MD.dimension2 + j;
val = (rand() % 1000) * 0.001;
A[index] = val;
A_GPU[index] = val;
}
}
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
B = (float*) malloc(sizeofB);
B_GPU = (float*) malloc(sizeofB);
for (int i = 0; i < B_MD.dimension1; i++) {
for (int j = 0; j < B_MD.dimension2; j++) {
int index = i * B_MD.dimension2 + j;
val = (rand() % 1000) * 0.001;
B[index] = val;
B_GPU[index] = val;
}
}
}
// Allocates device buffers for A, B and C and uploads the host contents of
// A and B to the card.
// NOTE(review): this reassigns the A_GPU/B_GPU pointers that
// allocateAndInitializeAB() filled via malloc(), leaking those host buffers,
// and the function is never called from main() in this file -- it appears to
// be scaffolding for a real GPU implementation; confirm intent.
void copyMatricesToGPU() {
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &A_GPU, sizeofA));
check_error(cudaMemcpy(A_GPU, A, sizeofA, cudaMemcpyHostToDevice));
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &B_GPU, sizeofB));
check_error(cudaMemcpy(B_GPU, B, sizeofB, cudaMemcpyHostToDevice));
// C is output-only: allocated on the device but not initialized here.
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &C_GPU, sizeofC));
}
// Downloads the device result C_GPU into a freshly malloc'd host buffer
// C_CPU so it can be compared against the CPU reference.
// NOTE(review): never called from main() in this file, yet
// compareHostAndGpuOutput() originally printed C_CPU -- verify the intended
// call sequence before relying on C_CPU.
void copyResultFromGPU() {
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_CPU = (float*) malloc(sizeofC);
check_error(cudaMemcpy(C_CPU, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
}
// Reference host implementation of C = A * B using the classic triple loop:
// C[i][j] accumulates A[i][k] * B[k][j] over k. Deliberately naive (no cache
// blocking); allocates the result matrix C before computing.
void computeCpuMMM() {
    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
    C = (float*) malloc(bytesC);
    for (int i = 0; i < A_MD.dimension1; i++) {
        int rowBaseA = i * A_MD.dimension2;
        int rowBaseC = i * C_MD.dimension2;
        for (int j = 0; j < B_MD.dimension2; j++) {
            float *cell = &C[rowBaseC + j];
            *cell = 0;
            for (int k = 0; k < B_MD.dimension1; k++) {
                *cell += A[rowBaseA + k] * B[k * B_MD.dimension2 + j];
            }
        }
    }
}
// NOTE(review): despite its name, this routine runs entirely on the host: it
// malloc()s C_GPU in host memory and performs the same naive triple-loop
// multiplication as computeCpuMMM(), just over the A_GPU/B_GPU host copies.
// It reads like a placeholder meant to be replaced by a real kernel launch
// (copyMatricesToGPU / copyResultFromGPU exist but are unused) -- confirm.
void computeGpuMMM() {
// allocate the result matrix for this (host-side) computation
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_GPU = (float*) malloc(sizeofC);
// compute C[i][j] as the sum of A[i][k] * B[k][j] for all columns k of A
for (int i = 0; i < A_MD.dimension1; i++) {
int a_i = i * A_MD.dimension2;
int c_i = i * C_MD.dimension2;
for (int j = 0; j < B_MD.dimension2; j++) {
int c_index = c_i + j;
C_GPU[c_index] = 0;
for (int k = 0; k < B_MD.dimension1; k++) {
int a_index = a_i + k;
int b_index = k * B_MD.dimension2 + j;
C_GPU[c_index] += A_GPU[a_index] * B_GPU[b_index];
}
}
}
}
// Verifies the GPU-path result against the CPU reference: counts elements of
// C and C_GPU that differ by more than 0.01 (absolute) and reports either the
// mismatch count or success.
// Fixes vs. the original:
//  - the mismatch report printed C_CPU[i], a buffer that is never allocated
//    on this code path (copyResultFromGPU is not called) -- it now prints the
//    value that was actually compared, C_GPU[i];
//  - removed a leftover printf("%d\n", i) that spammed every index.
void compareHostAndGpuOutput() {
    int totalElements = C_MD.dimension1 * C_MD.dimension2;
    int missmatchCount = 0;
    for (int i = 0; i < totalElements; i++) {
        if (fabs(C[i] - C_GPU[i]) > 0.01) {
            missmatchCount++;
            printf("mismatch at index %i: %f\t%f\n", i, C[i], C_GPU[i]);
        }
    }
    if (missmatchCount > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
// Prints the specified error message to stdout and terminates the process
// with exit status 1. Does not return.
void die(const char *error) {
printf("%s", error);
exit(1);
}
// If the specified CUDA status code refers to a real error, print its
// human-readable description and terminate the program with status 1;
// otherwise do nothing. Intended to wrap every CUDA runtime API call.
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
|
9d1aa2a2e6549bc1888c9217e02a4e7b34deee0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// #include <hip/hip_runtime.h>
// #include <device_launch_parameters.h>
// Debug kernel: each thread prints its own thread and block coordinates.
// The input buffer is accepted but unused here (see print_details for the
// variant that reads it). Output ordering across threads is unspecified.
__global__ void print(int* input) {
// printf("gridDim.x : %d , gridDim.y : %d , gridDim.z : %d \n", gridDim.x,
// gridDim.y, gridDim.z);
// printf("blockDim.x : %d , blockDim.y : %d , blockDim.z : %d \n",
// blockDim.x, blockDim.y, blockDim.z);
printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d\n", threadIdx.x,
threadIdx.y, threadIdx.z);
printf("blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d \n", blockIdx.x,
blockIdx.y, blockIdx.z);
}
// Debug kernel: computes a unique global linear index (gid) for each thread
// of a 3D grid of 3D blocks by stacking three offsets -- block offset within
// a grid row (x), row offset within an xy-plane (y), and plane offset (z) --
// on top of the thread's linear index within its block, then prints the
// indices and the input element at gid. Assumes input has at least
// gridDim*blockDim total elements.
__global__ void print_details(int* input) {
/*int gid =
(threadIdx.z * blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) +
(threadIdx.x) +
(blockDim.x * blockDim.y * blockDim.z * blockIdx.x) +
(blockDim.x * blockDim.y * blockDim.z * gridDim.x * blockIdx.y) +
(blockDim.x * blockDim.y * blockDim.z * gridDim.x * gridDim.y *
blockIdx.z);*/
// Linear index of this thread inside its own block (z-major, then y, then x).
int tid = (threadIdx.z * blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) + threadIdx.x;
int num_of_thread_in_a_block = blockDim.x * blockDim.y * blockDim.z;
int block_offset = num_of_thread_in_a_block * blockIdx.x;
int num_of_threads_in_a_row = num_of_thread_in_a_block * gridDim.x;
int row_offset = num_of_threads_in_a_row * blockIdx.y;
int num_of_thread_in_xy = num_of_thread_in_a_block * gridDim.x * gridDim.y;
int z_offset = num_of_thread_in_xy * blockIdx.z;
int gid = tid + block_offset + row_offset + z_offset;
printf("tid : %d , gid : %d , value : %d \n", tid, gid, input[gid]);
}
// Host driver: builds a 4x16x32 (= 2048) array of random bytes, uploads it,
// and launches the `print` debug kernel over a 2x2x2 grid of 2x8x16 blocks
// (block/grid dims chosen to divide x/y/z exactly).
// NOTE(review): API return codes are unchecked, and malloc/srand/time are
// used with only <stdio.h> visibly included -- presumably the HIP/CUDA
// toolchain pulls those in transitively; confirm.
int main() {
int x = 4;
int y = 16;
int z = 32;
int size = x * y * z;
int byte_size = size * sizeof(int);
int* h_input;
h_input = (int*)malloc(byte_size);
time_t t;
srand((unsigned)time(&t));
// Fill with random values in [0, 255].
for (int i = 0; i < size; i++) {
h_input[i] = (int)(rand() & 0xff);
}
int* d_input;
hipMalloc((void**)&d_input, byte_size);
hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice);
dim3 block_dim(2, 8, 16);
dim3 grid_dim(x / block_dim.x, y / block_dim.y, z / block_dim.z);
// print_details <<< grid_dim, block_dim >>> (d_input);
hipLaunchKernelGGL(( print), dim3(grid_dim), dim3(block_dim), 0, 0, d_input);
hipDeviceSynchronize();
hipFree(d_input);
free(h_input);
hipDeviceReset();
}
| 9d1aa2a2e6549bc1888c9217e02a4e7b34deee0b.cu | #include <stdio.h>
// #include <cuda_runtime.h>
// #include <device_launch_parameters.h>
// Debug kernel: each thread prints its thread and block coordinates. The
// input buffer is accepted but unused here. Print order is unspecified.
__global__ void print(int* input) {
// printf("gridDim.x : %d , gridDim.y : %d , gridDim.z : %d \n", gridDim.x,
// gridDim.y, gridDim.z);
// printf("blockDim.x : %d , blockDim.y : %d , blockDim.z : %d \n",
// blockDim.x, blockDim.y, blockDim.z);
printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d\n", threadIdx.x,
threadIdx.y, threadIdx.z);
printf("blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d \n", blockIdx.x,
blockIdx.y, blockIdx.z);
}
// Debug kernel: derives a unique global linear index (gid) for each thread
// of a 3D grid of 3D blocks -- in-block index plus block/row/plane offsets --
// and prints it together with input[gid]. Assumes input holds at least
// gridDim*blockDim total elements.
__global__ void print_details(int* input) {
/*int gid =
(threadIdx.z * blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) +
(threadIdx.x) +
(blockDim.x * blockDim.y * blockDim.z * blockIdx.x) +
(blockDim.x * blockDim.y * blockDim.z * gridDim.x * blockIdx.y) +
(blockDim.x * blockDim.y * blockDim.z * gridDim.x * gridDim.y *
blockIdx.z);*/
// Linear index within the block (z-major, then y, then x).
int tid = (threadIdx.z * blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) + threadIdx.x;
int num_of_thread_in_a_block = blockDim.x * blockDim.y * blockDim.z;
int block_offset = num_of_thread_in_a_block * blockIdx.x;
int num_of_threads_in_a_row = num_of_thread_in_a_block * gridDim.x;
int row_offset = num_of_threads_in_a_row * blockIdx.y;
int num_of_thread_in_xy = num_of_thread_in_a_block * gridDim.x * gridDim.y;
int z_offset = num_of_thread_in_xy * blockIdx.z;
int gid = tid + block_offset + row_offset + z_offset;
printf("tid : %d , gid : %d , value : %d \n", tid, gid, input[gid]);
}
// Host driver: builds a 4x16x32 (= 2048) array of random bytes, uploads it,
// and launches the `print` debug kernel over a 2x2x2 grid of 2x8x16 blocks.
// NOTE(review): CUDA API return codes are unchecked, and malloc/srand/time
// are used with only <stdio.h> visibly included -- presumably nvcc pulls
// those headers in transitively; confirm.
int main() {
int x = 4;
int y = 16;
int z = 32;
int size = x * y * z;
int byte_size = size * sizeof(int);
int* h_input;
h_input = (int*)malloc(byte_size);
time_t t;
srand((unsigned)time(&t));
// Fill with random values in [0, 255].
for (int i = 0; i < size; i++) {
h_input[i] = (int)(rand() & 0xff);
}
int* d_input;
cudaMalloc((void**)&d_input, byte_size);
cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice);
dim3 block_dim(2, 8, 16);
dim3 grid_dim(x / block_dim.x, y / block_dim.y, z / block_dim.z);
// print_details <<< grid_dim, block_dim >>> (d_input);
print<<<grid_dim, block_dim>>>(d_input);
cudaDeviceSynchronize();
cudaFree(d_input);
free(h_input);
cudaDeviceReset();
}
|
7348a61a9a185a0bf0086128385174faf3097745.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 - 2021 MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "gmm.h"
#include "gmm_cuda_linalg.cuh"
#define EPSILON 1e-5
#define BLOCK_SIZE 32
#define TILE(SIZE, STRIDE) ((((SIZE) - 1)/(STRIDE)) + 1)
// Stage 1 of the (co)variance computation: each block accumulates, for one
// Gaussian (gaussian_index), the partial sums of the upper-triangular outer
// product [1, pixel]^T [1, pixel] over the pixels assigned to that Gaussian
// (per the packed alpha labels), then reduces them warp-by-warp through
// shared memory and writes one partial matrix per block to g_matrices.
// Launch: grid (block_count, 1, batch), block of warp_count*32 threads;
// blockIdx.z selects the batch item.
template<int warp_count, int load_count>
__global__ void CovarianceReductionKernel(int gaussian_index, const float* g_image, const int* g_alpha, float* g_matrices, int element_count)
{
constexpr int block_size = warp_count * 32;
// One partial per warp for the cross-warp reduction step.
__shared__ float s_matrix_component[warp_count];
int batch_index = blockIdx.z;
const float* g_batch_image = g_image + batch_index * element_count * CHANNEL_COUNT;
const int* g_batch_alpha = g_alpha + batch_index * element_count;
float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * gridDim.x;
int local_index = threadIdx.x;
int block_index = blockIdx.x;
int warp_index = local_index >> 5;
int lane_index = local_index & 31;
int global_index = local_index + block_index * block_size * load_count;
int matrix_offset = (gaussian_index * gridDim.x + block_index) * GMM_COMPONENT_COUNT;
// Per-thread upper-triangular accumulator (includes the count/mean row).
float matrix[MATRIX_COMPONENT_COUNT];
for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++)
{
matrix[i] = 0;
}
for (int load = 0; load < load_count; load++)
{
// NOTE(review): `+= load * block_size` accumulates a triangular stride
// (offsets 0, B, 3B, 6B, ...) rather than the uniform `load * block_size`
// offsets the launch geometry implies; this skips some chunks and can
// overlap the next block's range -- confirm against upstream MONAI.
global_index += load * block_size;
if (global_index < element_count)
{
int my_alpha = g_batch_alpha[global_index];
if (my_alpha != -1)
{
// alpha packs (mixture in low 4 bits, component in the rest).
if (gaussian_index == (my_alpha & 15) + (my_alpha >> 4) * MIXTURE_COUNT)
{
// feature = [1, channel_0, ..., channel_{C-1}] so row 0 of the outer
// product carries the pixel count and per-channel sums.
float feature[CHANNEL_COUNT + 1];
feature[0] = 1;
for (int i = 0; i < CHANNEL_COUNT; i++)
{
feature[i + 1] = g_batch_image[global_index + i * element_count];
}
for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++)
{
for (int j = i; j < CHANNEL_COUNT + 1; j++, index++)
{
matrix[index] += feature[i] * feature[j];
}
}
}
}
}
}
__syncthreads();
// Reduce each triangular component: shuffle within warps, stage per-warp
// partials in shared memory, then warp 0 folds them and lane 0 stores.
for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++)
{
float matrix_component = matrix[i];
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1);
if (lane_index == 0)
{
s_matrix_component[warp_index] = matrix_component;
}
__syncthreads();
if (warp_index == 0)
{
matrix_component = s_matrix_component[lane_index];
if (warp_count >= 32) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16); }
if (warp_count >= 16) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8); }
if (warp_count >= 8) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4); }
if (warp_count >= 4) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2); }
if (warp_count >= 2) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1); }
if (lane_index == 0)
{
g_batch_matrices[matrix_offset + i] = matrix_component;
}
}
__syncthreads();
}
}
// Stage 2: one block per Gaussian sums that Gaussian's per-block partial
// matrices (from CovarianceReductionKernel), normalizes by the pixel count
// (component 0), subtracts mean*mean to turn raw moments into a covariance
// (with -EPSILON regularization on the diagonal), then on thread 0 computes
// the Cholesky-based determinant and, if invert_matrix, inverts the
// covariance in place. Result (count/mean/cov/det) is written to g_gmm.
// Launch: grid (gaussian_count, 1, batch), block of warp_count*32 threads.
template<int warp_count, bool invert_matrix>
__global__ void CovarianceFinalizationKernel(const float* g_matrices, float* g_gmm, int matrix_count)
{
constexpr int block_size = warp_count * 32;
__shared__ float s_matrix_component[warp_count];
// Finalized GMM record staged in shared memory before the global store.
__shared__ float s_gmm[GMM_COMPONENT_COUNT];
int batch_index = blockIdx.z;
const float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * matrix_count;
float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
int local_index = threadIdx.x;
int warp_index = local_index >> 5;
int lane_index = local_index & 31;
int gmm_index = blockIdx.x;
int matrix_offset = gmm_index * matrix_count;
int load_count = TILE(matrix_count, block_size);
// 1/count once component 0 (the pixel count) is known; components after 0
// are therefore stored as averages.
float norm_factor = 1.0f;
for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++)
{
for (int j = i; j < CHANNEL_COUNT + 1; j++, index++)
{
float matrix_component = 0.0f;
for(int load = 0; load < load_count; load++)
{
int matrix_index = local_index + load * block_size;
if(matrix_index < matrix_count)
{
matrix_component += g_batch_matrices[(matrix_offset + matrix_index) * GMM_COMPONENT_COUNT + index];
}
}
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1);
if (lane_index == 0)
{
s_matrix_component[warp_index] = matrix_component;
}
__syncthreads();
if (warp_index == 0)
{
matrix_component = s_matrix_component[lane_index];
if (warp_count >= 32) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16); }
if (warp_count >= 16) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8); }
if (warp_count >= 8) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4); }
if (warp_count >= 4) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2); }
if (warp_count >= 2) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1); }
if (lane_index == 0)
{
// E[xy] - E[x]E[y]; means s_gmm[i], s_gmm[j] were finalized in
// earlier (i-major) iterations, so this ordering is load-bearing.
float constant = i == 0 ? 0.0f : s_gmm[i] * s_gmm[j];
if (i != 0 && i == j)
{
constant -= EPSILON;
}
s_gmm[index] = norm_factor * matrix_component - constant;
if (index == 0 && matrix_component > 0)
{
norm_factor = 1.0f / matrix_component;
}
}
}
__syncthreads();
}
}
// Layout of s_gmm: [count, mean(CHANNEL_COUNT), triangular cov, det].
float* matrix = s_gmm + (CHANNEL_COUNT + 1);
float* det_ptr = s_gmm + MATRIX_COMPONENT_COUNT;
if (local_index == 0)
{
float square_mat[CHANNEL_COUNT][CHANNEL_COUNT];
float cholesky_mat[CHANNEL_COUNT][CHANNEL_COUNT];
for(int i = 0; i < CHANNEL_COUNT; i++)
{
for(int j = 0; j < CHANNEL_COUNT; j++)
{
square_mat[i][j] = 0.0f;
cholesky_mat[i][j] = 0.0f;
}
}
to_square(matrix, square_mat);
cholesky(square_mat, cholesky_mat);
*det_ptr = chol_det(cholesky_mat);
if (invert_matrix)
{
chol_inv(cholesky_mat, square_mat);
to_triangle(square_mat, matrix);
}
}
if (local_index < GMM_COMPONENT_COUNT)
{
g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + local_index] = s_gmm[local_index];
}
}
// Record describing how to split one mixture: which component to split
// (idx), the projection threshold along the split direction, and the
// dominant eigenvector of that component's covariance.
struct GMMSplit_t
{
int idx;
float threshold;
float eigenvector[CHANNEL_COUNT];
};
// 1 Block, 32xMIXTURE_COUNT: thread row x = candidate component, row y =
// mixture. For each mixture, finds the component whose covariance has the
// largest principal eigenvalue (warp max via xor-shuffles over the 32 x
// lanes) and records its index, eigenvector, and the mean's projection onto
// that eigenvector as the split threshold.
__global__ void GMMFindSplit(GMMSplit_t *gmmSplit, int gmmK, float *gmm)
{
int batch_index = blockIdx.z;
float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT;
int gmm_idx = threadIdx.x * MIXTURE_COUNT + threadIdx.y;
float eigenvalue = 0;
float eigenvector[CHANNEL_COUNT];
// Only the first gmmK components exist at this point of the split loop.
if (threadIdx.x < gmmK)
{
float* matrix = g_batch_gmm + gmm_idx * GMM_COMPONENT_COUNT + (CHANNEL_COUNT + 1);
largest_eigenpair(matrix, eigenvector, &eigenvalue);
}
float max_value = eigenvalue;
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 16));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 8));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 4));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 2));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 1));
// The winning lane writes the split record for its mixture.
if (max_value == eigenvalue)
{
GMMSplit_t split;
// NOTE(review): this indexes `gmm`, not `g_batch_gmm` -- for batch
// items beyond the first it reads batch 0's mean; confirm vs upstream.
float* average_feature = gmm + gmm_idx * GMM_COMPONENT_COUNT + 1;
split.idx = threadIdx.x;
split.threshold = scalar_prod(average_feature, eigenvector);
for (int i = 0; i < CHANNEL_COUNT; i++)
{
split.eigenvector[i] = eigenvector[i];
}
g_batch_gmmSplit[threadIdx.y] = split;
}
}
#define DO_SPLIT_DEGENERACY 4
// Applies the split decisions: every pixel currently labelled with the
// component being split is re-projected onto that component's dominant
// eigenvector and, if above the threshold, relabelled to the new component
// id (k + mixture). Each thread handles DO_SPLIT_DEGENERACY strided pixels.
__global__ void GMMDoSplit(const GMMSplit_t *gmmSplit, int k, const float *image, int *alpha, int element_count)
{
__shared__ GMMSplit_t s_gmmSplit[MIXTURE_COUNT];
int batch_index = blockIdx.z;
const GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT;
const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT;
int* g_batch_alpha = alpha + batch_index * element_count;
// Cooperative int-wise copy of the split records into shared memory.
int *s_linear = (int *) s_gmmSplit;
int *g_linear = (int *) g_batch_gmmSplit;
// NOTE(review): the guard compares against a BYTE count while copying
// INTs -- with typical sizes all 32 threads pass and threads beyond
// sizeof(s_gmmSplit)/sizeof(int) copy past the struct array; confirm.
if (threadIdx.x < MIXTURE_COUNT * sizeof(GMMSplit_t))
{
s_linear[threadIdx.x] = g_linear[threadIdx.x];
}
__syncthreads();
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE * DO_SPLIT_DEGENERACY;
for (int i = 0; i < DO_SPLIT_DEGENERACY; i++)
{
// NOTE(review): incrementing before first use means indices
// [0, BLOCK_SIZE) of each batch are never visited -- confirm intent.
index += BLOCK_SIZE;
if (index < element_count)
{
int my_alpha = g_batch_alpha[index];
if(my_alpha != -1)
{
int select = my_alpha & 15;
int gmm_idx = my_alpha >> 4;
if (gmm_idx == s_gmmSplit[select].idx)
{
// in the split cluster now
float feature[CHANNEL_COUNT];
for (int i = 0; i < CHANNEL_COUNT; i++)
{
feature[i] = g_batch_image[index + i * element_count];
}
float value = scalar_prod(s_gmmSplit[select].eigenvector, feature);
if (value > s_gmmSplit[select].threshold)
{
// assign pixel to new cluster
g_batch_alpha[index] = k + select;
}
}
}
}
}
}
// Single block, 32xMIXTURE_COUNT: for each mixture (thread row y) computes
// each component's constant Gaussian factor weight_n / (sqrt(det) * total)
// -- i.e. the mixing weight over the normalization -- and stores it in the
// record slot that previously held the determinant. The xor-shuffle sums the
// per-component pixel counts across the 32 x-lanes of the mixture.
__global__ void GMMcommonTerm(float *g_gmm)
{
int batch_index = blockIdx.z;
float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
int gmm_index = (threadIdx.x * MIXTURE_COUNT) + threadIdx.y;
// Component 0 of the record is the pixel count; lanes beyond MIXTURE_SIZE
// contribute 0 to the sum.
float gmm_n = threadIdx.x < MIXTURE_SIZE ? g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT] : 0.0f;
float sum = gmm_n;
sum += __shfl_xor_sync(0xffffffff, sum, 1);
sum += __shfl_xor_sync(0xffffffff, sum, 2);
sum += __shfl_xor_sync(0xffffffff, sum, 4);
sum += __shfl_xor_sync(0xffffffff, sum, 8);
sum += __shfl_xor_sync(0xffffffff, sum, 16);
if (threadIdx.x < MIXTURE_SIZE)
{
float det = g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] + EPSILON;
float commonTerm = det > 0.0f ? gmm_n / (sqrtf(det) * sum) : gmm_n / sum;
g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] = commonTerm;
}
}
// Evaluates one Gaussian component (unnormalized posterior term) for a
// pixel: commonTerm * exp(-0.5 * (x-mu)^T Sigma^-1 (x-mu)), reading the
// record as [count, mean, inverse-cov (upper triangular), commonTerm].
// Off-diagonal triangular entries are counted twice to form the full
// quadratic form.
__device__ float GMMTerm(float* feature, const float *gmm)
{
const float* average_feature = gmm + 1;
const float* matrix = gmm + CHANNEL_COUNT + 1;
float diff[CHANNEL_COUNT];
for (int i = 0; i < CHANNEL_COUNT; i++)
{
diff[i] = feature[i] - average_feature[i];
}
float value = 0.0f;
for (int index = 0, i = 0; i < CHANNEL_COUNT; i++)
{
for (int j = i; j < CHANNEL_COUNT; j++, index++)
{
float term = diff[i] * diff[j] * matrix[index];
value += i == j ? term : 2 * term;
}
}
return gmm[MATRIX_COMPONENT_COUNT] * expf(-0.5 * value);
}
// Per-pixel posterior: sums each mixture's component responses (GMMTerm)
// for the pixel's feature vector and writes the normalized per-mixture
// probability to the output (channel-major, one plane per mixture).
// Launch: 1D grid over pixels, blockIdx.z = batch item.
__global__ void GMMDataTermKernel(const float *image, const float *gmm, float* output, int element_count)
{
int batch_index = blockIdx.z;
const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT;
const float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
float* g_batch_output = output + batch_index * element_count * MIXTURE_COUNT;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= element_count) return;
// Gather the pixel's channels (stored planar: channel-major).
float feature[CHANNEL_COUNT];
for (int i = 0; i < CHANNEL_COUNT; i++)
{
feature[i] = g_batch_image[index + i * element_count];
}
float weights[MIXTURE_COUNT];
float weight_total = 0.0f;
for(int i = 0; i < MIXTURE_COUNT; i++)
{
float mixture_weight = 0.0f;
for(int j = 0; j < MIXTURE_SIZE; j++)
{
mixture_weight += GMMTerm(feature, &g_batch_gmm[(MIXTURE_COUNT * j + i) * GMM_COMPONENT_COUNT]);
}
weights[i] = mixture_weight;
weight_total += mixture_weight;
}
for(int i = 0; i < MIXTURE_COUNT; i++)
{
// protecting against pixels with 0 in all mixtures
float final_weight = weight_total > 0.0f ? weights[i] / weight_total : 0.0f;
g_batch_output[index + i * element_count] = final_weight;
}
}
#define THREADS 512
#define WARPS 16
#define BLOCK (WARPS << 5)
#define LOAD 4
// Grows each mixture from 1 to MIXTURE_SIZE components by repeatedly:
// (1) accumulating per-block covariance partials for every current Gaussian,
// (2) finalizing them into per-Gaussian statistics (no inversion yet),
// (3) picking each mixture's component with the largest principal
//     eigenvalue, and (4) relabelling pixels across that eigen-direction.
// scratch_mem is reused both for the per-block partial matrices and for the
// per-mixture split records.
// Fix: the auto-hipified kernel launches were syntactically invalid --
// `dim3({block_count), dim3(1), batch_count}` has mismatched braces and the
// block dimension plus the shared-mem/stream arguments were dropped. They
// are reconstructed here as well-formed hipLaunchKernelGGL calls with the
// intended grid (x, 1, batch_count), the original block sizes, and
// 0 shared memory on the default (null) stream.
void GMMInitialize(const float *image, int *alpha, float *gmm, float *scratch_mem, unsigned int batch_count, unsigned int element_count)
{
    unsigned int block_count = TILE(element_count, BLOCK * LOAD);
    float* block_gmm_scratch = scratch_mem;
    GMMSplit_t* gmm_split_scratch = (GMMSplit_t*) scratch_mem;
    int gmm_N = MIXTURE_COUNT * MIXTURE_SIZE;
    for (unsigned int k = MIXTURE_COUNT; k < gmm_N; k += MIXTURE_COUNT)
    {
        for (unsigned int i = 0; i < k; ++i)
        {
            hipLaunchKernelGGL(HIP_KERNEL_NAME(CovarianceReductionKernel<WARPS, LOAD>),
                               dim3(block_count, 1, batch_count), dim3(BLOCK), 0, 0,
                               i, image, alpha, block_gmm_scratch, element_count);
        }
        hipLaunchKernelGGL(HIP_KERNEL_NAME(CovarianceFinalizationKernel<WARPS, false>),
                           dim3(k, 1, batch_count), dim3(BLOCK), 0, 0,
                           block_gmm_scratch, gmm, block_count);
        hipLaunchKernelGGL(GMMFindSplit,
                           dim3(1, 1, batch_count), dim3(BLOCK_SIZE, MIXTURE_COUNT), 0, 0,
                           gmm_split_scratch, k / MIXTURE_COUNT, gmm);
        hipLaunchKernelGGL(GMMDoSplit,
                           dim3(TILE(element_count, BLOCK_SIZE * DO_SPLIT_DEGENERACY), 1, batch_count),
                           dim3(BLOCK_SIZE), 0, 0,
                           gmm_split_scratch, (k / MIXTURE_COUNT) << 4, image, alpha, element_count);
    }
}
// One full M-step over the final component set: accumulates per-block
// covariance partials for every Gaussian, finalizes them WITH covariance
// inversion (invert_matrix = true, as needed by GMMTerm), and replaces each
// record's determinant with the precomputed common factor.
// Fix: the auto-hipified kernel launches were syntactically invalid
// (`dim3({block_count), dim3(1), batch_count}` -- mismatched braces, block
// dimension and shared-mem/stream arguments dropped); reconstructed as
// well-formed hipLaunchKernelGGL calls on the default stream.
void GMMUpdate(const float *image, int *alpha, float *gmm, float *scratch_mem, unsigned int batch_count, unsigned int element_count)
{
    unsigned int block_count = TILE(element_count, BLOCK * LOAD);
    float* block_gmm_scratch = scratch_mem;
    unsigned int gmm_N = MIXTURE_COUNT * MIXTURE_SIZE;
    for (unsigned int i = 0; i < gmm_N; ++i)
    {
        hipLaunchKernelGGL(HIP_KERNEL_NAME(CovarianceReductionKernel<WARPS, LOAD>),
                           dim3(block_count, 1, batch_count), dim3(BLOCK), 0, 0,
                           i, image, alpha, block_gmm_scratch, element_count);
    }
    hipLaunchKernelGGL(HIP_KERNEL_NAME(CovarianceFinalizationKernel<WARPS, true>),
                       dim3(gmm_N, 1, batch_count), dim3(BLOCK), 0, 0,
                       block_gmm_scratch, gmm, block_count);
    hipLaunchKernelGGL(GMMcommonTerm,
                       dim3(1, 1, batch_count), dim3(BLOCK_SIZE, MIXTURE_COUNT), 0, 0,
                       gmm);
}
// Launches the per-pixel posterior kernel: one thread per pixel, one grid
// z-slice per batch item. Writes MIXTURE_COUNT probability planes per item.
void GMMDataTerm(const float *image, const float *gmm, float* output, unsigned int batch_count, unsigned int element_count)
{
dim3 block(BLOCK_SIZE, 1);
dim3 grid(TILE(element_count, BLOCK_SIZE), 1, batch_count);
hipLaunchKernelGGL(( GMMDataTermKernel), dim3(grid), dim3(block), 0, 0, image, gmm, output, element_count);
}
// Fits the GMM to the labelled pixels: copies the label volume into the
// front of scratch_memory (the labels are consumed destructively by the
// split phase), then runs the initialization/split loop followed by the
// final update pass. All pointers are device memory.
// NOTE(review): the memcpy return code is unchecked and no stream/sync is
// involved -- relies on default-stream ordering.
void learn_cuda(const float* input, const int* labels, float* gmm, float* scratch_memory, unsigned int batch_count, unsigned int element_count)
{
// scratch layout: [alpha labels (batch*element ints)][kernel scratch].
int* alpha = (int*)scratch_memory;
float* scratch_mem = scratch_memory + batch_count * element_count;
hipMemcpyAsync(alpha, labels, batch_count * element_count * sizeof(int), hipMemcpyDeviceToDevice);
GMMInitialize(input, alpha, gmm, scratch_mem, batch_count, element_count);
GMMUpdate(input, alpha, gmm, scratch_mem, batch_count, element_count);
}
// Applies a previously-learned GMM to an image: thin wrapper over
// GMMDataTerm producing per-mixture posterior probabilities.
void apply_cuda(const float* gmm, const float* input, float* output, unsigned int batch_count, unsigned int element_count)
{
GMMDataTerm(input, gmm, output, batch_count, element_count);
}
| 7348a61a9a185a0bf0086128385174faf3097745.cu | /*
Copyright 2020 - 2021 MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "gmm.h"
#include "gmm_cuda_linalg.cuh"
#define EPSILON 1e-5
#define BLOCK_SIZE 32
#define TILE(SIZE, STRIDE) ((((SIZE) - 1)/(STRIDE)) + 1)
// Stage 1 of the (co)variance computation (CUDA twin of the HIP copy above):
// each block accumulates partial sums of the upper-triangular outer product
// [1, pixel]^T [1, pixel] over pixels assigned to `gaussian_index`, reduces
// warp-by-warp through shared memory, and writes one partial matrix per
// block. Launch: grid (block_count, 1, batch), warp_count*32 threads.
// NOTE(review): `global_index += load * block_size` accumulates a triangular
// stride (0, B, 3B, 6B, ...) instead of uniform per-load offsets -- confirm
// against upstream MONAI.
template<int warp_count, int load_count>
__global__ void CovarianceReductionKernel(int gaussian_index, const float* g_image, const int* g_alpha, float* g_matrices, int element_count)
{
constexpr int block_size = warp_count * 32;
__shared__ float s_matrix_component[warp_count];
int batch_index = blockIdx.z;
const float* g_batch_image = g_image + batch_index * element_count * CHANNEL_COUNT;
const int* g_batch_alpha = g_alpha + batch_index * element_count;
float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * gridDim.x;
int local_index = threadIdx.x;
int block_index = blockIdx.x;
int warp_index = local_index >> 5;
int lane_index = local_index & 31;
int global_index = local_index + block_index * block_size * load_count;
int matrix_offset = (gaussian_index * gridDim.x + block_index) * GMM_COMPONENT_COUNT;
float matrix[MATRIX_COMPONENT_COUNT];
for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++)
{
matrix[i] = 0;
}
for (int load = 0; load < load_count; load++)
{
global_index += load * block_size;
if (global_index < element_count)
{
int my_alpha = g_batch_alpha[global_index];
if (my_alpha != -1)
{
if (gaussian_index == (my_alpha & 15) + (my_alpha >> 4) * MIXTURE_COUNT)
{
float feature[CHANNEL_COUNT + 1];
feature[0] = 1;
for (int i = 0; i < CHANNEL_COUNT; i++)
{
feature[i + 1] = g_batch_image[global_index + i * element_count];
}
for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++)
{
for (int j = i; j < CHANNEL_COUNT + 1; j++, index++)
{
matrix[index] += feature[i] * feature[j];
}
}
}
}
}
}
__syncthreads();
// Warp shuffle reduction, then cross-warp fold via shared memory; lane 0 of
// warp 0 stores each component.
for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++)
{
float matrix_component = matrix[i];
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1);
if (lane_index == 0)
{
s_matrix_component[warp_index] = matrix_component;
}
__syncthreads();
if (warp_index == 0)
{
matrix_component = s_matrix_component[lane_index];
if (warp_count >= 32) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16); }
if (warp_count >= 16) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8); }
if (warp_count >= 8) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4); }
if (warp_count >= 4) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2); }
if (warp_count >= 2) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1); }
if (lane_index == 0)
{
g_batch_matrices[matrix_offset + i] = matrix_component;
}
}
__syncthreads();
}
}
// Stage 2 (CUDA twin of the HIP copy above): one block per Gaussian sums its
// per-block partial matrices, normalizes by the pixel count, subtracts
// mean*mean to form a covariance (diagonal regularized by EPSILON), then on
// thread 0 computes the Cholesky determinant and optionally inverts the
// covariance in place; the finalized record is written to g_gmm.
template<int warp_count, bool invert_matrix>
__global__ void CovarianceFinalizationKernel(const float* g_matrices, float* g_gmm, int matrix_count)
{
constexpr int block_size = warp_count * 32;
__shared__ float s_matrix_component[warp_count];
__shared__ float s_gmm[GMM_COMPONENT_COUNT];
int batch_index = blockIdx.z;
const float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * matrix_count;
float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
int local_index = threadIdx.x;
int warp_index = local_index >> 5;
int lane_index = local_index & 31;
int gmm_index = blockIdx.x;
int matrix_offset = gmm_index * matrix_count;
int load_count = TILE(matrix_count, block_size);
// 1/count once component 0 is reduced; later components become averages.
float norm_factor = 1.0f;
for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++)
{
for (int j = i; j < CHANNEL_COUNT + 1; j++, index++)
{
float matrix_component = 0.0f;
for(int load = 0; load < load_count; load++)
{
int matrix_index = local_index + load * block_size;
if(matrix_index < matrix_count)
{
matrix_component += g_batch_matrices[(matrix_offset + matrix_index) * GMM_COMPONENT_COUNT + index];
}
}
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2);
matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1);
if (lane_index == 0)
{
s_matrix_component[warp_index] = matrix_component;
}
__syncthreads();
if (warp_index == 0)
{
matrix_component = s_matrix_component[lane_index];
if (warp_count >= 32) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 16); }
if (warp_count >= 16) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 8); }
if (warp_count >= 8) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 4); }
if (warp_count >= 4) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 2); }
if (warp_count >= 2) { matrix_component += __shfl_down_sync(0xffffffff, matrix_component, 1); }
if (lane_index == 0)
{
// E[xy] - E[x]E[y]; means were finalized in earlier iterations.
float constant = i == 0 ? 0.0f : s_gmm[i] * s_gmm[j];
if (i != 0 && i == j)
{
constant -= EPSILON;
}
s_gmm[index] = norm_factor * matrix_component - constant;
if (index == 0 && matrix_component > 0)
{
norm_factor = 1.0f / matrix_component;
}
}
}
__syncthreads();
}
}
// s_gmm layout: [count, mean(CHANNEL_COUNT), triangular cov, det].
float* matrix = s_gmm + (CHANNEL_COUNT + 1);
float* det_ptr = s_gmm + MATRIX_COMPONENT_COUNT;
if (local_index == 0)
{
float square_mat[CHANNEL_COUNT][CHANNEL_COUNT];
float cholesky_mat[CHANNEL_COUNT][CHANNEL_COUNT];
for(int i = 0; i < CHANNEL_COUNT; i++)
{
for(int j = 0; j < CHANNEL_COUNT; j++)
{
square_mat[i][j] = 0.0f;
cholesky_mat[i][j] = 0.0f;
}
}
to_square(matrix, square_mat);
cholesky(square_mat, cholesky_mat);
*det_ptr = chol_det(cholesky_mat);
if (invert_matrix)
{
chol_inv(cholesky_mat, square_mat);
to_triangle(square_mat, matrix);
}
}
if (local_index < GMM_COMPONENT_COUNT)
{
g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + local_index] = s_gmm[local_index];
}
}
// Record describing how to split one mixture: component to split (idx), the
// projection threshold, and the dominant eigenvector of its covariance.
struct GMMSplit_t
{
int idx;
float threshold;
float eigenvector[CHANNEL_COUNT];
};
// 1 Block, 32xMIXTURE_COUNT: thread row x = candidate component, row y =
// mixture. Finds, per mixture, the component whose covariance has the
// largest principal eigenvalue (warp max via xor-shuffles) and records its
// index, eigenvector, and mean projection as the split threshold.
__global__ void GMMFindSplit(GMMSplit_t *gmmSplit, int gmmK, float *gmm)
{
int batch_index = blockIdx.z;
float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT;
int gmm_idx = threadIdx.x * MIXTURE_COUNT + threadIdx.y;
float eigenvalue = 0;
float eigenvector[CHANNEL_COUNT];
// Only the first gmmK components exist at this point of the split loop.
if (threadIdx.x < gmmK)
{
float* matrix = g_batch_gmm + gmm_idx * GMM_COMPONENT_COUNT + (CHANNEL_COUNT + 1);
largest_eigenpair(matrix, eigenvector, &eigenvalue);
}
float max_value = eigenvalue;
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 16));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 8));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 4));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 2));
max_value = max(max_value, __shfl_xor_sync(0xffffffff, max_value, 1));
// The winning lane writes the split record for its mixture.
if (max_value == eigenvalue)
{
GMMSplit_t split;
// NOTE(review): indexes `gmm`, not `g_batch_gmm` -- batch items beyond
// the first read batch 0's mean; confirm against upstream MONAI.
float* average_feature = gmm + gmm_idx * GMM_COMPONENT_COUNT + 1;
split.idx = threadIdx.x;
split.threshold = scalar_prod(average_feature, eigenvector);
for (int i = 0; i < CHANNEL_COUNT; i++)
{
split.eigenvector[i] = eigenvector[i];
}
g_batch_gmmSplit[threadIdx.y] = split;
}
}
#define DO_SPLIT_DEGENERACY 4
// Applies the split decisions: pixels labelled with the component being
// split are projected onto the split eigenvector and, if above the
// threshold, relabelled to the new component id (k + mixture). Each thread
// handles DO_SPLIT_DEGENERACY strided pixels.
__global__ void GMMDoSplit(const GMMSplit_t *gmmSplit, int k, const float *image, int *alpha, int element_count)
{
__shared__ GMMSplit_t s_gmmSplit[MIXTURE_COUNT];
int batch_index = blockIdx.z;
const GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT;
const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT;
int* g_batch_alpha = alpha + batch_index * element_count;
// Cooperative int-wise copy of the split records into shared memory.
int *s_linear = (int *) s_gmmSplit;
int *g_linear = (int *) g_batch_gmmSplit;
// NOTE(review): guard compares a BYTE count while copying INTs -- threads
// beyond sizeof(s_gmmSplit)/sizeof(int) copy past the struct array; confirm.
if (threadIdx.x < MIXTURE_COUNT * sizeof(GMMSplit_t))
{
s_linear[threadIdx.x] = g_linear[threadIdx.x];
}
__syncthreads();
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE * DO_SPLIT_DEGENERACY;
for (int i = 0; i < DO_SPLIT_DEGENERACY; i++)
{
// NOTE(review): incrementing before first use means indices
// [0, BLOCK_SIZE) of each batch are never visited -- confirm intent.
index += BLOCK_SIZE;
if (index < element_count)
{
int my_alpha = g_batch_alpha[index];
if(my_alpha != -1)
{
int select = my_alpha & 15;
int gmm_idx = my_alpha >> 4;
if (gmm_idx == s_gmmSplit[select].idx)
{
// in the split cluster now
float feature[CHANNEL_COUNT];
for (int i = 0; i < CHANNEL_COUNT; i++)
{
feature[i] = g_batch_image[index + i * element_count];
}
float value = scalar_prod(s_gmmSplit[select].eigenvector, feature);
if (value > s_gmmSplit[select].threshold)
{
// assign pixel to new cluster
g_batch_alpha[index] = k + select;
}
}
}
}
}
}
// Single block, 32xMIXTURE_COUNT
// Computes each component's normalization ("common term"):
//   commonTerm = N_k / (sqrt(det) * sum_j N_j)
// and overwrites the determinant slot (offset MATRIX_COMPONENT_COUNT) with it,
// for later use by GMMTerm. threadIdx.x indexes the component within a mixture,
// threadIdx.y the mixture; per the launch comment blockDim.x is 32, so each row
// of threads is one warp and the full 0xffffffff shuffle mask is valid.
__global__ void GMMcommonTerm(float *g_gmm)
{
int batch_index = blockIdx.z;
float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
// Components of one mixture are interleaved with stride MIXTURE_COUNT.
int gmm_index = (threadIdx.x * MIXTURE_COUNT) + threadIdx.y;
// Slot 0 of a component holds its accumulated sample count N_k; lanes beyond
// MIXTURE_SIZE contribute 0 so the reduction below stays correct.
float gmm_n = threadIdx.x < MIXTURE_SIZE ? g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT] : 0.0f;
// Butterfly (XOR) warp reduction: afterwards every lane holds the mixture-wide
// sum of the component counts.
float sum = gmm_n;
sum += __shfl_xor_sync(0xffffffff, sum, 1);
sum += __shfl_xor_sync(0xffffffff, sum, 2);
sum += __shfl_xor_sync(0xffffffff, sum, 4);
sum += __shfl_xor_sync(0xffffffff, sum, 8);
sum += __shfl_xor_sync(0xffffffff, sum, 16);
if (threadIdx.x < MIXTURE_SIZE)
{
// det slot holds the covariance determinant; EPSILON guards sqrt of ~0.
// NOTE(review): if every component of a mixture is empty, sum == 0 and the
// divisions below yield NaN/Inf — confirm upstream guarantees sum > 0.
float det = g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] + EPSILON;
float commonTerm = det > 0.0f ? gmm_n / (sqrtf(det) * sum) : gmm_n / sum;
g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] = commonTerm;
}
}
// Evaluates the weighted Gaussian density of one GMM component at `feature`:
//   commonTerm * exp(-0.5 * d^T M d),  d = feature - mean
// Component layout (floats): [0] count, [1..CHANNEL_COUNT] mean,
// [CHANNEL_COUNT+1 ...] packed upper-triangular matrix M (presumably the
// inverse covariance — inferred from the quadratic form; confirm against the
// finalization kernel), [MATRIX_COMPONENT_COUNT] common term (see GMMcommonTerm).
__device__ float GMMTerm(float* feature, const float *gmm)
{
const float* average_feature = gmm + 1;
const float* matrix = gmm + CHANNEL_COUNT + 1;
float diff[CHANNEL_COUNT];
for (int i = 0; i < CHANNEL_COUNT; i++)
{
diff[i] = feature[i] - average_feature[i];
}
// Quadratic form over the packed upper triangle; off-diagonal terms are
// doubled because the matrix is symmetric.
float value = 0.0f;
for (int index = 0, i = 0; i < CHANNEL_COUNT; i++)
{
for (int j = i; j < CHANNEL_COUNT; j++, index++)
{
float term = diff[i] * diff[j] * matrix[index];
value += i == j ? term : 2.0f * term;
}
}
// Use a float literal (-0.5f): the original -0.5 promoted the product to
// double, forcing a double-precision multiply on every evaluation.
return gmm[MATRIX_COMPONENT_COUNT] * expf(-0.5f * value);
}
// Computes, for every pixel, the normalized posterior weight of each mixture.
// Launch: 1D grid over pixels, grid z = batch index.
//   image:  channel-major input, CHANNEL_COUNT planes of element_count floats per batch
//   gmm:    fitted parameters, GMM_COUNT components of GMM_COMPONENT_COUNT floats per batch
//   output: mixture-major result, MIXTURE_COUNT planes of element_count floats per batch
__global__ void GMMDataTermKernel(const float *image, const float *gmm, float* output, int element_count)
{
int batch_index = blockIdx.z;
const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT;
const float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT;
float* g_batch_output = output + batch_index * element_count * MIXTURE_COUNT;
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Guard the grid tail: the last block may extend past element_count.
if (index >= element_count) return;
// Gather this pixel's feature vector (channels are stored as separate planes).
float feature[CHANNEL_COUNT];
for (int i = 0; i < CHANNEL_COUNT; i++)
{
feature[i] = g_batch_image[index + i * element_count];
}
// Sum the component densities of each mixture; components of mixture i are
// interleaved with stride MIXTURE_COUNT in the GMM array.
float weights[MIXTURE_COUNT];
float weight_total = 0.0f;
for(int i = 0; i < MIXTURE_COUNT; i++)
{
float mixture_weight = 0.0f;
for(int j = 0; j < MIXTURE_SIZE; j++)
{
mixture_weight += GMMTerm(feature, &g_batch_gmm[(MIXTURE_COUNT * j + i) * GMM_COMPONENT_COUNT]);
}
weights[i] = mixture_weight;
weight_total += mixture_weight;
}
// Normalize across mixtures so the outputs for a pixel sum to 1.
for(int i = 0; i < MIXTURE_COUNT; i++)
{
// protecting against pixels with 0 in all mixtures
float final_weight = weight_total > 0.0f ? weights[i] / weight_total : 0.0f;
g_batch_output[index + i * element_count] = final_weight;
}
}
#define THREADS 512
#define WARPS 16
#define BLOCK (WARPS << 5)
#define LOAD 4
// Grows each mixture from one component to MIXTURE_SIZE components. Every
// round re-estimates the covariance of all current components, selects the
// component to split in each mixture (GMMFindSplit) and moves part of its
// pixels into a fresh component (GMMDoSplit).
void GMMInitialize(const float *image, int *alpha, float *gmm, float *scratch_mem, unsigned int batch_count, unsigned int element_count)
{
    unsigned int reduction_blocks = TILE(element_count, BLOCK * LOAD);
    // The scratch buffer is reused: block-level covariance partials first,
    // split descriptors afterwards — the two uses never overlap in time.
    float* covariance_scratch = scratch_mem;
    GMMSplit_t* split_scratch = (GMMSplit_t*) scratch_mem;
    int total_components = MIXTURE_COUNT * MIXTURE_SIZE;
    for (unsigned int active = MIXTURE_COUNT; active < total_components; active += MIXTURE_COUNT)
    {
        // Two-stage covariance estimate for every currently active component.
        for (unsigned int component = 0; component < active; ++component)
        {
            CovarianceReductionKernel<WARPS, LOAD><<<{reduction_blocks, 1, batch_count}, BLOCK>>>(component, image, alpha, covariance_scratch, element_count);
        }
        CovarianceFinalizationKernel<WARPS, false><<<{active, 1, batch_count}, BLOCK>>>(covariance_scratch, gmm, reduction_blocks);
        // Pick each mixture's split candidate, then reassign its pixels.
        GMMFindSplit<<<{1, 1, batch_count}, dim3(BLOCK_SIZE, MIXTURE_COUNT)>>>(split_scratch, active / MIXTURE_COUNT, gmm);
        GMMDoSplit<<<{TILE(element_count, BLOCK_SIZE * DO_SPLIT_DEGENERACY), 1, batch_count}, BLOCK_SIZE>>>(split_scratch, (active / MIXTURE_COUNT) << 4, image, alpha, element_count);
    }
}
// One refresh of all GMM components: accumulate per-block covariance partials
// for every component, finalize them (normalized variant of the finalization
// kernel), then precompute each component's common term for the data term.
void GMMUpdate(const float *image, int *alpha, float *gmm, float *scratch_mem, unsigned int batch_count, unsigned int element_count)
{
    unsigned int reduction_blocks = TILE(element_count, BLOCK * LOAD);
    float* covariance_scratch = scratch_mem;
    unsigned int total_components = MIXTURE_COUNT * MIXTURE_SIZE;
    for (unsigned int component = 0; component < total_components; ++component)
    {
        CovarianceReductionKernel<WARPS, LOAD><<<{reduction_blocks, 1, batch_count}, BLOCK>>>(component, image, alpha, covariance_scratch, element_count);
    }
    CovarianceFinalizationKernel<WARPS, true><<<{total_components, 1, batch_count}, BLOCK>>>(covariance_scratch, gmm, reduction_blocks);
    GMMcommonTerm<<<{1, 1, batch_count}, dim3(BLOCK_SIZE, MIXTURE_COUNT)>>>(gmm);
}
// Host launcher: evaluates the per-mixture posterior weights for every pixel
// of every batch item (one thread per pixel, grid z = batch).
void GMMDataTerm(const float *image, const float *gmm, float* output, unsigned int batch_count, unsigned int element_count)
{
    dim3 block_dim(BLOCK_SIZE, 1);
    dim3 grid_dim(TILE(element_count, BLOCK_SIZE), 1, batch_count);
    GMMDataTermKernel<<<grid_dim, block_dim>>>(image, gmm, output, element_count);
}
// Fits the batched GMMs to `input` starting from the labeling in `labels`.
// scratch_memory layout: batch_count * element_count ints of working labels,
// followed by float scratch shared by the reduction kernels (int and float are
// the same size, so the pointer arithmetic below is consistent).
void learn_cuda(const float* input, const int* labels, float* gmm, float* scratch_memory, unsigned int batch_count, unsigned int element_count)
{
    int* working_labels = (int*)scratch_memory;
    float* kernel_scratch = scratch_memory + batch_count * element_count;
    // Work on a copy of the labels: the split phase rewrites assignments.
    cudaMemcpyAsync(working_labels, labels, batch_count * element_count * sizeof(int), cudaMemcpyDeviceToDevice);
    GMMInitialize(input, working_labels, gmm, kernel_scratch, batch_count, element_count);
    GMMUpdate(input, working_labels, gmm, kernel_scratch, batch_count, element_count);
}
// Evaluates the fitted GMMs on `input`, writing per-mixture weights to
// `output`. Thin passthrough kept for API symmetry with learn_cuda.
void apply_cuda(const float* gmm, const float* input, float* output, unsigned int batch_count, unsigned int element_count)
{
GMMDataTerm(input, gmm, output, batch_count, element_count);
}
|
f64cbea49a1a3fdbaea3b83e26a7ec499fa629c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* stringsort_app.cu
*
* @brief CUDPP application-level merge sorting routines
*/
/** @addtogroup cudpp_app
* @{
*/
/** @name StringSort Functions
* @{
*/
#include "cuda_util.h"
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_stringsort.h"
#include "stringsort_kernel.cuh"
#include "limits.h"
#define BLOCKSORT_SIZE 1024
#define DEPTH 8
// Launches the inclusive dot-add scan that folds per-string padding counts
// into the address array (one thread per string).
void dotAdd(unsigned int* d_address,
unsigned int* numSpaces,
unsigned int* packedAddress,
size_t numElements,
size_t stringArrayLength)
{
    const int threadsPerBlock = 128;
    const int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(dotAddInclusive, dim3(blockCount), dim3(threadsPerBlock), 0, 0, numSpaces, d_address, packedAddress, numElements, stringArrayLength);
}
// Computes, per string, the padding needed to align each string for packing
// (alignedOffsets kernel; one thread per string).
void calculateAlignedOffsets(unsigned int* d_address,
unsigned int* numSpaces,
unsigned char* d_stringVals,
unsigned char termC,
size_t numElements,
size_t stringArrayLength)
{
    const int threadsPerBlock = 128;
    const int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(alignedOffsets, dim3(blockCount), dim3(threadsPerBlock), 0, 0, numSpaces, d_address, d_stringVals, termC, numElements, stringArrayLength);
}
// Packs the raw byte strings into uint-aligned storage and derives the 4-char
// packed sort keys from the aligned data.
void packStrings(unsigned int* packedStrings,
unsigned char* d_stringVals,
unsigned int* d_keys,
unsigned int* packedAddress,
unsigned int* address,
size_t numElements,
size_t stringArrayLength,
unsigned char termC)
{
    const unsigned int threadsPerBlock = 128;
    const unsigned int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    // One thread per string (irregular parallelism). A per-character variant
    // would need a binary search per character; which wins depends on the data.
    hipLaunchKernelGGL(alignString, dim3(blockCount), dim3(threadsPerBlock), 0, 0, packedStrings, d_stringVals, packedAddress, address, numElements, stringArrayLength, termC);
    hipLaunchKernelGGL(createKeys, dim3(blockCount), dim3(threadsPerBlock), 0, 0, d_keys, packedStrings, packedAddress, numElements);
}
// Maps the sorted packed addresses back to the caller's original (unpacked)
// address space (one thread per string).
void unpackStrings(unsigned int* packedAddress,
unsigned int* packedAddressRef,
unsigned int* address,
unsigned int* addressRef,
size_t numElements)
{
    const unsigned int threadsPerBlock = 128;
    const unsigned int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(unpackAddresses, dim3(blockCount), dim3(threadsPerBlock), 0, 0, packedAddress, packedAddressRef, address, addressRef, numElements);
}
/** @brief Performs merge sort utilizing three stages:
* (1) blocksort, (2) simple two-way merge and (3) cooperative multi merge
* on a set of strings.
*
* The sort ping-pongs between (pkeys, pvals) and the plan's temp buffers on
* every merge pass; the parity of `count` records which pair holds the live
* data, and a final device-to-device copy restores the result into
* (pkeys, pvals) when the pass count is odd.
*
* @param[in,out] pkeys Keys (first four characters of string) to be sorted.
* @param[in,out] pvals Addresses of string locations for tie-breaks
* @param[out] stringVals global string value array (four characters stuffed into a uint)
* @param[in] numElements Number of elements in the sort.
* @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
* @param[in] termC Termination character for our strings
* @param[in] plan Configuration information for mergesort.
**/
void runStringSort(unsigned int *pkeys,
unsigned int *pvals,
unsigned int *stringVals,
size_t numElements,
size_t stringArrayLength,
unsigned char termC,
const CUDPPStringSortPlan *plan)
{
int numPartitions = (numElements+BLOCKSORT_SIZE-1)/BLOCKSORT_SIZE;
int numBlocks = numPartitions/2;
int partitionSize = BLOCKSORT_SIZE;
unsigned int swapPoint = plan->m_swapPoint;
unsigned int subPartitions = plan->m_subPartitions;
int numThreads = 128;
// Stage 1: sort each BLOCKSORT_SIZE partition independently in shared memory.
hipLaunchKernelGGL(( blockWiseStringSort<unsigned int, DEPTH>) , dim3(numPartitions), dim3(BLOCKSORT_SIZE/DEPTH), 2*(BLOCKSORT_SIZE)*sizeof(unsigned int), 0,
pkeys, pvals, stringVals, BLOCKSORT_SIZE, numElements, stringArrayLength, termC);
// mult: partition-size multiplier; count: pass parity (buffer ping-pong).
int mult = 1; int count = 0;
// Stage 2: we run p stages of simpleMerge until numBlocks <= some Critical level
while(numPartitions > swapPoint || (partitionSize*mult < 16384 && numPartitions > 1)/* && numPartitions > 1*/)
{
//printf("Running simple merge for %d partitions of size %d\n", numPartitions, partitionSize*mult);
numBlocks = (numPartitions&0xFFFE);
if(count%2 == 0)
{
// Even pass: data lives in (pkeys, pvals), merged into the temp buffers.
hipLaunchKernelGGL(( simpleStringMerge<unsigned int, 2>)
, dim3(numBlocks), dim3(CTASIZE_simple), sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4), 0, pkeys, plan->m_tempKeys,
pvals, plan->m_tempAddress, stringVals, partitionSize*mult, numElements, count, stringArrayLength, termC);
// Odd partition count: the unpaired last partition is copied through as-is.
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
hipLaunchKernelGGL(( simpleCopy<unsigned int>)
, dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, pkeys, pvals, plan->m_tempKeys, plan->m_tempAddress, offset, numElementsToCopy);
}
}
else
{
// Odd pass: roles of the primary and temp buffers are swapped.
hipLaunchKernelGGL(( simpleStringMerge<unsigned int, 2>)
, dim3(numBlocks), dim3(CTASIZE_simple), sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4), 0, plan->m_tempKeys, pkeys,
plan->m_tempAddress, pvals, stringVals, partitionSize*mult, numElements, count, stringArrayLength, termC);
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
hipLaunchKernelGGL(( simpleCopy<unsigned int>)
, dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, plan->m_tempKeys, plan->m_tempAddress, pkeys, pvals, offset, numElementsToCopy);
}
}
mult*=2;
count++;
numPartitions = (numPartitions+1)/2;
}
//End of simpleMerge, now blocks cooperate to merge partitions
// Stage 3: each pair of partitions is divided into subPartitions chunks
// (findMultiPartitions) which cooperating blocks merge (stringMergeMulti).
while (numPartitions > 1)
{
numBlocks = (numPartitions&0xFFFE);
int secondBlocks = ((numBlocks)*subPartitions+numThreads-1)/numThreads;
if(count%2 == 1)
{
hipLaunchKernelGGL(( findMultiPartitions<unsigned int>)
, dim3(secondBlocks), dim3(numThreads), 0, 0, plan->m_tempKeys, plan->m_tempAddress, stringVals, subPartitions, numBlocks, partitionSize*mult, plan->m_partitionStartA, plan->m_partitionSizeA,
plan->m_partitionStartB, plan->m_partitionSizeB, numElements, stringArrayLength, termC);
//int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
hipLaunchKernelGGL(( stringMergeMulti<unsigned int, DEPTH_multi>)
, dim3(numBlocks*subPartitions), dim3(CTASIZE_multi), (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int), 0, plan->m_tempKeys, pkeys, plan->m_tempAddress,
pvals, stringVals, subPartitions, numBlocks, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, mult*partitionSize,
count, numElements, stringArrayLength, termC);
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
hipLaunchKernelGGL(( simpleCopy<unsigned int>)
, dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, plan->m_tempKeys, plan->m_tempAddress, pkeys, pvals, offset, numElementsToCopy);
}
}
else
{
hipLaunchKernelGGL(( findMultiPartitions<unsigned int>)
, dim3(secondBlocks), dim3(numThreads), 0, 0, pkeys, pvals, stringVals, subPartitions, numBlocks, partitionSize*mult, plan->m_partitionStartA, plan->m_partitionSizeA,
plan->m_partitionStartB, plan->m_partitionSizeB, numElements, stringArrayLength, termC);
//int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
hipLaunchKernelGGL(( stringMergeMulti<unsigned int, DEPTH_multi>)
, dim3(numBlocks*subPartitions), dim3(CTASIZE_multi), (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int), 0, pkeys, plan->m_tempKeys, pvals,
plan->m_tempAddress, stringVals, subPartitions, numBlocks, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, mult*partitionSize,
count, numElements, stringArrayLength, termC);
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
hipLaunchKernelGGL(( simpleCopy<unsigned int>)
, dim3((numElementsToCopy+numThreads-1)/numThreads), dim3(numThreads), 0, 0, pkeys, pvals, plan->m_tempKeys, plan->m_tempAddress, offset, numElementsToCopy);
}
}
count++;
mult*=2;
subPartitions*=2;
numPartitions = (numPartitions+1)/2;
}
// After an odd number of passes the sorted data sits in the temp buffers;
// copy it back so callers always receive the result in (pkeys, pvals).
if(count%2==1)
{
CUDA_SAFE_CALL(hipMemcpy(pkeys, plan->m_tempKeys, numElements*sizeof(unsigned int), hipMemcpyDeviceToDevice));
CUDA_SAFE_CALL(hipMemcpy(pvals, plan->m_tempAddress, numElements*sizeof(unsigned int), hipMemcpyDeviceToDevice));
}
}
#ifdef __cplusplus
extern "C"
{
#endif
/**
* @brief From the programmer-specified sort configuration,
* creates internal memory for performing the sort.
*
* Allocates the double-buffer key/address arrays, the packing and scan
* scratch buffers, and the partition bookkeeping arrays (sized
* swapPoint * subPartitions * 4). Every allocation is checked through
* CUDA_SAFE_CALL; freeStringSortStorage releases all of them.
*
* @param[in] plan Pointer to CUDPPStringSortPlan object
**/
void allocStringSortStorage(CUDPPStringSortPlan *plan)
{
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_keys, sizeof(unsigned int)*plan->m_numElements));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_tempKeys, sizeof(unsigned int)*plan->m_numElements));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_tempAddress, sizeof(unsigned int)*plan->m_numElements));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_packedAddress, sizeof(unsigned int)*(plan->m_numElements+1)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_packedAddressRef, sizeof(unsigned int)*(plan->m_numElements)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_addressRef, sizeof(unsigned int)*(plan->m_numElements)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_spaceScan, sizeof(unsigned int)*(plan->m_numElements+1)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_numSpaces, sizeof(unsigned int)*(plan->m_numElements+1)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_partitionSizeA, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_partitionSizeB, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_partitionStartA, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
CUDA_SAFE_CALL(hipMalloc((void**)&plan->m_partitionStartB, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
}
/** @brief Deallocates intermediate memory from allocStringSortStorage.
*
* Releases every buffer allocated by allocStringSortStorage.
* NOTE(review): the hipFree return codes are ignored here, unlike the checked
* allocations — consider wrapping them in CUDA_SAFE_CALL as well.
*
* @param[in] plan Pointer to CUDPPStringSortPlan object
**/
void freeStringSortStorage(CUDPPStringSortPlan* plan)
{
hipFree(plan->m_keys);
hipFree(plan->m_packedAddress);
hipFree(plan->m_packedAddressRef);
hipFree(plan->m_tempKeys);
hipFree(plan->m_tempAddress);
hipFree(plan->m_addressRef);
hipFree(plan->m_numSpaces);
hipFree(plan->m_spaceScan);
hipFree(plan->m_partitionSizeA);
hipFree(plan->m_partitionSizeB);
hipFree(plan->m_partitionStartA);
hipFree(plan->m_partitionStartB);
}
/** @brief Dispatch function to perform a sort on an array with
* a specified configuration.
*
* This is the dispatch routine which calls runStringSort() with
* appropriate arguments as specified by the plan.
* @param[in,out] keys Keys (first four chars of string) to be sorted.
* @param[in,out] values Address of string values in array of null terminated strings
* @param[in] stringVals Global string array
* @param[in] numElements Number of elements in the sort.
* @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
* @param[in] termC Termination character for our strings
* @param[in] plan Configuration information for mergeSort.
**/
void cudppStringSortDispatch(unsigned int *keys,
unsigned int *values,
unsigned int *stringVals,
size_t numElements,
size_t stringArrayLength,
unsigned char termC,
const CUDPPStringSortPlan *plan)
{
runStringSort(keys, values, stringVals, numElements, stringArrayLength, termC, plan);
}
#ifdef __cplusplus
}
#endif
/** @} */ // end stringsort functions
/** @} */ // end cudpp_app
| f64cbea49a1a3fdbaea3b83e26a7ec499fa629c2.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* stringsort_app.cu
*
* @brief CUDPP application-level merge sorting routines
*/
/** @addtogroup cudpp_app
* @{
*/
/** @name StringSort Functions
* @{
*/
#include "cuda_util.h"
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_stringsort.h"
#include "stringsort_kernel.cuh"
#include "limits.h"
#define BLOCKSORT_SIZE 1024
#define DEPTH 8
// Launches the inclusive dot-add scan that folds per-string padding counts
// into the address array (one thread per string).
void dotAdd(unsigned int* d_address,
unsigned int* numSpaces,
unsigned int* packedAddress,
size_t numElements,
size_t stringArrayLength)
{
    const int threadsPerBlock = 128;
    const int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    dotAddInclusive<<<blockCount, threadsPerBlock>>>(numSpaces, d_address, packedAddress, numElements, stringArrayLength);
}
// Computes, per string, the padding needed to align each string for packing
// (alignedOffsets kernel; one thread per string).
void calculateAlignedOffsets(unsigned int* d_address,
unsigned int* numSpaces,
unsigned char* d_stringVals,
unsigned char termC,
size_t numElements,
size_t stringArrayLength)
{
    const int threadsPerBlock = 128;
    const int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    alignedOffsets<<<blockCount, threadsPerBlock>>>(numSpaces, d_address, d_stringVals, termC, numElements, stringArrayLength);
}
// Packs the raw byte strings into uint-aligned storage and derives the 4-char
// packed sort keys from the aligned data.
void packStrings(unsigned int* packedStrings,
unsigned char* d_stringVals,
unsigned int* d_keys,
unsigned int* packedAddress,
unsigned int* address,
size_t numElements,
size_t stringArrayLength,
unsigned char termC)
{
    const unsigned int threadsPerBlock = 128;
    const unsigned int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    // One thread per string (irregular parallelism). A per-character variant
    // would need a binary search per character; which wins depends on the data.
    alignString<<<blockCount, threadsPerBlock>>>(packedStrings, d_stringVals, packedAddress, address, numElements, stringArrayLength, termC);
    createKeys<<<blockCount, threadsPerBlock>>>(d_keys, packedStrings, packedAddress, numElements);
}
// Maps the sorted packed addresses back to the caller's original (unpacked)
// address space (one thread per string).
void unpackStrings(unsigned int* packedAddress,
unsigned int* packedAddressRef,
unsigned int* address,
unsigned int* addressRef,
size_t numElements)
{
    const unsigned int threadsPerBlock = 128;
    const unsigned int blockCount = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    unpackAddresses<<<blockCount, threadsPerBlock>>>(packedAddress, packedAddressRef, address, addressRef, numElements);
}
/** @brief Performs merge sort utilizing three stages:
* (1) blocksort, (2) simple two-way merge and (3) cooperative multi merge
* on a set of strings.
*
* The sort ping-pongs between (pkeys, pvals) and the plan's temp buffers on
* every merge pass; the parity of `count` records which pair holds the live
* data, and a final device-to-device copy restores the result into
* (pkeys, pvals) when the pass count is odd.
*
* @param[in,out] pkeys Keys (first four characters of string) to be sorted.
* @param[in,out] pvals Addresses of string locations for tie-breaks
* @param[out] stringVals global string value array (four characters stuffed into a uint)
* @param[in] numElements Number of elements in the sort.
* @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
* @param[in] termC Termination character for our strings
* @param[in] plan Configuration information for mergesort.
**/
void runStringSort(unsigned int *pkeys,
unsigned int *pvals,
unsigned int *stringVals,
size_t numElements,
size_t stringArrayLength,
unsigned char termC,
const CUDPPStringSortPlan *plan)
{
int numPartitions = (numElements+BLOCKSORT_SIZE-1)/BLOCKSORT_SIZE;
int numBlocks = numPartitions/2;
int partitionSize = BLOCKSORT_SIZE;
unsigned int swapPoint = plan->m_swapPoint;
unsigned int subPartitions = plan->m_subPartitions;
int numThreads = 128;
// Stage 1: sort each BLOCKSORT_SIZE partition independently in shared memory.
blockWiseStringSort<unsigned int, DEPTH> <<<numPartitions, BLOCKSORT_SIZE/DEPTH, 2*(BLOCKSORT_SIZE)*sizeof(unsigned int)>>>
(pkeys, pvals, stringVals, BLOCKSORT_SIZE, numElements, stringArrayLength, termC);
// mult: partition-size multiplier; count: pass parity (buffer ping-pong).
int mult = 1; int count = 0;
// Stage 2: we run p stages of simpleMerge until numBlocks <= some Critical level
while(numPartitions > swapPoint || (partitionSize*mult < 16384 && numPartitions > 1)/* && numPartitions > 1*/)
{
//printf("Running simple merge for %d partitions of size %d\n", numPartitions, partitionSize*mult);
numBlocks = (numPartitions&0xFFFE);
if(count%2 == 0)
{
// Even pass: data lives in (pkeys, pvals), merged into the temp buffers.
simpleStringMerge<unsigned int, 2>
<<<numBlocks, CTASIZE_simple, sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4)>>>(pkeys, plan->m_tempKeys,
pvals, plan->m_tempAddress, stringVals, partitionSize*mult, numElements, count, stringArrayLength, termC);
// Odd partition count: the unpaired last partition is copied through as-is.
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
simpleCopy<unsigned int>
<<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(pkeys, pvals, plan->m_tempKeys, plan->m_tempAddress, offset, numElementsToCopy);
}
}
else
{
// Odd pass: roles of the primary and temp buffers are swapped.
simpleStringMerge<unsigned int, 2>
<<<numBlocks, CTASIZE_simple, sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4)>>>(plan->m_tempKeys, pkeys,
plan->m_tempAddress, pvals, stringVals, partitionSize*mult, numElements, count, stringArrayLength, termC);
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
simpleCopy<unsigned int>
<<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(plan->m_tempKeys, plan->m_tempAddress, pkeys, pvals, offset, numElementsToCopy);
}
}
mult*=2;
count++;
numPartitions = (numPartitions+1)/2;
}
//End of simpleMerge, now blocks cooperate to merge partitions
// Stage 3: each pair of partitions is divided into subPartitions chunks
// (findMultiPartitions) which cooperating blocks merge (stringMergeMulti).
while (numPartitions > 1)
{
numBlocks = (numPartitions&0xFFFE);
int secondBlocks = ((numBlocks)*subPartitions+numThreads-1)/numThreads;
if(count%2 == 1)
{
findMultiPartitions<unsigned int>
<<<secondBlocks, numThreads>>>(plan->m_tempKeys, plan->m_tempAddress, stringVals, subPartitions, numBlocks, partitionSize*mult, plan->m_partitionStartA, plan->m_partitionSizeA,
plan->m_partitionStartB, plan->m_partitionSizeB, numElements, stringArrayLength, termC);
//int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
stringMergeMulti<unsigned int, DEPTH_multi>
<<<numBlocks*subPartitions, CTASIZE_multi, (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int)>>>(plan->m_tempKeys, pkeys, plan->m_tempAddress,
pvals, stringVals, subPartitions, numBlocks, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, mult*partitionSize,
count, numElements, stringArrayLength, termC);
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
simpleCopy<unsigned int>
<<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(plan->m_tempKeys, plan->m_tempAddress, pkeys, pvals, offset, numElementsToCopy);
}
}
else
{
findMultiPartitions<unsigned int>
<<<secondBlocks, numThreads>>>(pkeys, pvals, stringVals, subPartitions, numBlocks, partitionSize*mult, plan->m_partitionStartA, plan->m_partitionSizeA,
plan->m_partitionStartB, plan->m_partitionSizeB, numElements, stringArrayLength, termC);
//int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements);
stringMergeMulti<unsigned int, DEPTH_multi>
<<<numBlocks*subPartitions, CTASIZE_multi, (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int)>>>(pkeys, plan->m_tempKeys, pvals,
plan->m_tempAddress, stringVals, subPartitions, numBlocks, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, mult*partitionSize,
count, numElements, stringArrayLength, termC);
if(numPartitions%2 == 1)
{
int offset = (partitionSize*mult*(numPartitions-1));
int numElementsToCopy = numElements-offset;
simpleCopy<unsigned int>
<<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(pkeys, pvals, plan->m_tempKeys, plan->m_tempAddress, offset, numElementsToCopy);
}
}
count++;
mult*=2;
subPartitions*=2;
numPartitions = (numPartitions+1)/2;
}
// After an odd number of passes the sorted data sits in the temp buffers;
// copy it back so callers always receive the result in (pkeys, pvals).
if(count%2==1)
{
CUDA_SAFE_CALL(cudaMemcpy(pkeys, plan->m_tempKeys, numElements*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
CUDA_SAFE_CALL(cudaMemcpy(pvals, plan->m_tempAddress, numElements*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
}
}
#ifdef __cplusplus
extern "C"
{
#endif
/**
* @brief From the programmer-specified sort configuration,
* creates internal memory for performing the sort.
*
* Allocates the double-buffer key/address arrays, the packing and scan
* scratch buffers, and the partition bookkeeping arrays (sized
* swapPoint * subPartitions * 4). Every allocation is checked through
* CUDA_SAFE_CALL; freeStringSortStorage releases all of them.
*
* @param[in] plan Pointer to CUDPPStringSortPlan object
**/
void allocStringSortStorage(CUDPPStringSortPlan *plan)
{
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_keys, sizeof(unsigned int)*plan->m_numElements));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_tempKeys, sizeof(unsigned int)*plan->m_numElements));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_tempAddress, sizeof(unsigned int)*plan->m_numElements));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_packedAddress, sizeof(unsigned int)*(plan->m_numElements+1)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_packedAddressRef, sizeof(unsigned int)*(plan->m_numElements)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_addressRef, sizeof(unsigned int)*(plan->m_numElements)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_spaceScan, sizeof(unsigned int)*(plan->m_numElements+1)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_numSpaces, sizeof(unsigned int)*(plan->m_numElements+1)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionSizeA, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionSizeB, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionStartA, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionStartB, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4)));
}
/** @brief Deallocates intermediate memory from allocStringSortStorage.
*
* Releases every buffer allocated by allocStringSortStorage.
* NOTE(review): the cudaFree return codes are ignored here, unlike the checked
* allocations — consider wrapping them in CUDA_SAFE_CALL as well.
*
* @param[in] plan Pointer to CUDPPStringSortPlan object
**/
void freeStringSortStorage(CUDPPStringSortPlan* plan)
{
cudaFree(plan->m_keys);
cudaFree(plan->m_packedAddress);
cudaFree(plan->m_packedAddressRef);
cudaFree(plan->m_tempKeys);
cudaFree(plan->m_tempAddress);
cudaFree(plan->m_addressRef);
cudaFree(plan->m_numSpaces);
cudaFree(plan->m_spaceScan);
cudaFree(plan->m_partitionSizeA);
cudaFree(plan->m_partitionSizeB);
cudaFree(plan->m_partitionStartA);
cudaFree(plan->m_partitionStartB);
}
/** @brief Dispatch function to perform a sort on an array with
* a specified configuration.
*
* This is the dispatch routine which calls runStringSort() with
* appropriate arguments as specified by the plan.
* @param[in,out] keys Keys (first four chars of string) to be sorted.
* @param[in,out] values Address of string values in array of null terminated strings
* @param[in] stringVals Global string array
* @param[in] numElements Number of elements in the sort.
* @param[in] stringArrayLength The size of our string array in uints (4 chars per uint)
* @param[in] termC Termination character for our strings
* @param[in] plan Configuration information for mergeSort.
**/
void cudppStringSortDispatch(unsigned int *keys,
unsigned int *values,
unsigned int *stringVals,
size_t numElements,
size_t stringArrayLength,
unsigned char termC,
const CUDPPStringSortPlan *plan)
{
runStringSort(keys, values, stringVals, numElements, stringArrayLength, termC, plan);
}
#ifdef __cplusplus
}
#endif
/** @} */ // end stringsort functions
/** @} */ // end cudpp_app
|
bc57caf4e30a154096e989ae0d66261df7b66e67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "netcdf.h"
#include "ncdf.h"
#definehipLaunchKernelGGL(( GFLA) , dim3(dgfla), dim3(dbfla) , 0, 0,
#define loop_R , dGr, dBr , 0, 0,
#define loop_xy , dgxy, dbxy , 0, 0,
#define loop_x , dgx, dbx , 0, 0,
#define loop_y , dgp, dbp , 0, 0,
#define Gmom , dgall, dball , 0, 0,
NetCDF_ids::NetCDF_idsGrids* grids, Parameters* pars, Geometry* geo) :
grids_(grids), pars_(pars), geo_(geo),
red(nullptr), pot(nullptr), ph2(nullptr), all_red(nullptr), grad_phi(nullptr), grad_perp(nullptr)
{
primary = nullptr; secondary = nullptr; tertiary = nullptr; amom = nullptr;
df = nullptr; favg = nullptr;
if (pars_->diagnosing_spectra || pars_->diagnosing_kzspec) {
float dum = 1.0;
red = new All_Reduce(grids_, pars_->wspectra); CUDA_DEBUG("Reductions: %s \n"); // G**2
pot = new Grid_Species_Reduce(grids_, pars_->aspectra); CUDA_DEBUG("Reductions: %s \n"); // (1-G0) Phi**2 keeping track of species
ph2 = new Grid_Reduce(grids_, pars_->aspectra); CUDA_DEBUG("Reductions: %s \n"); // Phi**2
}
int nS = grids_->Nspecies;
int nM = grids_->Nm;
int nL = grids_->Nl;
int nY = grids_->Nyc;
int nYk = grids_->Naky;
int nX = grids_->Nx;
int nXk = grids_->Nakx;
int nZ = grids_->Nz;
int nR = nX * nY * nZ;
int nK = nXk * nYk * nZ;
int nG = nR * grids_->Nmoms * nS;
theta_extended = nullptr;
char strb[263];
strcpy(strb, pars_->run_name);
strcat(strb, ".nc");
int retval, idum;
// Loop over full real-space grid
int nt1 = min(grids_->NxNyNz, 1024);
int nb1 = 1 + (grids_->NxNyNz-1)/nt1;
dBr = dim3(nt1, 1, 1);
dGr = dim3(nb1, 1, 1);
// Loop over x-space grid
nt1 = min(grids_->Nx, 512);
nb1 = 1 + (grids_->Nx-1)/nt1;
dbx = dim3(nt1, 1, 1);
dgx = dim3(nb1, 1, 1);
// Loop over y-space grid
nt1 = min(grids_->Ny, 512);
nb1 = 1 + (grids_->Ny-1)/nt1;
dbp = dim3(nt1, 1, 1);
dgp = dim3(nb1, 1, 1);
// Double loop, over y-space and x-space grids
nt1 = min(32, grids_->Ny);
nb1 = 1 + (grids_->Ny-1)/nt1;
int nt2 = min(32, grids_->Nx);
int nb2 = 1 + (grids_->Nx-1)/nt2;
dbxy = dim3(nt1, nt2, 1);
dgxy = dim3(nb1, nb2, 1);
// Single loop, over Nx*Nyc elements
nt1 = min(128, grids_->Nx);
nb1 = 1 + (grids_->Nx*grids_->Nyc-1)/nt1;
dbfla = dim3(nt1, 1, 1);
dgfla = dim3(nb1, 1, 1);
// Triple loop, native elements
int nt3, nb3;
nt1 = min(16, grids_->Nyc); nb1 = 1 + (grids_->Nyc-1)/nt1;
nt2 = min(16, grids_->Nx); nb2 = 1 + (grids_->Nx -1)/nt2;
nt3 = min(4, grids_->Nz); nb3 = 1 + (grids_->Nz -1)/nt3;
dball = dim3(nt1, nt2, nt3);
dgall = dim3(nb1, nb2, nb3);
  // Scratch buffers and a perpendicular-gradient operator, used only when
  // k-space / xy-plane / z-averaged moment outputs are requested.
  if (pars_->write_kmom || pars_->write_xymom || pars_->write_avgz) {
    int nbatch = grids_->Nz;
    grad_phi = new GradPerp(grids_, nbatch, grids_->NxNycNz);
    // NOTE(review): these cudaMalloc return codes are not checked — a failed
    // allocation here would surface later as an illegal-address error.
    // Consider wrapping with the project's CUDA error-check macro.
    cudaMalloc (&df,   sizeof(cuComplex)*grids_->NxNycNz);
    cudaMalloc (&favg, sizeof(cuComplex)*grids_->Nx);
    cudaMalloc (&amom, sizeof(cuComplex)*grids_->NxNycNz);
  }
  // Re-enter NetCDF "define mode" on each open file and look up / define
  // the dimensions needed below.
  if (pars_->ResWrite) {
    r_file = pars_->ncresid;
    // Return value of nc_redef is deliberately ignored (empty statement):
    // it fails harmlessly if the file is already in define mode.
    if (retval = nc_redef(r_file));
    if (retval = nc_inq_dimid(r_file, "r",    &res_dim))   ERR(retval);
    if (retval = nc_inq_dimid(r_file, "time", &rtime_dim)) ERR(retval);
    //    v_ky[0] = res_dim;
    //    if (retval = nc_def_var(r_file, "r", NC_INT, 1, v_ky, &state)) ERR(retval);
  }
  if (pars_->write_xymom) {
    z_file = pars_->nczid;
    // As above: nc_redef result intentionally discarded.
    if (retval = nc_redef(z_file));
    if (retval = nc_inq_dimid(z_file, "x",    &zx_dim))    ERR(retval);
    if (retval = nc_inq_dimid(z_file, "y",    &zy_dim))    ERR(retval);
    if (retval = nc_inq_dimid(z_file, "time", &ztime_dim)) ERR(retval);
    v_ky[0] = zy_dim;
    if (retval = nc_def_var(z_file, "y", NC_FLOAT, 1, v_ky, &zy)) ERR(retval);
    v_kx[0] = zx_dim;
    if (retval = nc_def_var(z_file, "x", NC_FLOAT, 1, v_kx, &zx)) ERR(retval);
  }
  file = pars_->ncid;
  // As above: nc_redef result intentionally discarded.
  if (retval = nc_redef(file));
int ri;
// Get handles for the dimensions
if (retval = nc_inq_dimid(file, "ri", &ri)) ERR(retval);
if (retval = nc_def_dim(file, "kz", grids_->Nz, &nkz)) ERR(retval);
if (retval = nc_def_dim(file, "ky", grids_->Naky, &ky_dim)) ERR(retval);
if (retval = nc_def_dim(file, "kx", grids_->Nakx, &kx_dim)) ERR(retval);
if (retval = nc_def_dim(file, "theta", grids_->Nz, &nz)) ERR(retval);
if (retval = nc_inq_dimid(file, "x", &x_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "y", &y_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "m", &m_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "l", &l_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "s", &s_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "time", &time_dim)) ERR(retval);
if (retval = nc_def_var(file, "periodic", NC_INT, 0, 0, &periodic)) ERR(retval);
if (retval = nc_def_var(file, "local_limit", NC_INT, 0, 0, &local_limit)) ERR(retval);
v_ky[0] = ky_dim;
if (retval = nc_def_var(file, "ky", NC_FLOAT, 1, v_ky, &ky)) ERR(retval);
v_kx[0] = kx_dim;
if (retval = nc_def_var(file, "kx", NC_FLOAT, 1, v_kx, &kx)) ERR(retval);
v_kz[0] = nkz;
if (retval = nc_def_var(file, "kz", NC_FLOAT, 1, v_kz, &kz)) ERR(retval);
v_ky[0] = y_dim;
if (retval = nc_def_var(file, "y", NC_FLOAT, 1, v_ky, &y)) ERR(retval);
v_kx[0] = x_dim;
if (retval = nc_def_var(file, "x", NC_FLOAT, 1, v_kx, &x)) ERR(retval);
// v_z[0] = nz;
// if (retval = nc_def_var(file, "z", NC_FLOAT, 1, v_z, &z_h)) ERR(retval);
// z_h needs to be defined.
// Z0 would typically be q R
// and then z_h would run from - (2 pi q R)/2 : + (2 pi q R)/2
// but there are complications to get right:
// normalization of R?
// Allow for Z0 to be specified directly
// Allow nperiod > 1
int nc_sp;
if (retval = nc_inq_grp_ncid(file, "Spectra", &nc_sp)) ERR(retval);
int nc_flux;
if (retval = nc_inq_grp_ncid(file, "Fluxes", &nc_flux)) ERR(retval);
int nc_special;
if (retval = nc_inq_grp_ncid(file, "Special", &nc_special)) ERR(retval);
int nc_zonal;
if (retval = nc_inq_grp_ncid(file, "Zonal_x", &nc_zonal)) ERR(retval);
int nc_geo;
if (retval = nc_inq_grp_ncid(file, "Geometry", &nc_geo)) ERR(retval);
geo_v_theta[0] = nz; int ivar;
if (retval = nc_def_var (file, "theta", NC_FLOAT, 1, geo_v_theta, &theta)) ERR(retval);
if (retval = nc_def_var (nc_geo, "bmag", NC_FLOAT, 1, geo_v_theta, &bmag)) ERR(retval);
if (retval = nc_def_var (nc_geo, "bgrad", NC_FLOAT, 1, geo_v_theta, &bgrad)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gbdrift", NC_FLOAT, 1, geo_v_theta, &gbdrift)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gbdrift0", NC_FLOAT, 1, geo_v_theta, &gbdrift0)) ERR(retval);
if (retval = nc_def_var (nc_geo, "cvdrift", NC_FLOAT, 1, geo_v_theta, &cvdrift)) ERR(retval);
if (retval = nc_def_var (nc_geo, "cvdrift0", NC_FLOAT, 1, geo_v_theta, &cvdrift0)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gds2", NC_FLOAT, 1, geo_v_theta, &gds2)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gds21", NC_FLOAT, 1, geo_v_theta, &gds21)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gds22", NC_FLOAT, 1, geo_v_theta, &gds22)) ERR(retval);
if (retval = nc_def_var (nc_geo, "grho", NC_FLOAT, 1, geo_v_theta, &grho)) ERR(retval);
if (retval = nc_def_var (nc_geo, "jacobian", NC_FLOAT, 1, geo_v_theta, &jacobian)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gradpar", NC_FLOAT, 0, NULL, &ivar)) ERR(retval);
////////////////////////////
// //
// TIME //
// //
////////////////////////////
if (pars_->ResWrite) {
r_time = new nca(0);
r_time -> write_v_time = true;
r_time -> file = r_file;
r_time -> time_dims[0] = rtime_dim;
if (retval = nc_def_var(r_file, "time", NC_DOUBLE, 1, r_time -> time_dims, &r_time -> time)) ERR(retval);
}
if (pars_->write_xymom) {
z_time = new nca(0);
z_time -> write_v_time = true;
z_time -> file = z_file;
z_time -> time_dims[0] = ztime_dim;
if (retval = nc_def_var(z_file, "time", NC_DOUBLE, 1, z_time -> time_dims, &z_time -> time)) ERR(retval);
}
time = new nca(0);
time -> write_v_time = true;
time -> file = file;
time -> time_dims[0] = time_dim;
if (retval = nc_def_var(file, "time", NC_DOUBLE, 1, time -> time_dims, &time -> time)) ERR(retval);
////////////////////////////
// //
// DENSITY //
// //
////////////////////////////
if (pars_->write_moms) {
den = new nca(0);
den -> write = true;
den -> dims[0] = s_dim;
den -> dims[1] = nz;
den -> dims[2] = kx_dim;
den -> dims[3] = ky_dim;
den -> dims[4] = ri;
den -> file = nc_special;
if (retval = nc_def_var(nc_special, "density", NC_FLOAT, 5, den->dims, &den->idx )) ERR(retval);
den -> start[0] = 0;
den -> start[1] = 0;
den -> start[2] = 0;
den -> start[3] = 0;
den -> start[4] = 0;
den -> count[0] = grids_->Nspecies;
den -> count[1] = grids_->Nz;
den -> count[2] = grids_->Nakx;
den -> count[3] = grids_->Naky;
den -> count[4] = 2;
den -> ns = grids_->Nspecies;
} else {
den = new nca(0);
}
////////////////////////////
// //
// DENSITY(t=0) //
// //
////////////////////////////
if (pars_->write_moms) {
den0 = new nca(0);
den0 -> write = true;
den0 -> dims[0] = s_dim;
den0 -> dims[1] = nz;
den0 -> dims[2] = kx_dim;
den0 -> dims[3] = ky_dim;
den0 -> dims[4] = ri;
den0 -> file = nc_special;
if (retval = nc_def_var(nc_special, "density0", NC_FLOAT, 5, den0 -> dims, &den0 -> idx )) ERR(retval);
den0 -> start[0] = 0;
den0 -> start[1] = 0;
den0 -> start[2] = 0;
den0 -> start[3] = 0;
den0 -> start[4] = 0;
den0 -> count[0] = grids_->Nspecies;
den0 -> count[1] = grids_->Nz;
den0 -> count[2] = grids_->Nakx;
den0 -> count[3] = grids_->Naky;
den0 -> count[4] = 2;
den0 -> ns = grids_->Nspecies;
} else {
den0 = new nca(0);
}
////////////////////////////
// //
// Phi //
// //
////////////////////////////
if (pars_->write_phi) {
wphi = new nca(0);
wphi -> write = true;
wphi -> dims[0] = nz;
wphi -> dims[1] = kx_dim;
wphi -> dims[2] = ky_dim;
wphi -> dims[3] = ri;
wphi -> file = nc_special;
if (retval = nc_def_var(nc_special, "phi", NC_FLOAT, 4, wphi -> dims, &wphi -> idx )) ERR(retval);
wphi -> start[0] = 0;
wphi -> start[1] = 0;
wphi -> start[2] = 0;
wphi -> start[3] = 0;
wphi -> count[0] = grids_->Nz;
wphi -> count[1] = grids_->Nakx;
wphi -> count[2] = grids_->Naky;
wphi -> count[3] = 2;
wphi -> ns = 1;
} else {
wphi = new nca(0);
}
////////////////////////////
// //
// Phi(t=0) //
// //
////////////////////////////
if (pars_->write_phi) {
wphi0 = new nca(0);
wphi0 -> write = true;
wphi0 -> dims[0] = nz;
wphi0 -> dims[1] = kx_dim;
wphi0 -> dims[2] = ky_dim;
wphi0 -> dims[3] = ri;
wphi0 -> file = nc_special;
if (retval = nc_def_var(nc_special, "phi0", NC_FLOAT, 4, wphi0 -> dims, &wphi0 -> idx )) ERR(retval);
wphi0 -> start[0] = 0;
wphi0 -> start[1] = 0;
wphi0 -> start[2] = 0;
wphi0 -> start[3] = 0;
wphi0 -> count[0] = grids_->Nz;
wphi0 -> count[1] = grids_->Nakx;
wphi0 -> count[2] = grids_->Naky;
wphi0 -> count[3] = 2;
wphi0 -> ns = 1;
} else {
wphi0 = new nca(0);
}
////////////////////////////
// //
// DENSITY(kpar) //
// //
////////////////////////////
if (pars_->write_phi_kpar and pars_->write_moms) {
denk = new nca(0);
denk -> write = true;
denk -> dims[0] = s_dim;
denk -> dims[1] = nkz;
denk -> dims[2] = kx_dim;
denk -> dims[3] = ky_dim;
denk -> dims[4] = ri;
denk -> file = nc_special;
if (retval = nc_def_var(nc_special, "density_kpar", NC_FLOAT, 5, denk -> dims, &denk -> idx)) ERR(retval);
denk -> start[0] = 0;
denk -> start[1] = 0;
denk -> start[2] = 0;
denk -> start[3] = 0;
denk -> start[4] = 0;
denk -> count[0] = grids_->Nspecies;
denk -> count[1] = grids_->Nz;
denk -> count[2] = grids_->Nakx;
denk -> count[3] = grids_->Naky;
denk -> count[4] = 2;
denk -> ns = 1;
} else {
denk = new nca(0);
}
////////////////////////////
// //
// Phi(kpar) //
// //
////////////////////////////
if (pars_->write_phi_kpar) {
wphik = new nca(0);
wphik -> write = true;
wphik -> dims[0] = nkz;
wphik -> dims[1] = kx_dim;
wphik -> dims[2] = ky_dim;
wphik -> dims[3] = ri;
wphik -> file = nc_special;
if (retval = nc_def_var(nc_special, "phi2_kz", NC_FLOAT, 4, wphik -> dims, &wphik -> idx)) ERR(retval);
wphik -> start[0] = 0;
wphik -> start[1] = 0;
wphik -> start[2] = 0;
wphik -> start[3] = 0;
wphik -> count[0] = grids_->Nz;
wphik -> count[1] = grids_->Nakx;
wphik -> count[2] = grids_->Naky;
wphik -> count[3] = 2;
wphik -> ns = 1;
} else {
wphik = new nca(0);
}
////////////////////////////
// //
// Frequencies //
// //
////////////////////////////
if (pars_->write_omega) {
omg = new nca(-nX * nY, 2 * nXk * nYk);
omg -> write_v_time = true;
omg -> time_dims[0] = time_dim;
omg -> time_dims[1] = ky_dim;
omg -> time_dims[2] = kx_dim;
omg -> time_dims[3] = ri;
omg -> file = nc_special;
if (retval = nc_def_var(nc_special, "omega_v_time", NC_FLOAT, 4, omg -> time_dims, &omg -> time)) ERR(retval);
omg -> time_start[0] = 1;
omg -> time_count[1] = grids_->Naky;
omg -> time_count[2] = grids_->Nakx;
omg -> time_count[3] = 2;
for (int i=0; i < nXk * nYk * 2; i++) omg->cpu[i] = 0.;
} else {
omg = new nca(0);
}
////////////////////////////
// //
// Rosenbluth-Hinton //
// //
////////////////////////////
if (pars_->write_rh) {
rh = new nca(0);
rh -> write = true;
rh -> time_dims[0] = time_dim;
rh -> time_dims[1] = ri;
if (retval = nc_def_var(nc_special, "phi_rh", NC_FLOAT, 2, rh -> time_dims, &rh -> time)) ERR(retval);
rh -> time_count[1] = 2;
} else {
rh = new nca(0);
}
////////////////////////////
// //
// PZT estimates //
// //
////////////////////////////
if (pars_->write_pzt) {
Pzt = new nca(0);
pZt = new nca(0);
pzT = new nca(0);
Pzt -> write_v_time = true;
Pzt -> time_dims[0] = time_dim;
pZt -> time_dims[0] = time_dim;
pzT -> time_dims[0] = time_dim;
Pzt -> file = nc_special;
pZt -> file = nc_special;
pzT -> file = nc_special;
if (retval = nc_def_var(nc_special, "prim", NC_FLOAT, 1, Pzt -> time_dims, &Pzt -> idx)) ERR(retval);
if (retval = nc_def_var(nc_special, "sec", NC_FLOAT, 1, pZt -> time_dims, &pZt -> idx)) ERR(retval);
if (retval = nc_def_var(nc_special, "tert", NC_FLOAT, 1, pzT -> time_dims, &pzT -> idx)) ERR(retval);
hipHostMalloc (&primary, sizeof(float)); primary[0] = 0.;
hipHostMalloc (&secondary, sizeof(float)); secondary[0] = 0.;
hipHostMalloc (&tertiary, sizeof(float)); tertiary[0] = 0.;
hipMalloc (&t_bar, sizeof(hipComplex) * nR * nS);
} else {
Pzt = new nca(0);
pZt = new nca(0);
pzT = new nca(0);
}
////////////////////////////
// //
// (1-G0)phi**2 (species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_species] > 0) {
Ps = new nca(nS);
Ps -> write_v_time = true;
Ps -> time_dims[0] = time_dim;
Ps -> time_dims[1] = s_dim;
Ps -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pst", NC_FLOAT, 2, Ps -> time_dims, &Ps -> time)) ERR(retval);
Ps -> time_count[1] = grids_->Nspecies;
} else {
Ps = new nca(0);
}
////////////////////////////
// //
// P (kx, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_kx] > 0) {
Pkx = new nca(nX*nS, nXk*nS);
Pkx -> write_v_time = true;
Pkx -> time_dims[0] = time_dim;
Pkx -> time_dims[1] = s_dim;
Pkx -> time_dims[2] = kx_dim;
Pkx -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkxst", NC_FLOAT, 3, Pkx -> time_dims, &Pkx -> time)) ERR(retval);
Pkx -> time_count[1] = grids_->Nspecies;
Pkx -> time_count[2] = grids_->Nakx;
} else {
Pkx = new nca(0);
}
////////////////////////////
// //
// P (ky, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_ky] > 0) {
Pky = new nca(nY*nS, nYk*nS);
Pky -> write_v_time = true;
Pky -> time_dims[0] = time_dim;
Pky -> time_dims[1] = s_dim;
Pky -> time_dims[2] = ky_dim;
Pky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkyst", NC_FLOAT, 3, Pky->time_dims, &Pky->time)) ERR(retval);
Pky -> time_count[1] = grids_->Nspecies;
Pky -> time_count[2] = grids_->Naky;
} else {
Pky = new nca(0);
}
////////////////////////////
// //
// P (kz, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_kz] > 0) {
Pkz = new nca(nZ*nS, nZ*nS);
Pkz -> write_v_time = true;
Pkz -> time_dims[0] = time_dim;
Pkz -> time_dims[1] = s_dim;
Pkz -> time_dims[2] = nkz;
Pkz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkzst", NC_FLOAT, 3, Pkz -> time_dims, &Pkz -> time)) ERR(retval);
Pkz -> time_count[1] = grids_->Nspecies;
Pkz -> time_count[2] = grids_->Nz;
} else {
Pkz = new nca(0);
}
////////////////////////////
// //
// P (z, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_z] > 0) {
Pz = new nca(nZ*nS);
Pz -> write_v_time = true;
Pz -> time_dims[0] = time_dim;
Pz -> time_dims[1] = s_dim;
Pz -> time_dims[2] = nz;
Pz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pzst", NC_FLOAT, 3, Pz -> time_dims, &Pz -> time)) ERR(retval);
Pz -> time_count[1] = grids_->Nspecies;
Pz -> time_count[2] = grids_->Nz;
} else {
Pz = new nca(0);
}
////////////////////////////
// //
// P (kx,ky, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_kxky] > 0) {
Pkxky = new nca(nX * nY * nS, nXk * nYk * nS);
Pkxky -> write_v_time = true;
Pkxky -> time_dims[0] = time_dim;
Pkxky -> time_dims[1] = s_dim;
Pkxky -> time_dims[2] = ky_dim;
Pkxky -> time_dims[3] = kx_dim;
Pkxky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkxkyst", NC_FLOAT, 4, Pkxky -> time_dims, &Pkxky -> time)) ERR(retval);
Pkxky -> time_count[1] = grids_->Nspecies;
Pkxky -> time_count[2] = grids_->Naky;
Pkxky -> time_count[3] = grids_->Nakx;
} else {
Pkxky = new nca(0);
}
////////////////////////////
// //
// W (species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_species] > 0) {
Ws = new nca(nS);
Ws -> write_v_time = true;
Ws -> time_dims[0] = time_dim;
Ws -> time_dims[1] = s_dim;
Ws -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wst", NC_FLOAT, 2, Ws -> time_dims, &Ws -> time)) ERR(retval);
Ws -> time_count[1] = grids_->Nspecies;
} else {
Ws = new nca(0);
}
////////////////////////////
// //
// W (kx, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_kx] > 0) {
Wkx = new nca(nX*nS, nXk*nS);
Wkx -> write_v_time = true;
Wkx -> time_dims[0] = time_dim;
Wkx -> time_dims[1] = s_dim;
Wkx -> time_dims[2] = kx_dim;
Wkx -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkxst", NC_FLOAT, 3, Wkx -> time_dims, &Wkx -> time)) ERR(retval);
Wkx -> time_count[1] = grids_->Nspecies;
Wkx -> time_count[2] = grids_->Nakx;
} else {
Wkx = new nca(0);
}
////////////////////////////
// //
// W (ky, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_ky] > 0) {
Wky = new nca(nY*nS, nYk*nS);
Wky -> write_v_time = true;
Wky -> time_dims[0] = time_dim;
Wky -> time_dims[1] = s_dim;
Wky -> time_dims[2] = ky_dim;
Wky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkyst", NC_FLOAT, 3, Wky -> time_dims, &Wky -> time)) ERR(retval);
Wky -> time_count[1] = grids_->Nspecies;
Wky -> time_count[2] = grids_->Naky;
} else {
Wky = new nca(0);
}
////////////////////////////
// //
// W (kz, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_kz] > 0) {
Wkz = new nca(nZ * nS, nZ * nS);
Wkz -> write_v_time = true;
Wkz -> time_dims[0] = time_dim;
Wkz -> time_dims[1] = s_dim;
Wkz -> time_dims[2] = nkz;
Wkz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkzst", NC_FLOAT, 3, Wkz -> time_dims, &Wkz -> time)) ERR(retval);
Wkz -> time_count[1] = grids_->Nspecies;
Wkz -> time_count[2] = grids_->Nz;
} else {
Wkz = new nca(0);
}
////////////////////////////
// //
// W (z, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_z] > 0) {
Wz = new nca(nZ*nS);
Wz -> write_v_time = true;
Wz -> time_dims[0] = time_dim;
Wz -> time_dims[1] = s_dim;
Wz -> time_dims[2] = nz;
Wz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wzst", NC_FLOAT, 3, Wz -> time_dims, &Wz -> time)) ERR(retval);
Wz -> time_count[1] = grids_->Nspecies;
Wz -> time_count[2] = grids_->Nz;
} else {
Wz = new nca(0);
}
////////////////////////////
// //
// W (kx,ky, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_kxky] > 0) {
Wkxky = new nca(nX * nY * nS, nXk * nYk * nS);
Wkxky -> write_v_time = true;
Wkxky -> time_dims[0] = time_dim;
Wkxky -> time_dims[1] = s_dim;
Wkxky -> time_dims[2] = ky_dim;
Wkxky -> time_dims[3] = kx_dim;
Wkxky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkxkyst", NC_FLOAT, 4, Wkxky -> time_dims, &Wkxky -> time)) ERR(retval);
Wkxky -> time_count[1] = grids_->Nspecies;
Wkxky -> time_count[2] = grids_->Naky;
Wkxky -> time_count[3] = grids_->Nakx;
} else {
Wkxky = new nca(0);
}
////////////////////////////
// //
// W (adiabatic species) //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_species] > 0) {
As = new nca(1);
As -> write_v_time;
As -> time_dims[0] = time_dim;
As -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "At", NC_FLOAT, 1, As -> time_dims, &As -> time)) ERR(retval);
} else {
As = new nca(0);
}
////////////////////////////
// //
// W (kx) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_kx] > 0) {
Akx = new nca(nX, nXk);
Akx -> write_v_time = true;
Akx -> time_dims[0] = time_dim;
Akx -> time_dims[1] = kx_dim;
Akx -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akxst", NC_FLOAT, 2, Akx -> time_dims, &Akx -> time)) ERR(retval);
Akx -> time_count[1] = grids_->Nakx;
} else {
Akx = new nca(0);
}
////////////////////////////
// //
// W (ky) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_ky] > 0) {
Aky = new nca(nY, nYk);
Aky -> write_v_time = true;
Aky -> time_dims[0] = time_dim;
Aky -> time_dims[1] = ky_dim;
Aky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akyst", NC_FLOAT, 2, Aky -> time_dims, &Aky -> time)) ERR(retval);
Aky -> time_count[1] = grids_->Naky;
} else {
Aky = new nca(0);
}
////////////////////////////
// //
// A (kz) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_kz] > 0) {
Akz = new nca(nZ, nZ);
Akz -> write_v_time = true;
Akz -> time_dims[0] = time_dim;
Akz -> time_dims[1] = nkz;
Akz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akzst", NC_FLOAT, 2, Akz -> time_dims, &Akz -> time)) ERR(retval);
Akz -> time_count[1] = grids_->Nz;
} else {
Akz = new nca(0);
}
////////////////////////////
// //
// A (z) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_z] > 0) {
Az = new nca(nZ);
Az -> write_v_time = true;
Az -> time_dims[0] = time_dim;
Az -> time_dims[1] = nz;
Az -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Azst", NC_FLOAT, 2, Az -> time_dims, &Az -> time)) ERR(retval);
Az -> time_count[1] = grids_->Nz;
} else {
Az = new nca(0);
}
////////////////////////////
// //
// W (kx,ky) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_kxky] > 0) {
Akxky = new nca(nX * nY, nXk * nYk);
Akxky -> write_v_time = true;
Akxky -> time_dims[0] = time_dim;
Akxky -> time_dims[1] = ky_dim;
Akxky -> time_dims[2] = kx_dim;
Akxky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akxkyst", NC_FLOAT, 3, Akxky -> time_dims, &Akxky -> time)) ERR(retval);
Akxky -> time_count[1] = grids_->Naky;
Akxky -> time_count[2] = grids_->Nakx;
} else {
Akxky = new nca(0);
}
////////////////////////////
// //
// Lag-Herm spectrum //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_lm] > 0) {
Wlm = new nca(nL*nM*nS);
Wlm -> write_v_time = true;
Wlm -> time_dims[0] = time_dim;
Wlm -> time_dims[1] = s_dim;
Wlm -> time_dims[2] = m_dim;
Wlm -> time_dims[3] = l_dim;
Wlm -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wlmst", NC_FLOAT, 4, Wlm -> time_dims, &Wlm -> time)) ERR(retval);
Wlm -> time_count[1] = grids_->Nspecies;
Wlm -> time_count[2] = grids_->Nm;
Wlm -> time_count[3] = grids_->Nl;
} else {
Wlm = new nca(0);
}
////////////////////////////
// //
// Laguerre spectrum //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_l] > 0) {
Wl = new nca(nL*nS);
Wl -> write_v_time = true;
Wl -> time_dims[0] = time_dim;
Wl -> time_dims[1] = s_dim;
Wl -> time_dims[2] = l_dim;
Wl -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wlst", NC_FLOAT, 3, Wl -> time_dims, &Wl -> time)) ERR(retval);
Wl -> time_count[1] = grids_->Nspecies;
Wl -> time_count[2] = grids_->Nl;
} else {
Wl = new nca(0);
}
////////////////////////////
// //
// Hermite spectrum //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_m] > 0) {
Wm = new nca(nM*nS);
Wm -> write_v_time = true;
Wm -> time_dims[0] = time_dim;
Wm -> time_dims[1] = s_dim;
Wm -> time_dims[2] = m_dim;
Wm -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wmst", NC_FLOAT, 3, Wm -> time_dims, &Wm -> time)) ERR(retval);
Wm -> time_count[1] = grids_->Nspecies;
Wm -> time_count[2] = grids_->Nm;
} else {
Wm = new nca(0);
}
bool linked = (not pars_->local_limit && not pars_->boundary_option_periodic);
/*
if (linked && false) {
zkxky[0] = nz;
zkxky[1] = kx_dim;
zkxky[2] = ky_dim;
zkxky -> file = nc_special;
if (retval = nc_def_var(nc_special, "theta_x", NC_FLOAT, 3, zkxky, &theta_x)) ERR(retval);
}
*/
////////////////////////////
// //
// <v_ExB>_y,z (x) //
// //
////////////////////////////
if (pars_->write_vEy) {
vEy = new nca(grids_->NxNyNz, grids_->Nx);
vEy -> write_v_time = true;
vEy -> time_dims[0] = time_dim;
vEy -> time_dims[1] = x_dim;
vEy -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "vEy_xt", NC_FLOAT, 2, vEy->time_dims, &vEy->time)) ERR(retval);
vEy -> time_count[1] = grids_->Nx;
vEy -> xdata = true;
vEy -> dx = true;
} else {
vEy = new nca(0);
}
if (pars_->write_avg_zvE) {
avg_zvE = new nca(grids_->NxNyNz, grids_->Nx);
avg_zvE -> write_v_time = true;
avg_zvE -> time_dims[0] = time_dim;
avg_zvE -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zvE_t", NC_FLOAT, 1,
avg_zvE->time_dims, &avg_zvE->time)) ERR(retval);
avg_zvE -> scalar = true;
avg_zvE -> dx = true;
} else {
avg_zvE = new nca(0);
}
if (pars_->write_avg_zkxvEy) {
avg_zkxvEy = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkxvEy -> write_v_time = true;
avg_zkxvEy -> time_dims[0] = time_dim;
avg_zkxvEy -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkxvEy_t", NC_FLOAT, 1,
avg_zkxvEy->time_dims, &avg_zkxvEy->time)) ERR(retval);
avg_zkxvEy -> scalar = true;
avg_zkxvEy -> d2x = true;
} else {
avg_zkxvEy = new nca(0);
}
if (pars_->write_avg_zkden) {
avg_zkden = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkden -> write_v_time = true;
avg_zkden -> time_dims[0] = time_dim;
avg_zkden -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkden_t", NC_FLOAT, 1,
avg_zkden->time_dims, &avg_zkden->time)) ERR(retval);
avg_zkden -> scalar = true;
avg_zkden -> dx = true;
} else {
avg_zkden = new nca(0);
}
if (pars_->write_avg_zkUpar) {
avg_zkUpar = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkUpar -> write_v_time = true;
avg_zkUpar -> time_dims[0] = time_dim;
avg_zkUpar -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkUpar_t", NC_FLOAT, 1,
avg_zkUpar->time_dims, &avg_zkUpar->time)) ERR(retval);
avg_zkUpar -> scalar = true;
avg_zkUpar -> dx = true;
} else {
avg_zkUpar = new nca(0);
}
if (pars_->write_avg_zkTpar) {
avg_zkTpar = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkTpar -> write_v_time = true;
avg_zkTpar -> time_dims[0] = time_dim;
avg_zkTpar -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkTpar_t", NC_FLOAT, 1,
avg_zkTpar->time_dims, &avg_zkTpar->time)) ERR(retval);
avg_zkTpar -> scalar = true;
avg_zkTpar -> dx = true;
avg_zkTpar -> adj = sqrtf(2.0);
} else {
avg_zkTpar = new nca(0);
}
if (pars_->write_avg_zkqpar) {
avg_zkqpar = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkqpar -> write_v_time = true;
avg_zkqpar -> time_dims[0] = time_dim;
avg_zkqpar -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkqpar_t", NC_FLOAT, 1,
avg_zkqpar->time_dims, &avg_zkqpar->time)) ERR(retval);
avg_zkqpar -> scalar = true;
avg_zkqpar -> dx = true;
avg_zkqpar -> adj = sqrtf(6.0);
} else {
avg_zkqpar = new nca(0);
}
if (pars_->write_avg_zkTperp) {
avg_zkTperp = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkTperp -> write_v_time = true;
avg_zkTperp -> time_dims[0] = time_dim;
avg_zkTperp -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkTperp_t", NC_FLOAT, 1,
avg_zkTperp->time_dims, &avg_zkTperp->time)) ERR(retval);
avg_zkTperp -> scalar = true;
avg_zkTperp -> dx = true;
} else {
avg_zkTperp = new nca(0);
}
////////////////////////////
// //
// <d/dx v_ExB>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kxvEy) {
kxvEy = new nca(grids_->NxNyNz, grids_->Nx);
kxvEy -> write_v_time = true;
kxvEy -> time_dims[0] = time_dim;
kxvEy -> time_dims[1] = x_dim;
kxvEy -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kxvEy_xt", NC_FLOAT, 2, kxvEy -> time_dims, &kxvEy -> time)) ERR(retval);
kxvEy -> time_count[1] = grids_->Nx;
kxvEy -> xdata = true;
kxvEy -> d2x = true;
} else {
kxvEy = new nca(0);
}
////////////////////////////
// //
// <d/dx denh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kden) {
kden = new nca(grids_->NxNyNz, grids_->Nx);
kden -> write_v_time = true;
kden -> time_dims[0] = time_dim;
kden -> time_dims[1] = x_dim;
kden -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kden_xt", NC_FLOAT, 2, kden->time_dims, &kden->time)) ERR(retval);
kden -> time_count[1] = grids_->Nx;
kden -> xdata = true;
kden -> dx = true;
} else {
kden = new nca(0);
}
////////////////////////////
// //
// <d/dx uparh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kUpar) {
kUpar = new nca(grids_->NxNyNz, grids_->Nx);
kUpar -> write_v_time = true;
kUpar -> time_dims[0] = time_dim;
kUpar -> time_dims[1] = x_dim;
kUpar -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kUpar_xt", NC_FLOAT, 2, kUpar->time_dims, &kUpar->time)) ERR(retval);
kUpar->time_count[1] = grids_->Nx;
kUpar->xdata = true;
kUpar -> dx = true;
} else {
kUpar = new nca(0);
}
////////////////////////////
// //
// <d/dx Tparh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kTpar) {
kTpar = new nca(grids_->NxNyNz, grids_->Nx);
kTpar->write_v_time = true;
kTpar -> time_dims[0] = time_dim;
kTpar -> time_dims[1] = x_dim;
kTpar -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kTpar_xt", NC_FLOAT, 2, kTpar->time_dims, &kTpar->time)) ERR(retval);
kTpar -> time_count[1] = grids_->Nx;
kTpar -> xdata = true;
kTpar -> dx = true;
kTpar -> adj = sqrtf(2.0);
} else {
kTpar = new nca(0);
}
////////////////////////////
// //
// <d/dx Tperph>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kTperp) {
kTperp = new nca(grids_->NxNyNz, grids_->Nx);
kTperp -> write_v_time = true;
kTperp -> time_dims[0] = time_dim;
kTperp -> time_dims[1] = x_dim;
kTperp -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kTperp_xt", NC_FLOAT, 2, kTperp->time_dims, &kTperp->time)) ERR(retval);
kTperp -> time_count[1] = grids_->Nx;
kTperp -> xdata = true;
kTperp -> dx = true;
} else {
kTperp = new nca(0);
}
////////////////////////////
// //
// <d/dx qparh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kqpar) {
kqpar = new nca(grids_->NxNyNz, grids_->Nx);
kqpar -> write_v_time = true;
kqpar -> time_dims[0] = time_dim;
kqpar -> time_dims[1] = x_dim;
kqpar -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kqpar_xt", NC_FLOAT, 2, kqpar -> time_dims, &kqpar->time)) ERR(retval);
kqpar -> time_count[1] = grids_->Nx;
kqpar -> xdata = true;
kqpar -> dx = true;
kqpar -> adj = sqrtf(6.0);
} else {
kqpar = new nca(0);
}
////////////////////////////
// Non-zonal //
// <v_ExB> (x, y) //
// //
////////////////////////////
if (pars_->write_xyvEy) {
xyvEy = new nca(grids_->NxNyNz, grids_->NxNy);
xyvEy->write_v_time = true;
xyvEy -> time_dims[0] = ztime_dim;
xyvEy -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyvEy -> time_dims[2] = zx_dim;
xyvEy -> file = z_file;
if (retval = nc_def_var(z_file, "vEy_xyt", NC_FLOAT, 3, xyvEy -> time_dims, &xyvEy->time)) ERR(retval);
xyvEy -> time_count[1] = grids_->Ny;
xyvEy -> time_count[2] = grids_->Nx;
xyvEy -> xydata = true;
xyvEy -> dx = true;
} else {
xyvEy = new nca(0);
}
////////////////////////////
// Non-zonal //
// <d/dx v_ExB,y> (x, y) //
// //
////////////////////////////
if (pars_ -> write_xykxvEy) {
xykxvEy = new nca(grids_->NxNyNz, grids_->NxNy);
xykxvEy -> write_v_time = true;
xykxvEy -> time_dims[0] = ztime_dim;
xykxvEy -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xykxvEy -> time_dims[2] = zx_dim;
xykxvEy -> file = z_file;
if (retval = nc_def_var(z_file, "kxvEy_xyt", NC_FLOAT, 3, xykxvEy -> time_dims, &xykxvEy->time)) ERR(retval);
xykxvEy -> time_count[1] = grids_->Ny;
xykxvEy -> time_count[2] = grids_->Nx;
xykxvEy -> xydata = true;
xykxvEy -> d2x = true;
} else {
xykxvEy = new nca(0);
}
////////////////////////////
// Non-zonal //
// <den> (x, y) //
// //
////////////////////////////
if (pars_->write_xyden) {
xyden = new nca(grids_->NxNyNz, grids_->NxNy);
xyden->write_v_time = true;
xyden -> time_dims[0] = ztime_dim;
xyden -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyden -> time_dims[2] = zx_dim;
xyden -> file = z_file;
if (retval = nc_def_var(z_file, "den_xyt", NC_FLOAT, 3, xyden -> time_dims, &xyden->time)) ERR(retval);
xyden -> time_count[1] = grids_->Ny;
xyden -> time_count[2] = grids_->Nx;
xyden -> xydata = true;
} else {
xyden = new nca(0);
}
////////////////////////////
// Non-zonal //
// <Upar> (x, y) //
// //
////////////////////////////
if (pars_->write_xyUpar) {
xyUpar = new nca(grids_->NxNyNz, grids_->NxNy);
xyUpar->write_v_time = true;
xyUpar -> time_dims[0] = ztime_dim;
xyUpar -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyUpar -> time_dims[2] = zx_dim;
xyUpar -> file = z_file;
if (retval = nc_def_var(z_file, "upar_xyt", NC_FLOAT, 3, xyUpar -> time_dims, &xyUpar->time)) ERR(retval);
xyUpar -> time_count[1] = grids_->Ny;
xyUpar -> time_count[2] = grids_->Nx;
xyUpar -> xydata = true;
} else {
xyUpar = new nca(0);
}
////////////////////////////
// Non-zonal //
// <Tpar> (x, y) //
// //
////////////////////////////
if (pars_->write_xyTpar) {
xyTpar = new nca(grids_->NxNyNz, grids_->NxNy);
xyTpar->write_v_time = true;
xyTpar -> time_dims[0] = ztime_dim;
xyTpar -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyTpar -> time_dims[2] = zx_dim;
xyTpar -> file = z_file;
if (retval = nc_def_var(z_file, "Tpar_xyt", NC_FLOAT, 3, xyTpar -> time_dims, &xyTpar->time)) ERR(retval);
xyTpar -> time_count[1] = grids_->Ny;
xyTpar -> time_count[2] = grids_->Nx;
xyTpar -> xydata = true;
xyTpar -> adj = sqrtf(2.0);
} else {
xyTpar = new nca(0);
}
////////////////////////////
// Non-zonal //
// <Tperp> (x, y) //
// //
////////////////////////////
if (pars_->write_xyTperp) {
xyTperp = new nca(grids_->NxNyNz, grids_->NxNy);
xyTperp -> write_v_time = true;
xyTperp -> time_dims[0] = ztime_dim;
xyTperp -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyTperp -> time_dims[2] = zx_dim;
xyTperp -> file = z_file;
if (retval = nc_def_var(z_file, "Tperp_xyt", NC_FLOAT, 3, xyTperp -> time_dims, &xyTperp->time)) ERR(retval);
xyTperp -> time_count[1] = grids_->Ny;
xyTperp -> time_count[2] = grids_->Nx;
xyTperp -> xydata = true;
} else {
xyTperp = new nca(0);
}
////////////////////////////
// Non-zonal //
// <qpar> (x, y) //
// //
////////////////////////////
if (pars_->write_xyqpar) {
xyqpar = new nca(grids_->NxNyNz, grids_->NxNy);
xyqpar->write_v_time = true;
xyqpar -> time_dims[0] = ztime_dim;
xyqpar -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyqpar -> time_dims[2] = zx_dim;
xyqpar -> file = z_file;
if (retval = nc_def_var(z_file, "qpar_xyt", NC_FLOAT, 3, xyqpar -> time_dims, &xyqpar->time)) ERR(retval);
xyqpar -> time_count[1] = grids_->Ny;
xyqpar -> time_count[2] = grids_->Nx;
xyqpar -> xydata = true;
xyqpar -> adj = sqrtf(6.0);
} else {
xyqpar = new nca(0);
}
if (pars_->ks && pars_->ResWrite) {
r_y = new nca(pars_->ResQ * grids_->NxNyNz * grids_->Nmoms);
r_y -> write_v_time = true;
r_y -> time_dims[0] = rtime_dim;
r_y -> time_dims[1] = res_dim;
r_y -> file = r_file;
if (retval = nc_def_var(r_file, "r", NC_DOUBLE, 2, r_y -> time_dims, &r_y -> time)) ERR(retval);
r_y -> time_count[1] = pars_->ResQ * grids_->NxNyNz*grids_->Nmoms;
} else {
r_y = new nca(0);
}
////////////////////////////
// //
// g(y) for K-S eqn //
// //
////////////////////////////
if (pars_->ks && pars_->write_ks) {
g_y = new nca(grids_->Ny);
g_y -> write_v_time = true;
g_y -> time_dims[0] = time_dim;
g_y -> time_dims[1] = y_dim;
g_y -> file = nc_special;
if (retval = nc_def_var(nc_special, "g_yt", NC_FLOAT, 2, g_y -> time_dims, &g_y -> time)) ERR(retval);
g_y -> time_count[1] = grids_->Ny;
int nbatch = 1;
grad_perp = new GradPerp(grids_, nbatch, grids_->Nyc);
} else {
g_y = new nca(0);
}
////////////////////////////
// //
// Free energy //
// //
////////////////////////////
if (pars_->write_free_energy) {
Wtot = new nca(0);
Wtot -> write_v_time = true;
Wtot -> time_dims[0] = time_dim;
Wtot -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "W", NC_FLOAT, 1, Wtot -> time_dims, &Wtot -> time)) ERR(retval);
totW = 0.;
} else {
Wtot = new nca(0);
}
////////////////////////////
// //
// Heat fluxes //
// //
////////////////////////////
if (pars_->write_fluxes ) {
qs = new nca(nS);
qs -> write_v_time = true;
qs -> time_dims[0] = time_dim;
qs -> time_dims[1] = s_dim;
qs -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "qflux", NC_FLOAT, 2, qs -> time_dims, &qs -> time)) ERR(retval);
qs -> time_count[1] = grids_->Nspecies;
all_red = new Species_Reduce(nR, nS); hipDeviceSynchronize(); CUDA_DEBUG("Reductions: %s \n");
} else {
qs = new nca(0);
}
DEBUGPRINT("ncdf: ending definition mode for NetCDF \n");
if (retval = nc_enddef(file)) ERR(retval);
if (pars_->write_xymom) {
if (retval = nc_enddef(z_file)) ERR(retval);
}
if (pars_->ResWrite) {
if (retval = nc_enddef(r_file)) ERR(retval);
}
///////////////////////////////////
// //
// x //
// //
///////////////////////////////////
x_start[0] = 0;
x_count[0] = grids_->Nx;
if (retval = nc_put_vara(file, x, x_start, x_count, grids_->x_h)) ERR(retval);
if (pars_->write_xymom) {
if (retval = nc_put_vara(z_file, zx, x_start, x_count, grids_->x_h)) ERR(retval);
}
///////////////////////////////////
// //
// y //
// //
///////////////////////////////////
y_start[0] = 0;
y_count[0] = grids_->Ny;
if (retval = nc_put_vara(file, y, y_start, y_count, grids_->y_h)) ERR(retval);
if (pars_->write_xymom) {
if (retval = nc_put_vara(z_file, zy, y_start, y_count, grids_->y_h)) ERR(retval);
}
///////////////////////////////////
// //
// z //
// //
///////////////////////////////////
z_start[0] = 0;
z_count[0] = grids_->Nz;
// if (retval = nc_put_vara(file, z, z_start, z_count, z_h)) ERR(retval);
///////////////////////////////////
// //
// kz //
// //
///////////////////////////////////
kz_start[0] = 0;
kz_count[0] = grids_->Nz;
for (int i=0; i<grids_->Nz; i++) grids_->kpar_outh[i] = geo_->gradpar*grids_->kz_outh[i];
if (retval = nc_put_vara(file, kz, kz_start, kz_count, grids_->kpar_outh)) ERR(retval);
///////////////////////////////////
// //
// ky //
// //
///////////////////////////////////
ky_start[0] = 0;
ky_count[0] = grids_->Naky;
if (retval = nc_put_vara(file, ky, ky_start, ky_count, grids_->ky_h)) ERR(retval);
///////////////////////////////////
// //
// kx //
// //
///////////////////////////////////
kx_start[0] = 0;
kx_count[0] = grids_->Nakx;
if (retval = nc_put_vara(file, kx, kx_start, kx_count, grids_->kx_outh)) ERR(retval);
///////////////////////////////////
// //
// geometric information //
// //
///////////////////////////////////
geo_start[0] = 0;
geo_count[0] = grids_->Nz;
if (retval = nc_put_vara(file, theta, geo_start, geo_count, geo_->z_h)) ERR(retval);
if (linked && false) {
int Nx = grids_->Nx;
int Ny = grids_->Ny;
int Nz = grids_->Nz;
int Naky = grids_->Naky;
zkxky_count[0] = Nz;
zkxky_count[1] = 1;
zkxky_count[2] = 1;
size_t size = sizeof(float)*Nz;
hipHostMalloc((void**) &theta_extended, size);
float th0;
for (int i=0; i<(Nx-1)/3+1; i++) {
for (int j=0; j<(Ny-1)/3+1; j++) {
if (j==0) {th0 = 0.;} else {th0 = grids_->kx_h[i]/(grids_->ky_h[j]*pars_->shat);}
for (int k=0; k<Nz; k++) {
theta_extended[k] = geo_->z_h[k] - th0;
}
zkxky_start[0] = 0;
zkxky_start[1] = i;
zkxky_start[2] = j;
if (retval = nc_put_vara(nc_geo, theta_x, zkxky_start, zkxky_count, theta_extended)) ERR(retval);
}
}
for(int i=2*Nx/3+1; i<Nx; i++) {
for(int j=0; j<Naky; j++) {
if (j==0) {th0 = 0.;} else {th0 = grids_->kx_h[i]/(grids_->ky_h[j]*pars_->shat);}
for (int k=0; k<Nz; k++) {
theta_extended[k] = geo_->z_h[k] - th0;
}
zkxky_start[0] = 0;
zkxky_start[1] = i-2*Nx/3+(Nx-1)/3;
zkxky_start[2] = j;
if (retval = nc_put_vara(nc_geo, theta_x, zkxky_start, zkxky_count, theta_extended)) ERR(retval);
}
}
if (theta_extended) hipHostFree(theta_extended);
}
// if (retval = nc_put_vara(file, theta, geo_start, geo_count, geo_->z_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, bmag, geo_start, geo_count, geo_->bmag_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, bgrad, geo_start, geo_count, geo_->bgrad_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gbdrift, geo_start, geo_count, geo_->gbdrift_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gbdrift0, geo_start, geo_count, geo_->gbdrift0_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, cvdrift, geo_start, geo_count, geo_->cvdrift_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, cvdrift0, geo_start, geo_count, geo_->cvdrift0_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gds2, geo_start, geo_count, geo_->gds2_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gds21, geo_start, geo_count, geo_->gds21_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gds22, geo_start, geo_count, geo_->gds22_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, grho, geo_start, geo_count, geo_->grho_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, jacobian, geo_start, geo_count, geo_->jacobian_h)) ERR(retval);
if (retval = nc_put_var (nc_geo, ivar, &geo_->gradpar)) ERR(retval);
idum = pars_->boundary_option_periodic ? 1 : 0;
if (retval = nc_put_var(file, periodic, &idum)) ERR(retval);
idum = pars_->local_limit ? 1 : 0;
if (retval = nc_put_var(file, local_limit, &idum)) ERR(retval);
}
// Destructor: release all host/device buffers and helper objects allocated
// in the constructor.  (hipHostFree for pinned host memory, hipFree for
// device memory; delete on a null pointer is a no-op but the guards follow
// the existing style.)
NetCDF_ids::~NetCDF_ids() {
  // Pinned host buffers
  if (primary)   hipHostFree ( primary );
  if (secondary) hipHostFree ( secondary );
  if (tertiary)  hipHostFree ( tertiary );
  // Device buffers (allocated when write_kmom/write_xymom/write_avgz)
  if (amom) hipFree ( amom );
  if (df)   hipFree ( df );
  if (favg) hipFree ( favg );
  // Reduction helpers
  if (red)     delete red;
  if (pot)     delete pot;
  if (ph2)     delete ph2;
  if (all_red) delete all_red;
  // Fix: the GradPerp transform helpers are allocated with new in the
  // constructor (grad_phi for xy/kmom diagnostics, grad_perp for the K-S
  // path) but were never freed, leaking their internal buffers.  Both are
  // nullptr-initialized in the constructor's init list, so this is safe.
  if (grad_phi)  delete grad_phi;
  if (grad_perp) delete grad_perp;
}
// Write a single zonal (flux-surface-averaged) scalar held in D->zonal:
// once at the end of the run if the one-shot flag is set, and/or as an
// entry in the time series; then advance the record counter.
void NetCDF_ids::write_zonal_nc(nca *D, bool endrun) {
  int rc;
  if (endrun && D->write) {
    if (rc = nc_put_vara(D->file, D->idx, D->start, D->count, &D->zonal)) ERR(rc);
  }
  if (D->write_v_time) {
    if (rc = nc_put_vara(D->file, D->time, D->time_start, D->time_count, &D->zonal)) ERR(rc);
  }
  D->increment_ts();
}
// Write the host-side buffer D->cpu: one-shot at end of run and/or as a
// time-series record, then advance the record counter.
void NetCDF_ids::write_nc(nca *D, bool endrun) {
  int rc;
  if (endrun && D->write) {
    if (rc = nc_put_vara(D->file, D->idx, D->start, D->count, D->cpu)) ERR(rc);
  }
  if (D->write_v_time) {
    if (rc = nc_put_vara(D->file, D->time, D->time_start, D->time_count, D->cpu)) ERR(rc);
  }
  D->increment_ts();
}
// Overload for a single double-precision scalar passed by value.
void NetCDF_ids::write_nc(nca *D, double data, bool endrun) {
  int rc;
  if (endrun && D->write) {
    if (rc = nc_put_vara(D->file, D->idx, D->start, D->count, &data)) ERR(rc);
  }
  if (D->write_v_time) {
    if (rc = nc_put_vara(D->file, D->time, D->time_start, D->time_count, &data)) ERR(rc);
  }
  D->increment_ts();
}
// Overload for a single single-precision scalar passed by value.
void NetCDF_ids::write_nc(nca *D, float data, bool endrun) {
  int rc;
  if (endrun && D->write) {
    if (rc = nc_put_vara(D->file, D->idx, D->start, D->count, &data)) ERR(rc);
  }
  if (D->write_v_time) {
    if (rc = nc_put_vara(D->file, D->time, D->time_start, D->time_count, &data)) ERR(rc);
  }
  D->increment_ts();
}
/*
void NetCDF_ids::pzt(MomentsG* G, Fields* f)
{
int threads=256;
int blocks=(grids_->NxNycNz+threads-1)/threads;
primary[0]=0.; secondary[0]=0.; tertiary[0]=0.;
Tbar <<<blocks, threads>>> (t_bar, G->G(), f->phi, geo_->kperp2);
get_pzt <<<blocks, threads>>> (&primary[0], &secondary[0], &tertiary[0], f->phi, t_bar);
}
*/
// Potential ("P") spectrum vs ky, one row per species.
// The device reduction fills Nyc entries per species; only the first Naky
// (the unmasked, de-aliased modes) are kept in the output row.
void NetCDF_ids::write_Pky(float* P2, bool endrun)
{
  if (Pky->write_v_time || (Pky->write && endrun)) {
    const int n = grids_->Nyc * grids_->Nspecies;
    pot->Sum(P2, Pky->data, PSPECTRA_ky);              // device-side reduction
    CP_TO_CPU(Pky->tmp, Pky->data, sizeof(float)*n);   // device -> host
    for (int is = 0; is < grids_->Nspecies; is++)
      for (int ik = 0; ik < grids_->Naky; ik++)
        Pky->cpu[ik + is*grids_->Naky] = Pky->tmp[ik + is*grids_->Nyc];
    write_nc(Pky, endrun);
  }
}
// Potential ("P") spectrum vs kx, one row per species.
// The device reduction (pot->Sum) returns the spectrum on the full unmasked
// Nx grid in FFT storage order: index 0 is kx=0, indices 1..NK hold the
// positive kx modes, and the negative modes are wrapped at the top end
// (index NX-it holds the -kx_it mode).  Repack so the output row of length
// Nakx has kx increasing monotonically with kx=0 at index NK = Nakx/2.
void NetCDF_ids::write_Pkx(float* P2, bool endrun)
{
  if (Pkx -> write_v_time || (Pkx -> write && endrun)) {
    int i = grids_->Nx*grids_->Nspecies;
    int NK = grids_->Nakx/2;
    int NX = grids_->Nx;
    pot->Sum(P2, Pkx->data, PSPECTRA_kx); CP_TO_CPU(Pkx->tmp, Pkx->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 goes to the middle of the output row
      int it = 0;
      int itp = it + NK;
      Pkx->cpu[itp + is*grids_->Nakx] = Pkx->tmp[it + is*grids_->Nx];
      for (int it = 1; it < NK+1; it++) {
	int itp = NK + it;   // destination of the +kx mode
	int itn = NK - it;   // destination of the -kx mode
	int itm = NX - it;   // source index of the -kx mode (wrapped storage)
	Pkx->cpu[itp + is*grids_->Nakx] = Pkx->tmp[it + is*grids_->Nx];
	Pkx->cpu[itn + is*grids_->Nakx] = Pkx->tmp[itm + is*grids_->Nx];
      }
    }
    write_nc(Pkx, endrun);
  }
}
// Potential spectrum vs z (position along the field line), one row per
// species.  No reordering needed — the reduced layout matches the output.
void NetCDF_ids::write_Pz(float* P2, bool endrun)
{
  if (Pz->write_v_time || (Pz->write && endrun)) {
    const int n = grids_->Nz * grids_->Nspecies;
    pot->Sum(P2, Pz->data, PSPECTRA_z);             // device-side reduction
    CP_TO_CPU(Pz->cpu, Pz->data, sizeof(float)*n);  // device -> host
    write_nc(Pz, endrun);
  }
}
// Potential spectrum vs kz, one row per species.
// The reduced spectrum arrives in FFT storage order; each species' row is
// circularly shifted by Nz/2+1 (output index i reads stored index
// (i + Nz/2 + 1) mod Nz), presumably so kz increases monotonically in the
// output — consistent with the kx repacking elsewhere.  Nz == 1 needs no
// shift.
void NetCDF_ids::write_Pkz(float* P2, bool endrun)
{
  if (Pkz -> write_v_time || (Pkz -> write && endrun)) {
    int i = grids_->Nz*grids_->Nspecies; int Nz = grids_->Nz;
    pot->Sum(P2, Pkz->data, PSPECTRA_kz); CP_TO_CPU(Pkz->tmp, Pkz->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      if (Nz>1) {
	for (int i = 0; i < Nz; i++) Pkz->cpu[i + is*Nz] = Pkz->tmp[ (i + Nz/2 + 1) % Nz + is*Nz ];
      } else {
	for (int i = 0; i < Nz; i++) Pkz->cpu[i + is*Nz] = Pkz->tmp[ i + is*Nz ];
      }
    }
    write_nc(Pkz, endrun);
  }
}
// 2-D potential spectrum vs (kx, ky), one plane per species.
// Input layout (tmp, full unmasked FFT grid): ik + it*Nyc + is*Nyc*Nx,
// with negative kx wrapped at it = NX-1, NX-2, ...
// Output layout (cpu): it' + ik*Nakx + is*Nakx*Naky, where it' orders kx
// monotonically with kx=0 at index NK = Nakx/2, and only the unmasked
// Naky ky modes are kept.
void NetCDF_ids::write_Pkxky(float* P2, bool endrun)
{
  if (Pkxky -> write_v_time || (Pkxky -> write && endrun)) {
    int i = grids_->Nyc*grids_->Nx*grids_->Nspecies;
    int NK = grids_->Nakx/2;
    int NX = grids_->Nx;
    pot->Sum(P2, Pkxky->data, PSPECTRA_kxky);
    CP_TO_CPU(Pkxky->tmp, Pkxky->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 column
      int it = 0;
      int itp = it + NK;
      for (int ik = 0; ik < grids_->Naky; ik++) {
	int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;  // destination
	int Rp = ik  + it*grids_->Nyc  + is*grids_->Nyc *grids_->Nx;    // source
	Pkxky->cpu[Qp] = Pkxky->tmp[Rp];
      }
      for (int it = 1; it < NK+1; it++) {
	int itp = NK + it;   // +kx destination column
	int itn = NK - it;   // -kx destination column
	int itm = NX - it;   // -kx source column (wrapped)
	for (int ik = 0; ik < grids_->Naky; ik++) {
	  int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;
	  int Rp = ik  + it*grids_->Nyc  + is*grids_->Nyc * NX;
	  int Qn = itn + ik *grids_->Nakx + is*grids_->Naky*grids_->Nakx;
	  int Rm = ik  + itm*grids_->Nyc  + is*grids_->Nyc * NX;
	  Pkxky->cpu[Qp] = Pkxky->tmp[Rp];
	  Pkxky->cpu[Qn] = Pkxky->tmp[Rm];
	}
      }
    }
    write_nc(Pkxky, endrun);
  }
}
// Free-energy ("W", from G**2) spectrum vs z, one row per species.
void NetCDF_ids::write_Wz(float *G2, bool endrun)
{
  if (Wz->write_v_time || (Wz->write && endrun)) {
    const int n = grids_->Nz * grids_->Nspecies;
    red->Sum(G2, Wz->data, WSPECTRA_z);             // device-side reduction
    CP_TO_CPU(Wz->cpu, Wz->data, sizeof(float)*n);  // device -> host
    write_nc(Wz, endrun);
  }
}
// Free-energy ("W", from G**2) spectrum vs kz, one row per species.
// Same circular shift as write_Pkz: output index i reads stored index
// (i + Nz/2 + 1) mod Nz, so the kz ordering of the output matches the kz
// coordinate written in the constructor.  Nz == 1 needs no shift.
void NetCDF_ids::write_Wkz(float *G2, bool endrun)
{
  if (Wkz -> write_v_time || (Wkz -> write && endrun)) {
    int i = grids_->Nz*grids_->Nspecies; int Nz = grids_->Nz;
    red->Sum(G2, Wkz->data, WSPECTRA_kz); CP_TO_CPU(Wkz->tmp, Wkz->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      if (Nz>1) {
	for (int i = 0; i < Nz; i++) Wkz->cpu[i+is*Nz] = Wkz->tmp[ (i + Nz/2 + 1) % Nz + is*Nz ];
      } else {
	for (int i = 0; i < Nz; i++) Wkz->cpu[i+is*Nz] = Wkz->tmp[ i + is*Nz ];
      }
    }
    write_nc (Wkz, endrun);
  }
}
// Free-energy content per species (G**2 part), written as a time series,
// and accumulated into totW for the total-free-energy diagnostic.
// NOTE(review): unlike write_Ps, this does NOT reset totW before adding —
// the reset appears to happen in write_Ps (before its accumulation) or in
// write_Wtot (after writing).  Confirm the intended calling order of
// write_Ps / write_Ws / write_As relative to write_Wtot.
void NetCDF_ids::write_Ws(float* G2, bool endrun)
{
  if (Ws -> write_v_time) {
    red->Sum(G2, Ws->data, WSPECTRA_species); CP_TO_CPU(Ws->cpu, Ws->data, sizeof(float)*grids_->Nspecies);
    write_nc(Ws, endrun);
    if (Wtot -> write_v_time) {
      for (int is=0; is < grids_->Nspecies; is++) totW += Ws->cpu[is];
    }
  }
}
// Free-energy spectrum vs ky, one row per species; only the unmasked Naky
// modes of each Nyc-wide row are copied to the output.
void NetCDF_ids::write_Wky(float* G2, bool endrun)
{
  if (Wky->write_v_time || (Wky->write && endrun)) {
    const int n = grids_->Nyc * grids_->Nspecies;
    red->Sum(G2, Wky->data, WSPECTRA_ky);            // device-side reduction
    CP_TO_CPU(Wky->tmp, Wky->data, sizeof(float)*n); // device -> host
    for (int is = 0; is < grids_->Nspecies; is++)
      for (int ik = 0; ik < grids_->Naky; ik++)
        Wky->cpu[ik + is*grids_->Naky] = Wky->tmp[ik + is*grids_->Nyc];
    write_nc(Wky, endrun);
  }
}
// Free-energy spectrum vs kx, one row per species.
// Same repacking as write_Pkx: the device reduction returns the full Nx
// grid in FFT storage order (kx=0 first, negative modes wrapped at the top
// end); the output row of length Nakx has kx increasing monotonically with
// kx=0 at index NK = Nakx/2.
void NetCDF_ids::write_Wkx(float* G2, bool endrun)
{
  if (Wkx -> write_v_time || (Wkx -> write && endrun)) {
    int i = grids_->Nx*grids_->Nspecies;
    int NX = grids_->Nx;
    int NK = grids_->Nakx/2;
    red->Sum(G2, Wkx->data, WSPECTRA_kx); CP_TO_CPU(Wkx->tmp, Wkx->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 goes to the middle of the output row
      int it = 0;
      int itp = it + NK;
      Wkx->cpu[itp + is*grids_->Nakx] = Wkx->tmp[it + is*grids_->Nx];
      for (int it = 1; it < NK+1; it++) {
	int itp = NK + it;   // destination of the +kx mode
	int itn = NK - it;   // destination of the -kx mode
	int itm = NX - it;   // source index of the -kx mode (wrapped storage)
	Wkx->cpu[itp + is*grids_->Nakx] = Wkx->tmp[it + is*grids_->Nx];
	Wkx->cpu[itn + is*grids_->Nakx] = Wkx->tmp[itm + is*grids_->Nx];
      }
    }
    write_nc(Wkx, endrun);
  }
}
// 2-D free-energy spectrum vs (kx, ky), one plane per species.
// Same remap as write_Pkxky: source tmp is on the full (Nyc, Nx) FFT grid
// with negative kx wrapped at the top; destination cpu is (Nakx, Naky) with
// kx monotonically ordered (kx=0 at column NK = Nakx/2) and masked ky modes
// dropped.
void NetCDF_ids::write_Wkxky(float* G2, bool endrun)
{
  if (Wkxky -> write_v_time || (Wkxky -> write && endrun)) {
    int i = grids_->Nyc*grids_->Nx*grids_->Nspecies; // int NK = (grids_->Nx-1)/3+1;
    int NK = grids_->Nakx/2;
    int NX = grids_->Nx;
    red->Sum(G2, Wkxky->data, WSPECTRA_kxky); CP_TO_CPU(Wkxky->tmp, Wkxky->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 column
      int it = 0;
      int itp = it + NK;
      for (int ik = 0; ik < grids_->Naky; ik++) {
	int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;  // destination
	int Rp = ik  + it*grids_->Nyc  + is*grids_->Nyc *grids_->Nx;    // source
	Wkxky->cpu[Qp] = Wkxky->tmp[Rp];
      }
      for (int it = 1; it < NK+1; it++) {
	int itp = NK + it;   // +kx destination column
	int itn = NK - it;   // -kx destination column
	int itm = NX - it;   // -kx source column (wrapped)
	for (int ik = 0; ik < grids_->Naky; ik++) {
	  int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;
	  int Rp = ik  + it*grids_->Nyc  + is*grids_->Nyc * NX;
	  int Qn = itn + ik *grids_->Nakx + is*grids_->Naky*grids_->Nakx;
	  int Rm = ik  + itm*grids_->Nyc  + is*grids_->Nyc * NX;
	  Wkxky->cpu[Qp] = Wkxky->tmp[Rp];
	  Wkxky->cpu[Qn] = Wkxky->tmp[Rm];
	}
      }
    }
    write_nc(Wkxky, endrun);
  }
}
// Free-energy spectrum vs Hermite index m, one row per species.
void NetCDF_ids::write_Wm(float* G2, bool endrun)
{
  if (Wm->write_v_time || (Wm->write && endrun)) {
    const int n = grids_->Nm * grids_->Nspecies;
    red->Sum(G2, Wm->data, WSPECTRA_m);             // device-side reduction
    CP_TO_CPU(Wm->cpu, Wm->data, sizeof(float)*n);  // device -> host
    write_nc(Wm, endrun);
  }
}
// Free-energy spectrum over the combined (l,m) moment index, per species.
void NetCDF_ids::write_Wlm(float* G2, bool endrun)
{
  if (Wlm->write_v_time || (Wlm->write && endrun)) {
    const int n = grids_->Nmoms * grids_->Nspecies;
    red->Sum(G2, Wlm->data, WSPECTRA_lm);             // device-side reduction
    CP_TO_CPU(Wlm->cpu, Wlm->data, sizeof(float)*n);  // device -> host
    write_nc(Wlm, endrun);
  }
}
// Free-energy spectrum vs Laguerre index l, one row per species.
void NetCDF_ids::write_Wl(float* G2, bool endrun)
{
  if (Wl->write_v_time || (Wl->write && endrun)) {
    const int n = grids_->Nl * grids_->Nspecies;
    red->Sum(G2, Wl->data, WSPECTRA_l);             // device-side reduction
    CP_TO_CPU(Wl->cpu, Wl->data, sizeof(float)*n);  // device -> host
    write_nc(Wl, endrun);
  }
}
// Potential part of the free energy per species, written as a time series,
// plus (re)initialization of the running total totW.
// NOTE(review): this resets totW to zero before accumulating, while
// write_Ws and write_As only add on top — so the relative call order of
// these three routines and write_Wtot determines what is reported as the
// total free energy; confirm the intended ordering at the call site.
void NetCDF_ids::write_Ps(float* P2, bool endrun)
{
  if (Ps -> write_v_time) {
    pot->Sum(P2, Ps->data, PSPECTRA_species); CP_TO_CPU(Ps->cpu, Ps->data, sizeof(float)*grids_->Nspecies);
    write_nc(Ps, endrun);
    if (Wtot -> write_v_time) {
      totW = 0.;
      for (int is=0; is < grids_->Nspecies; is++) totW += Ps->cpu[is];
    }
  }
}
// Phi**2 ("A") spectrum vs ky (species-independent).
void NetCDF_ids::write_Aky(float* P2, bool endrun)
{
  if (Aky->write_v_time || (Aky->write && endrun)) {
    const int n = grids_->Naky;
    ph2->Sum(P2, Aky->data, ASPECTRA_ky);            // device-side reduction
    CP_TO_CPU(Aky->cpu, Aky->data, sizeof(float)*n); // device -> host
    write_nc(Aky, endrun);
  }
}
// Phi**2 ("A") spectrum vs z (species-independent).
void NetCDF_ids::write_Az(float* P2, bool endrun)
{
  if (Az->write_v_time || (Az->write && endrun)) {
    const int n = grids_->Nz;
    ph2->Sum(P2, Az->data, ASPECTRA_z);             // device-side reduction
    CP_TO_CPU(Az->cpu, Az->data, sizeof(float)*n);  // device -> host
    write_nc(Az, endrun);
  }
}
// Phi**2 ("A") spectrum vs kz (species-independent).
// Same circular shift as write_Pkz/write_Wkz: output index i reads stored
// index (i + Nz/2 + 1) mod Nz so the kz ordering matches the kz coordinate
// written in the constructor.  Nz == 1 needs no shift.
void NetCDF_ids::write_Akz(float* P2, bool endrun)
{
  if (Akz -> write_v_time || (Akz -> write && endrun)) {
    int Nz = grids_->Nz;
    ph2->Sum(P2, Akz->data, ASPECTRA_kz); CP_TO_CPU(Akz->tmp, Akz->data, sizeof(float)*Nz);
    if (Nz>1) {
      for (int i = 0; i < Nz; i++) Akz->cpu[i] = Akz->tmp[ (i + Nz/2 + 1) % Nz ];
    } else {
      for (int i = 0; i < Nz; i++) Akz->cpu[i] = Akz->tmp[ i ];
    }
    write_nc(Akz, endrun);
  }
}
// Phi**2 ("A") spectrum vs kx (species-independent).
// The reduced spectrum covers the full Nx grid in FFT storage order
// (kx=0 at index 0, negative modes wrapped at the top end); repack so the
// output row of length Nakx has kx increasing monotonically with kx=0 at
// index half = Nakx/2.
void NetCDF_ids::write_Akx(float* P2, bool endrun)
{
  if (Akx->write_v_time || (Akx->write && endrun)) {
    const int NX   = grids_->Nx;
    const int half = grids_->Nakx/2;
    ph2->Sum(P2, Akx->data, ASPECTRA_kx);              // device-side reduction
    CP_TO_CPU(Akx->tmp, Akx->data, sizeof(float)*NX);  // device -> host
    Akx->cpu[half] = Akx->tmp[0];                      // kx = 0 -> middle
    for (int j = 1; j <= half; j++) {
      Akx->cpu[half + j] = Akx->tmp[j];                // +kx mode
      Akx->cpu[half - j] = Akx->tmp[NX - j];           // -kx mode (wrapped)
    }
    write_nc(Akx, endrun);
  }
}
// 2-D Phi**2 ("A") spectrum vs (kx, ky), species-independent.
// Same remap as write_Pkxky/write_Wkxky without the species stride:
// source tmp is on the full (Nyc, Nx) FFT grid with negative kx wrapped at
// the top; destination cpu is (Nakx, Naky) with kx ordered monotonically
// (kx=0 at column NK = Nakx/2) and masked ky modes dropped.
void NetCDF_ids::write_Akxky(float* P2, bool endrun)
{
  if (Akxky -> write_v_time || (Akxky -> write && endrun)) {
    int i = grids_->Nyc*grids_->Nx; int NK = grids_->Nakx/2; int NX = grids_->Nx;
    ph2->Sum(P2, Akxky->data, ASPECTRA_kxky); CP_TO_CPU(Akxky->tmp, Akxky->data, sizeof(float)*i);
    // kx = 0 column
    int it = 0;
    int itp = it + NK;
    for (int ik = 0; ik < grids_->Naky; ik++) {
      int Qp = itp + ik*grids_->Nakx ;  // destination
      int Rp = ik  + it*grids_->Nyc  ;  // source
      Akxky->cpu[Qp] = Akxky->tmp[Rp];
    }
    for (int it = 1; it < NK+1; it++) {
      int itp = NK + it;   // +kx destination column
      int itn = NK - it;   // -kx destination column
      int itm = NX - it;   // -kx source column (wrapped)
      for (int ik = 0; ik < grids_->Naky; ik++) {
	int Qp = itp + ik*grids_->Nakx ;
	int Rp = ik  + it*grids_->Nyc  ;
	int Qn = itn + ik *grids_->Nakx ;
	int Rm = ik  + itm*grids_->Nyc  ;
	Akxky->cpu[Qp] = Akxky->tmp[Rp];
	Akxky->cpu[Qn] = Akxky->tmp[Rm];
      }
    }
    write_nc(Akxky, endrun);
  }
}
// Total Phi**2 ("A") contribution (a single scalar), appended to the time
// series and accumulated into the running total free energy totW when that
// diagnostic is enabled.
void NetCDF_ids::write_As(float *P2, bool endrun)
{
  if (As->write_v_time) {
    ph2->Sum(P2, As->data, ASPECTRA_species);   // device-side reduction
    CP_TO_CPU (As->cpu, As->data, sizeof(float));
    write_nc(As, endrun);
    if (Wtot->write_v_time) totW += *As->cpu;
  }
}
// Heat flux per species: reduce on the device, copy to host, append to the
// NetCDF time series, and echo the values to stdout.
void NetCDF_ids::write_Q (float* Q, bool endrun)
{
  if (qs->write_v_time) {
    all_red->Sum(Q, qs->data);
    CP_TO_CPU (qs->cpu, qs->data, sizeof(float)*grids_->Nspecies);
    write_nc(qs, endrun);
    for (int is = 0; is < grids_->Nspecies; is++) printf ("%e \t ", qs->cpu[is]);
    printf("\n");
  }
}
void NetCDF_ids::write_omg(hipComplex *W, bool endrun)
{
CP_TO_CPU (omg->z_tmp, W, sizeof(hipComplex)*grids_->NxNyc);
reduce2k(omg->cpu, omg->z_tmp);
write_nc(omg, endrun);
}
// Append the accumulated total free energy (built up by write_Ps/write_Ws/
// write_As) to the time series, then reset the accumulator for the next step.
void NetCDF_ids::write_Wtot()
{
  if (Wtot -> write_v_time) { write_nc(Wtot, totW); totW = 0.;}
}
// Close the main NetCDF output file, plus the separate xy-moment file when
// that diagnostic was enabled.
void NetCDF_ids::close_nc_file() {
  int rc;
  if (rc = nc_close(file)) ERR(rc);
  if (pars_->write_xymom) {
    if (rc = nc_close(pars_->nczid)) ERR(rc);
  }
}
void NetCDF_ids::write_moment(nca *D, hipComplex *f, float* vol_fac) {
//
// If D->dx = true, take one derivative in x
// If D->d2x = true, take two derivatives in x
// Multiply by D->adj
// If D->xydata = true, output is function of (x, y) with zonal component subtracted
// If D->xdata = true, output is function of x only
// If D->scalar = true, output is sqrt (sum_kx <<f**2(kx)>>)
if (!D->write_v_time) return;
hipComplex zz = make_cuComplex(0., 0.); setval loop_R (amom, zz, D->N_);
// Perform any desired d/dx operations
if (D->d2x) {
d2x Gmom (amom, f, grids_->kx);
} else if (D->dx) {
ddx Gmom (amom, f, grids_->kx);
} else {
CP_ON_GPU (amom, f, sizeof(hipComplex)*grids_->NxNycNz);
}
// Hermite -> physical moments
if (D->adj > 1.0) {
scale_singlemom_kernel loop_R (amom, amom, D->adj); // loop_R has more elements than required but it is safe
}
if (D->xydata) {
fieldlineaverage GFLA (favg, df, amom, vol_fac); // D->tmp = <<f>>(kx), df = f - <<f>>
grad_phi -> C2R(df, D->data);
xytranspose loop_xy (D->data, D->tmp_d); // For now, take the first plane in the z-direction by default
CP_TO_CPU(D->cpu, D->tmp_d, sizeof(float)*D->Nwrite_);
write_nc(D);
return;
}
grad_phi -> C2R(amom, D->data);
yzavg loop_x (D->data, D->tmp_d, vol_fac);
CP_TO_CPU (D->cpu, D->tmp_d, sizeof(float)*D->Nwrite_);
if (D->xdata) {
write_nc(D);
return;
}
if (D->scalar) {
D->zonal = 0.;
for (int idx = 0; idx<grids_->Nx; idx++) D->zonal += D->cpu[idx] * D->cpu[idx];
D->zonal = sqrtf(D->zonal/((float) grids_->Nx));
write_zonal_nc(D);
return;
}
}
void NetCDF_ids::write_ks_data(nca *D, hipComplex *G) {
if (!D->write_v_time) return;
grad_perp->C2R(G, D->data);
CP_TO_CPU (D->cpu, D->data, sizeof(float)*D->N_);
write_nc(D);
}
void NetCDF_ids::write_ks_data(nca *D, float *G) {
if (!D->write_v_time) return;
CP_TO_CPU (D->cpu, G, sizeof(float)*D->N_);
write_nc(D);
}
// condense a (ky,kx) object for netcdf output, taking into account the mask
// and changing the type from hipComplex to float
//
// Input f lives on the full (Nyc, Nx) FFT grid with negative kx wrapped at
// the top (source column Nx-it holds the -kx_it modes).  Output fk is a flat
// [Naky][Nakx][2] float array with re/im interleaved, kx monotonically
// ordered, and kx=0 at column NK = Nakx/2; masked (de-aliased) modes are
// skipped entirely.
void NetCDF_ids::reduce2k(float *fk, hipComplex* f) {
  int Nx = grids_->Nx;
  int Nakx = grids_->Nakx;
  int Naky = grids_->Naky;
  int Nyc = grids_->Nyc;
  int NK = grids_->Nakx/2;
  // kx = 0 column first
  int it = 0;
  int itp = it + NK;
  for (int ik=0; ik<Naky; ik++) {
    int Qp = itp + ik*Nakx;  // destination (re/im pair index)
    int Rp = ik + it*Nyc;    // source on the FFT grid
    fk[2*Qp ] = f[Rp].x;     // real part
    fk[2*Qp+1] = f[Rp].y;    // imaginary part
  }
  for (int it = 1; it < NK+1; it++) {
    int itp = NK + it;   // +kx destination column
    int itn = NK - it;   // -kx destination column
    int itm = Nx - it;   // -kx source column (wrapped storage)
    for (int ik=0; ik<Naky; ik++) {
      int Qp = itp + ik*Nakx;
      int Rp = ik + it*Nyc;
      int Qn = itn + ik*Nakx;
      int Rm = ik + itm*Nyc;
      fk[2*Qp ] = f[Rp].x;
      fk[2*Qp+1] = f[Rp].y;
      fk[2*Qn ] = f[Rm].x;
      fk[2*Qn+1] = f[Rm].y;
    }
  }
}
| bc57caf4e30a154096e989ae0d66261df7b66e67.cu | #include "netcdf.h"
#include "ncdf.h"
#define GFLA <<< dgfla, dbfla >>>
#define loop_R <<< dGr, dBr >>>
#define loop_xy <<< dgxy, dbxy >>>
#define loop_x <<< dgx, dbx >>>
#define loop_y <<< dgp, dbp >>>
#define Gmom <<< dgall, dball >>>
NetCDF_ids::NetCDF_ids(Grids* grids, Parameters* pars, Geometry* geo) :
grids_(grids), pars_(pars), geo_(geo),
red(nullptr), pot(nullptr), ph2(nullptr), all_red(nullptr), grad_phi(nullptr), grad_perp(nullptr)
{
primary = nullptr; secondary = nullptr; tertiary = nullptr; amom = nullptr;
df = nullptr; favg = nullptr;
if (pars_->diagnosing_spectra || pars_->diagnosing_kzspec) {
float dum = 1.0;
red = new All_Reduce(grids_, pars_->wspectra); CUDA_DEBUG("Reductions: %s \n"); // G**2
pot = new Grid_Species_Reduce(grids_, pars_->aspectra); CUDA_DEBUG("Reductions: %s \n"); // (1-G0) Phi**2 keeping track of species
ph2 = new Grid_Reduce(grids_, pars_->aspectra); CUDA_DEBUG("Reductions: %s \n"); // Phi**2
}
int nS = grids_->Nspecies;
int nM = grids_->Nm;
int nL = grids_->Nl;
int nY = grids_->Nyc;
int nYk = grids_->Naky;
int nX = grids_->Nx;
int nXk = grids_->Nakx;
int nZ = grids_->Nz;
int nR = nX * nY * nZ;
int nK = nXk * nYk * nZ;
int nG = nR * grids_->Nmoms * nS;
theta_extended = nullptr;
char strb[263];
strcpy(strb, pars_->run_name);
strcat(strb, ".nc");
int retval, idum;
// Loop over full real-space grid
int nt1 = min(grids_->NxNyNz, 1024);
int nb1 = 1 + (grids_->NxNyNz-1)/nt1;
dBr = dim3(nt1, 1, 1);
dGr = dim3(nb1, 1, 1);
// Loop over x-space grid
nt1 = min(grids_->Nx, 512);
nb1 = 1 + (grids_->Nx-1)/nt1;
dbx = dim3(nt1, 1, 1);
dgx = dim3(nb1, 1, 1);
// Loop over y-space grid
nt1 = min(grids_->Ny, 512);
nb1 = 1 + (grids_->Ny-1)/nt1;
dbp = dim3(nt1, 1, 1);
dgp = dim3(nb1, 1, 1);
// Double loop, over y-space and x-space grids
nt1 = min(32, grids_->Ny);
nb1 = 1 + (grids_->Ny-1)/nt1;
int nt2 = min(32, grids_->Nx);
int nb2 = 1 + (grids_->Nx-1)/nt2;
dbxy = dim3(nt1, nt2, 1);
dgxy = dim3(nb1, nb2, 1);
// Single loop, over Nx*Nyc elements
nt1 = min(128, grids_->Nx);
nb1 = 1 + (grids_->Nx*grids_->Nyc-1)/nt1;
dbfla = dim3(nt1, 1, 1);
dgfla = dim3(nb1, 1, 1);
// Triple loop, native elements
int nt3, nb3;
nt1 = min(16, grids_->Nyc); nb1 = 1 + (grids_->Nyc-1)/nt1;
nt2 = min(16, grids_->Nx); nb2 = 1 + (grids_->Nx -1)/nt2;
nt3 = min(4, grids_->Nz); nb3 = 1 + (grids_->Nz -1)/nt3;
dball = dim3(nt1, nt2, nt3);
dgall = dim3(nb1, nb2, nb3);
if (pars_->write_kmom || pars_->write_xymom || pars_->write_avgz) {
int nbatch = grids_->Nz;
grad_phi = new GradPerp(grids_, nbatch, grids_->NxNycNz);
cudaMalloc (&df, sizeof(cuComplex)*grids_->NxNycNz);
cudaMalloc (&favg, sizeof(cuComplex)*grids_->Nx);
cudaMalloc (&amom, sizeof(cuComplex)*grids_->NxNycNz);
}
if (pars_->ResWrite) {
r_file = pars_->ncresid;
if (retval = nc_redef(r_file));
if (retval = nc_inq_dimid(r_file, "r", &res_dim)) ERR(retval);
if (retval = nc_inq_dimid(r_file, "time", &rtime_dim)) ERR(retval);
// v_ky[0] = res_dim;
// if (retval = nc_def_var(r_file, "r", NC_INT, 1, v_ky, &state)) ERR(retval);
}
if (pars_->write_xymom) {
z_file = pars_->nczid;
if (retval = nc_redef(z_file));
if (retval = nc_inq_dimid(z_file, "x", &zx_dim)) ERR(retval);
if (retval = nc_inq_dimid(z_file, "y", &zy_dim)) ERR(retval);
if (retval = nc_inq_dimid(z_file, "time", &ztime_dim)) ERR(retval);
v_ky[0] = zy_dim;
if (retval = nc_def_var(z_file, "y", NC_FLOAT, 1, v_ky, &zy)) ERR(retval);
v_kx[0] = zx_dim;
if (retval = nc_def_var(z_file, "x", NC_FLOAT, 1, v_kx, &zx)) ERR(retval);
}
file = pars_->ncid;
if (retval = nc_redef(file));
int ri;
// Get handles for the dimensions
if (retval = nc_inq_dimid(file, "ri", &ri)) ERR(retval);
if (retval = nc_def_dim(file, "kz", grids_->Nz, &nkz)) ERR(retval);
if (retval = nc_def_dim(file, "ky", grids_->Naky, &ky_dim)) ERR(retval);
if (retval = nc_def_dim(file, "kx", grids_->Nakx, &kx_dim)) ERR(retval);
if (retval = nc_def_dim(file, "theta", grids_->Nz, &nz)) ERR(retval);
if (retval = nc_inq_dimid(file, "x", &x_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "y", &y_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "m", &m_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "l", &l_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "s", &s_dim)) ERR(retval);
if (retval = nc_inq_dimid(file, "time", &time_dim)) ERR(retval);
if (retval = nc_def_var(file, "periodic", NC_INT, 0, 0, &periodic)) ERR(retval);
if (retval = nc_def_var(file, "local_limit", NC_INT, 0, 0, &local_limit)) ERR(retval);
v_ky[0] = ky_dim;
if (retval = nc_def_var(file, "ky", NC_FLOAT, 1, v_ky, &ky)) ERR(retval);
v_kx[0] = kx_dim;
if (retval = nc_def_var(file, "kx", NC_FLOAT, 1, v_kx, &kx)) ERR(retval);
v_kz[0] = nkz;
if (retval = nc_def_var(file, "kz", NC_FLOAT, 1, v_kz, &kz)) ERR(retval);
v_ky[0] = y_dim;
if (retval = nc_def_var(file, "y", NC_FLOAT, 1, v_ky, &y)) ERR(retval);
v_kx[0] = x_dim;
if (retval = nc_def_var(file, "x", NC_FLOAT, 1, v_kx, &x)) ERR(retval);
// v_z[0] = nz;
// if (retval = nc_def_var(file, "z", NC_FLOAT, 1, v_z, &z_h)) ERR(retval);
// z_h needs to be defined.
// Z0 would typically be q R
// and then z_h would run from - (2 pi q R)/2 : + (2 pi q R)/2
// but there are complications to get right:
// normalization of R?
// Allow for Z0 to be specified directly
// Allow nperiod > 1
int nc_sp;
if (retval = nc_inq_grp_ncid(file, "Spectra", &nc_sp)) ERR(retval);
int nc_flux;
if (retval = nc_inq_grp_ncid(file, "Fluxes", &nc_flux)) ERR(retval);
int nc_special;
if (retval = nc_inq_grp_ncid(file, "Special", &nc_special)) ERR(retval);
int nc_zonal;
if (retval = nc_inq_grp_ncid(file, "Zonal_x", &nc_zonal)) ERR(retval);
int nc_geo;
if (retval = nc_inq_grp_ncid(file, "Geometry", &nc_geo)) ERR(retval);
geo_v_theta[0] = nz; int ivar;
if (retval = nc_def_var (file, "theta", NC_FLOAT, 1, geo_v_theta, &theta)) ERR(retval);
if (retval = nc_def_var (nc_geo, "bmag", NC_FLOAT, 1, geo_v_theta, &bmag)) ERR(retval);
if (retval = nc_def_var (nc_geo, "bgrad", NC_FLOAT, 1, geo_v_theta, &bgrad)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gbdrift", NC_FLOAT, 1, geo_v_theta, &gbdrift)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gbdrift0", NC_FLOAT, 1, geo_v_theta, &gbdrift0)) ERR(retval);
if (retval = nc_def_var (nc_geo, "cvdrift", NC_FLOAT, 1, geo_v_theta, &cvdrift)) ERR(retval);
if (retval = nc_def_var (nc_geo, "cvdrift0", NC_FLOAT, 1, geo_v_theta, &cvdrift0)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gds2", NC_FLOAT, 1, geo_v_theta, &gds2)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gds21", NC_FLOAT, 1, geo_v_theta, &gds21)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gds22", NC_FLOAT, 1, geo_v_theta, &gds22)) ERR(retval);
if (retval = nc_def_var (nc_geo, "grho", NC_FLOAT, 1, geo_v_theta, &grho)) ERR(retval);
if (retval = nc_def_var (nc_geo, "jacobian", NC_FLOAT, 1, geo_v_theta, &jacobian)) ERR(retval);
if (retval = nc_def_var (nc_geo, "gradpar", NC_FLOAT, 0, NULL, &ivar)) ERR(retval);
////////////////////////////
// //
// TIME //
// //
////////////////////////////
if (pars_->ResWrite) {
r_time = new nca(0);
r_time -> write_v_time = true;
r_time -> file = r_file;
r_time -> time_dims[0] = rtime_dim;
if (retval = nc_def_var(r_file, "time", NC_DOUBLE, 1, r_time -> time_dims, &r_time -> time)) ERR(retval);
}
if (pars_->write_xymom) {
z_time = new nca(0);
z_time -> write_v_time = true;
z_time -> file = z_file;
z_time -> time_dims[0] = ztime_dim;
if (retval = nc_def_var(z_file, "time", NC_DOUBLE, 1, z_time -> time_dims, &z_time -> time)) ERR(retval);
}
time = new nca(0);
time -> write_v_time = true;
time -> file = file;
time -> time_dims[0] = time_dim;
if (retval = nc_def_var(file, "time", NC_DOUBLE, 1, time -> time_dims, &time -> time)) ERR(retval);
////////////////////////////
// //
// DENSITY //
// //
////////////////////////////
if (pars_->write_moms) {
den = new nca(0);
den -> write = true;
den -> dims[0] = s_dim;
den -> dims[1] = nz;
den -> dims[2] = kx_dim;
den -> dims[3] = ky_dim;
den -> dims[4] = ri;
den -> file = nc_special;
if (retval = nc_def_var(nc_special, "density", NC_FLOAT, 5, den->dims, &den->idx )) ERR(retval);
den -> start[0] = 0;
den -> start[1] = 0;
den -> start[2] = 0;
den -> start[3] = 0;
den -> start[4] = 0;
den -> count[0] = grids_->Nspecies;
den -> count[1] = grids_->Nz;
den -> count[2] = grids_->Nakx;
den -> count[3] = grids_->Naky;
den -> count[4] = 2;
den -> ns = grids_->Nspecies;
} else {
den = new nca(0);
}
////////////////////////////
// //
// DENSITY(t=0) //
// //
////////////////////////////
if (pars_->write_moms) {
den0 = new nca(0);
den0 -> write = true;
den0 -> dims[0] = s_dim;
den0 -> dims[1] = nz;
den0 -> dims[2] = kx_dim;
den0 -> dims[3] = ky_dim;
den0 -> dims[4] = ri;
den0 -> file = nc_special;
if (retval = nc_def_var(nc_special, "density0", NC_FLOAT, 5, den0 -> dims, &den0 -> idx )) ERR(retval);
den0 -> start[0] = 0;
den0 -> start[1] = 0;
den0 -> start[2] = 0;
den0 -> start[3] = 0;
den0 -> start[4] = 0;
den0 -> count[0] = grids_->Nspecies;
den0 -> count[1] = grids_->Nz;
den0 -> count[2] = grids_->Nakx;
den0 -> count[3] = grids_->Naky;
den0 -> count[4] = 2;
den0 -> ns = grids_->Nspecies;
} else {
den0 = new nca(0);
}
////////////////////////////
// //
// Phi //
// //
////////////////////////////
if (pars_->write_phi) {
wphi = new nca(0);
wphi -> write = true;
wphi -> dims[0] = nz;
wphi -> dims[1] = kx_dim;
wphi -> dims[2] = ky_dim;
wphi -> dims[3] = ri;
wphi -> file = nc_special;
if (retval = nc_def_var(nc_special, "phi", NC_FLOAT, 4, wphi -> dims, &wphi -> idx )) ERR(retval);
wphi -> start[0] = 0;
wphi -> start[1] = 0;
wphi -> start[2] = 0;
wphi -> start[3] = 0;
wphi -> count[0] = grids_->Nz;
wphi -> count[1] = grids_->Nakx;
wphi -> count[2] = grids_->Naky;
wphi -> count[3] = 2;
wphi -> ns = 1;
} else {
wphi = new nca(0);
}
////////////////////////////
// //
// Phi(t=0) //
// //
////////////////////////////
if (pars_->write_phi) {
wphi0 = new nca(0);
wphi0 -> write = true;
wphi0 -> dims[0] = nz;
wphi0 -> dims[1] = kx_dim;
wphi0 -> dims[2] = ky_dim;
wphi0 -> dims[3] = ri;
wphi0 -> file = nc_special;
if (retval = nc_def_var(nc_special, "phi0", NC_FLOAT, 4, wphi0 -> dims, &wphi0 -> idx )) ERR(retval);
wphi0 -> start[0] = 0;
wphi0 -> start[1] = 0;
wphi0 -> start[2] = 0;
wphi0 -> start[3] = 0;
wphi0 -> count[0] = grids_->Nz;
wphi0 -> count[1] = grids_->Nakx;
wphi0 -> count[2] = grids_->Naky;
wphi0 -> count[3] = 2;
wphi0 -> ns = 1;
} else {
wphi0 = new nca(0);
}
////////////////////////////
// //
// DENSITY(kpar) //
// //
////////////////////////////
if (pars_->write_phi_kpar and pars_->write_moms) {
denk = new nca(0);
denk -> write = true;
denk -> dims[0] = s_dim;
denk -> dims[1] = nkz;
denk -> dims[2] = kx_dim;
denk -> dims[3] = ky_dim;
denk -> dims[4] = ri;
denk -> file = nc_special;
if (retval = nc_def_var(nc_special, "density_kpar", NC_FLOAT, 5, denk -> dims, &denk -> idx)) ERR(retval);
denk -> start[0] = 0;
denk -> start[1] = 0;
denk -> start[2] = 0;
denk -> start[3] = 0;
denk -> start[4] = 0;
denk -> count[0] = grids_->Nspecies;
denk -> count[1] = grids_->Nz;
denk -> count[2] = grids_->Nakx;
denk -> count[3] = grids_->Naky;
denk -> count[4] = 2;
denk -> ns = 1;
} else {
denk = new nca(0);
}
////////////////////////////
// //
// Phi(kpar) //
// //
////////////////////////////
if (pars_->write_phi_kpar) {
wphik = new nca(0);
wphik -> write = true;
wphik -> dims[0] = nkz;
wphik -> dims[1] = kx_dim;
wphik -> dims[2] = ky_dim;
wphik -> dims[3] = ri;
wphik -> file = nc_special;
if (retval = nc_def_var(nc_special, "phi2_kz", NC_FLOAT, 4, wphik -> dims, &wphik -> idx)) ERR(retval);
wphik -> start[0] = 0;
wphik -> start[1] = 0;
wphik -> start[2] = 0;
wphik -> start[3] = 0;
wphik -> count[0] = grids_->Nz;
wphik -> count[1] = grids_->Nakx;
wphik -> count[2] = grids_->Naky;
wphik -> count[3] = 2;
wphik -> ns = 1;
} else {
wphik = new nca(0);
}
////////////////////////////
// //
// Frequencies //
// //
////////////////////////////
if (pars_->write_omega) {
omg = new nca(-nX * nY, 2 * nXk * nYk);
omg -> write_v_time = true;
omg -> time_dims[0] = time_dim;
omg -> time_dims[1] = ky_dim;
omg -> time_dims[2] = kx_dim;
omg -> time_dims[3] = ri;
omg -> file = nc_special;
if (retval = nc_def_var(nc_special, "omega_v_time", NC_FLOAT, 4, omg -> time_dims, &omg -> time)) ERR(retval);
omg -> time_start[0] = 1;
omg -> time_count[1] = grids_->Naky;
omg -> time_count[2] = grids_->Nakx;
omg -> time_count[3] = 2;
for (int i=0; i < nXk * nYk * 2; i++) omg->cpu[i] = 0.;
} else {
omg = new nca(0);
}
////////////////////////////
// //
// Rosenbluth-Hinton //
// //
////////////////////////////
if (pars_->write_rh) {
rh = new nca(0);
rh -> write = true;
rh -> time_dims[0] = time_dim;
rh -> time_dims[1] = ri;
if (retval = nc_def_var(nc_special, "phi_rh", NC_FLOAT, 2, rh -> time_dims, &rh -> time)) ERR(retval);
rh -> time_count[1] = 2;
} else {
rh = new nca(0);
}
////////////////////////////
// //
// PZT estimates //
// //
////////////////////////////
if (pars_->write_pzt) {
Pzt = new nca(0);
pZt = new nca(0);
pzT = new nca(0);
Pzt -> write_v_time = true;
Pzt -> time_dims[0] = time_dim;
pZt -> time_dims[0] = time_dim;
pzT -> time_dims[0] = time_dim;
Pzt -> file = nc_special;
pZt -> file = nc_special;
pzT -> file = nc_special;
if (retval = nc_def_var(nc_special, "prim", NC_FLOAT, 1, Pzt -> time_dims, &Pzt -> idx)) ERR(retval);
if (retval = nc_def_var(nc_special, "sec", NC_FLOAT, 1, pZt -> time_dims, &pZt -> idx)) ERR(retval);
if (retval = nc_def_var(nc_special, "tert", NC_FLOAT, 1, pzT -> time_dims, &pzT -> idx)) ERR(retval);
cudaMallocHost (&primary, sizeof(float)); primary[0] = 0.;
cudaMallocHost (&secondary, sizeof(float)); secondary[0] = 0.;
cudaMallocHost (&tertiary, sizeof(float)); tertiary[0] = 0.;
cudaMalloc (&t_bar, sizeof(cuComplex) * nR * nS);
} else {
Pzt = new nca(0);
pZt = new nca(0);
pzT = new nca(0);
}
////////////////////////////
// //
// (1-G0)phi**2 (species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_species] > 0) {
Ps = new nca(nS);
Ps -> write_v_time = true;
Ps -> time_dims[0] = time_dim;
Ps -> time_dims[1] = s_dim;
Ps -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pst", NC_FLOAT, 2, Ps -> time_dims, &Ps -> time)) ERR(retval);
Ps -> time_count[1] = grids_->Nspecies;
} else {
Ps = new nca(0);
}
////////////////////////////
// //
// P (kx, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_kx] > 0) {
Pkx = new nca(nX*nS, nXk*nS);
Pkx -> write_v_time = true;
Pkx -> time_dims[0] = time_dim;
Pkx -> time_dims[1] = s_dim;
Pkx -> time_dims[2] = kx_dim;
Pkx -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkxst", NC_FLOAT, 3, Pkx -> time_dims, &Pkx -> time)) ERR(retval);
Pkx -> time_count[1] = grids_->Nspecies;
Pkx -> time_count[2] = grids_->Nakx;
} else {
Pkx = new nca(0);
}
////////////////////////////
// //
// P (ky, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_ky] > 0) {
Pky = new nca(nY*nS, nYk*nS);
Pky -> write_v_time = true;
Pky -> time_dims[0] = time_dim;
Pky -> time_dims[1] = s_dim;
Pky -> time_dims[2] = ky_dim;
Pky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkyst", NC_FLOAT, 3, Pky->time_dims, &Pky->time)) ERR(retval);
Pky -> time_count[1] = grids_->Nspecies;
Pky -> time_count[2] = grids_->Naky;
} else {
Pky = new nca(0);
}
////////////////////////////
// //
// P (kz, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_kz] > 0) {
Pkz = new nca(nZ*nS, nZ*nS);
Pkz -> write_v_time = true;
Pkz -> time_dims[0] = time_dim;
Pkz -> time_dims[1] = s_dim;
Pkz -> time_dims[2] = nkz;
Pkz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkzst", NC_FLOAT, 3, Pkz -> time_dims, &Pkz -> time)) ERR(retval);
Pkz -> time_count[1] = grids_->Nspecies;
Pkz -> time_count[2] = grids_->Nz;
} else {
Pkz = new nca(0);
}
////////////////////////////
// //
// P (z, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_z] > 0) {
Pz = new nca(nZ*nS);
Pz -> write_v_time = true;
Pz -> time_dims[0] = time_dim;
Pz -> time_dims[1] = s_dim;
Pz -> time_dims[2] = nz;
Pz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pzst", NC_FLOAT, 3, Pz -> time_dims, &Pz -> time)) ERR(retval);
Pz -> time_count[1] = grids_->Nspecies;
Pz -> time_count[2] = grids_->Nz;
} else {
Pz = new nca(0);
}
////////////////////////////
// //
// P (kx,ky, species) //
// //
////////////////////////////
if (pars_->pspectra[PSPECTRA_kxky] > 0) {
Pkxky = new nca(nX * nY * nS, nXk * nYk * nS);
Pkxky -> write_v_time = true;
Pkxky -> time_dims[0] = time_dim;
Pkxky -> time_dims[1] = s_dim;
Pkxky -> time_dims[2] = ky_dim;
Pkxky -> time_dims[3] = kx_dim;
Pkxky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Pkxkyst", NC_FLOAT, 4, Pkxky -> time_dims, &Pkxky -> time)) ERR(retval);
Pkxky -> time_count[1] = grids_->Nspecies;
Pkxky -> time_count[2] = grids_->Naky;
Pkxky -> time_count[3] = grids_->Nakx;
} else {
Pkxky = new nca(0);
}
////////////////////////////
// //
// W (species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_species] > 0) {
Ws = new nca(nS);
Ws -> write_v_time = true;
Ws -> time_dims[0] = time_dim;
Ws -> time_dims[1] = s_dim;
Ws -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wst", NC_FLOAT, 2, Ws -> time_dims, &Ws -> time)) ERR(retval);
Ws -> time_count[1] = grids_->Nspecies;
} else {
Ws = new nca(0);
}
////////////////////////////
// //
// W (kx, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_kx] > 0) {
Wkx = new nca(nX*nS, nXk*nS);
Wkx -> write_v_time = true;
Wkx -> time_dims[0] = time_dim;
Wkx -> time_dims[1] = s_dim;
Wkx -> time_dims[2] = kx_dim;
Wkx -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkxst", NC_FLOAT, 3, Wkx -> time_dims, &Wkx -> time)) ERR(retval);
Wkx -> time_count[1] = grids_->Nspecies;
Wkx -> time_count[2] = grids_->Nakx;
} else {
Wkx = new nca(0);
}
////////////////////////////
// //
// W (ky, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_ky] > 0) {
Wky = new nca(nY*nS, nYk*nS);
Wky -> write_v_time = true;
Wky -> time_dims[0] = time_dim;
Wky -> time_dims[1] = s_dim;
Wky -> time_dims[2] = ky_dim;
Wky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkyst", NC_FLOAT, 3, Wky -> time_dims, &Wky -> time)) ERR(retval);
Wky -> time_count[1] = grids_->Nspecies;
Wky -> time_count[2] = grids_->Naky;
} else {
Wky = new nca(0);
}
////////////////////////////
// //
// W (kz, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_kz] > 0) {
Wkz = new nca(nZ * nS, nZ * nS);
Wkz -> write_v_time = true;
Wkz -> time_dims[0] = time_dim;
Wkz -> time_dims[1] = s_dim;
Wkz -> time_dims[2] = nkz;
Wkz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkzst", NC_FLOAT, 3, Wkz -> time_dims, &Wkz -> time)) ERR(retval);
Wkz -> time_count[1] = grids_->Nspecies;
Wkz -> time_count[2] = grids_->Nz;
} else {
Wkz = new nca(0);
}
////////////////////////////
// //
// W (z, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_z] > 0) {
Wz = new nca(nZ*nS);
Wz -> write_v_time = true;
Wz -> time_dims[0] = time_dim;
Wz -> time_dims[1] = s_dim;
Wz -> time_dims[2] = nz;
Wz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wzst", NC_FLOAT, 3, Wz -> time_dims, &Wz -> time)) ERR(retval);
Wz -> time_count[1] = grids_->Nspecies;
Wz -> time_count[2] = grids_->Nz;
} else {
Wz = new nca(0);
}
////////////////////////////
// //
// W (kx,ky, species) //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_kxky] > 0) {
Wkxky = new nca(nX * nY * nS, nXk * nYk * nS);
Wkxky -> write_v_time = true;
Wkxky -> time_dims[0] = time_dim;
Wkxky -> time_dims[1] = s_dim;
Wkxky -> time_dims[2] = ky_dim;
Wkxky -> time_dims[3] = kx_dim;
Wkxky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wkxkyst", NC_FLOAT, 4, Wkxky -> time_dims, &Wkxky -> time)) ERR(retval);
Wkxky -> time_count[1] = grids_->Nspecies;
Wkxky -> time_count[2] = grids_->Naky;
Wkxky -> time_count[3] = grids_->Nakx;
} else {
Wkxky = new nca(0);
}
////////////////////////////
// //
// W (adiabatic species) //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_species] > 0) {
As = new nca(1);
As -> write_v_time;
As -> time_dims[0] = time_dim;
As -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "At", NC_FLOAT, 1, As -> time_dims, &As -> time)) ERR(retval);
} else {
As = new nca(0);
}
////////////////////////////
// //
// W (kx) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_kx] > 0) {
Akx = new nca(nX, nXk);
Akx -> write_v_time = true;
Akx -> time_dims[0] = time_dim;
Akx -> time_dims[1] = kx_dim;
Akx -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akxst", NC_FLOAT, 2, Akx -> time_dims, &Akx -> time)) ERR(retval);
Akx -> time_count[1] = grids_->Nakx;
} else {
Akx = new nca(0);
}
////////////////////////////
// //
// W (ky) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_ky] > 0) {
Aky = new nca(nY, nYk);
Aky -> write_v_time = true;
Aky -> time_dims[0] = time_dim;
Aky -> time_dims[1] = ky_dim;
Aky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akyst", NC_FLOAT, 2, Aky -> time_dims, &Aky -> time)) ERR(retval);
Aky -> time_count[1] = grids_->Naky;
} else {
Aky = new nca(0);
}
////////////////////////////
// //
// A (kz) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_kz] > 0) {
Akz = new nca(nZ, nZ);
Akz -> write_v_time = true;
Akz -> time_dims[0] = time_dim;
Akz -> time_dims[1] = nkz;
Akz -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akzst", NC_FLOAT, 2, Akz -> time_dims, &Akz -> time)) ERR(retval);
Akz -> time_count[1] = grids_->Nz;
} else {
Akz = new nca(0);
}
////////////////////////////
// //
// A (z) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_z] > 0) {
Az = new nca(nZ);
Az -> write_v_time = true;
Az -> time_dims[0] = time_dim;
Az -> time_dims[1] = nz;
Az -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Azst", NC_FLOAT, 2, Az -> time_dims, &Az -> time)) ERR(retval);
Az -> time_count[1] = grids_->Nz;
} else {
Az = new nca(0);
}
////////////////////////////
// //
// W (kx,ky) adiabatic //
// //
////////////////////////////
if (pars_->aspectra[ASPECTRA_kxky] > 0) {
Akxky = new nca(nX * nY, nXk * nYk);
Akxky -> write_v_time = true;
Akxky -> time_dims[0] = time_dim;
Akxky -> time_dims[1] = ky_dim;
Akxky -> time_dims[2] = kx_dim;
Akxky -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Akxkyst", NC_FLOAT, 3, Akxky -> time_dims, &Akxky -> time)) ERR(retval);
Akxky -> time_count[1] = grids_->Naky;
Akxky -> time_count[2] = grids_->Nakx;
} else {
Akxky = new nca(0);
}
////////////////////////////
// //
// Lag-Herm spectrum //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_lm] > 0) {
Wlm = new nca(nL*nM*nS);
Wlm -> write_v_time = true;
Wlm -> time_dims[0] = time_dim;
Wlm -> time_dims[1] = s_dim;
Wlm -> time_dims[2] = m_dim;
Wlm -> time_dims[3] = l_dim;
Wlm -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wlmst", NC_FLOAT, 4, Wlm -> time_dims, &Wlm -> time)) ERR(retval);
Wlm -> time_count[1] = grids_->Nspecies;
Wlm -> time_count[2] = grids_->Nm;
Wlm -> time_count[3] = grids_->Nl;
} else {
Wlm = new nca(0);
}
////////////////////////////
// //
// Laguerre spectrum //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_l] > 0) {
Wl = new nca(nL*nS);
Wl -> write_v_time = true;
Wl -> time_dims[0] = time_dim;
Wl -> time_dims[1] = s_dim;
Wl -> time_dims[2] = l_dim;
Wl -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wlst", NC_FLOAT, 3, Wl -> time_dims, &Wl -> time)) ERR(retval);
Wl -> time_count[1] = grids_->Nspecies;
Wl -> time_count[2] = grids_->Nl;
} else {
Wl = new nca(0);
}
////////////////////////////
// //
// Hermite spectrum //
// //
////////////////////////////
if (pars_->wspectra[WSPECTRA_m] > 0) {
Wm = new nca(nM*nS);
Wm -> write_v_time = true;
Wm -> time_dims[0] = time_dim;
Wm -> time_dims[1] = s_dim;
Wm -> time_dims[2] = m_dim;
Wm -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "Wmst", NC_FLOAT, 3, Wm -> time_dims, &Wm -> time)) ERR(retval);
Wm -> time_count[1] = grids_->Nspecies;
Wm -> time_count[2] = grids_->Nm;
} else {
Wm = new nca(0);
}
bool linked = (not pars_->local_limit && not pars_->boundary_option_periodic);
/*
if (linked && false) {
zkxky[0] = nz;
zkxky[1] = kx_dim;
zkxky[2] = ky_dim;
zkxky -> file = nc_special;
if (retval = nc_def_var(nc_special, "theta_x", NC_FLOAT, 3, zkxky, &theta_x)) ERR(retval);
}
*/
////////////////////////////
// //
// <v_ExB>_y,z (x) //
// //
////////////////////////////
if (pars_->write_vEy) {
vEy = new nca(grids_->NxNyNz, grids_->Nx);
vEy -> write_v_time = true;
vEy -> time_dims[0] = time_dim;
vEy -> time_dims[1] = x_dim;
vEy -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "vEy_xt", NC_FLOAT, 2, vEy->time_dims, &vEy->time)) ERR(retval);
vEy -> time_count[1] = grids_->Nx;
vEy -> xdata = true;
vEy -> dx = true;
} else {
vEy = new nca(0);
}
if (pars_->write_avg_zvE) {
avg_zvE = new nca(grids_->NxNyNz, grids_->Nx);
avg_zvE -> write_v_time = true;
avg_zvE -> time_dims[0] = time_dim;
avg_zvE -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zvE_t", NC_FLOAT, 1,
avg_zvE->time_dims, &avg_zvE->time)) ERR(retval);
avg_zvE -> scalar = true;
avg_zvE -> dx = true;
} else {
avg_zvE = new nca(0);
}
if (pars_->write_avg_zkxvEy) {
avg_zkxvEy = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkxvEy -> write_v_time = true;
avg_zkxvEy -> time_dims[0] = time_dim;
avg_zkxvEy -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkxvEy_t", NC_FLOAT, 1,
avg_zkxvEy->time_dims, &avg_zkxvEy->time)) ERR(retval);
avg_zkxvEy -> scalar = true;
avg_zkxvEy -> d2x = true;
} else {
avg_zkxvEy = new nca(0);
}
if (pars_->write_avg_zkden) {
avg_zkden = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkden -> write_v_time = true;
avg_zkden -> time_dims[0] = time_dim;
avg_zkden -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkden_t", NC_FLOAT, 1,
avg_zkden->time_dims, &avg_zkden->time)) ERR(retval);
avg_zkden -> scalar = true;
avg_zkden -> dx = true;
} else {
avg_zkden = new nca(0);
}
if (pars_->write_avg_zkUpar) {
avg_zkUpar = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkUpar -> write_v_time = true;
avg_zkUpar -> time_dims[0] = time_dim;
avg_zkUpar -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkUpar_t", NC_FLOAT, 1,
avg_zkUpar->time_dims, &avg_zkUpar->time)) ERR(retval);
avg_zkUpar -> scalar = true;
avg_zkUpar -> dx = true;
} else {
avg_zkUpar = new nca(0);
}
if (pars_->write_avg_zkTpar) {
avg_zkTpar = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkTpar -> write_v_time = true;
avg_zkTpar -> time_dims[0] = time_dim;
avg_zkTpar -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkTpar_t", NC_FLOAT, 1,
avg_zkTpar->time_dims, &avg_zkTpar->time)) ERR(retval);
avg_zkTpar -> scalar = true;
avg_zkTpar -> dx = true;
avg_zkTpar -> adj = sqrtf(2.0);
} else {
avg_zkTpar = new nca(0);
}
if (pars_->write_avg_zkqpar) {
avg_zkqpar = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkqpar -> write_v_time = true;
avg_zkqpar -> time_dims[0] = time_dim;
avg_zkqpar -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkqpar_t", NC_FLOAT, 1,
avg_zkqpar->time_dims, &avg_zkqpar->time)) ERR(retval);
avg_zkqpar -> scalar = true;
avg_zkqpar -> dx = true;
avg_zkqpar -> adj = sqrtf(6.0);
} else {
avg_zkqpar = new nca(0);
}
if (pars_->write_avg_zkTperp) {
avg_zkTperp = new nca(grids_->NxNyNz, grids_->Nx);
avg_zkTperp -> write_v_time = true;
avg_zkTperp -> time_dims[0] = time_dim;
avg_zkTperp -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "avg_zkTperp_t", NC_FLOAT, 1,
avg_zkTperp->time_dims, &avg_zkTperp->time)) ERR(retval);
avg_zkTperp -> scalar = true;
avg_zkTperp -> dx = true;
} else {
avg_zkTperp = new nca(0);
}
////////////////////////////
// //
// <d/dx v_ExB>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kxvEy) {
kxvEy = new nca(grids_->NxNyNz, grids_->Nx);
kxvEy -> write_v_time = true;
kxvEy -> time_dims[0] = time_dim;
kxvEy -> time_dims[1] = x_dim;
kxvEy -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kxvEy_xt", NC_FLOAT, 2, kxvEy -> time_dims, &kxvEy -> time)) ERR(retval);
kxvEy -> time_count[1] = grids_->Nx;
kxvEy -> xdata = true;
kxvEy -> d2x = true;
} else {
kxvEy = new nca(0);
}
////////////////////////////
// //
// <d/dx denh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kden) {
kden = new nca(grids_->NxNyNz, grids_->Nx);
kden -> write_v_time = true;
kden -> time_dims[0] = time_dim;
kden -> time_dims[1] = x_dim;
kden -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kden_xt", NC_FLOAT, 2, kden->time_dims, &kden->time)) ERR(retval);
kden -> time_count[1] = grids_->Nx;
kden -> xdata = true;
kden -> dx = true;
} else {
kden = new nca(0);
}
////////////////////////////
// //
// <d/dx uparh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kUpar) {
kUpar = new nca(grids_->NxNyNz, grids_->Nx);
kUpar -> write_v_time = true;
kUpar -> time_dims[0] = time_dim;
kUpar -> time_dims[1] = x_dim;
kUpar -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kUpar_xt", NC_FLOAT, 2, kUpar->time_dims, &kUpar->time)) ERR(retval);
kUpar->time_count[1] = grids_->Nx;
kUpar->xdata = true;
kUpar -> dx = true;
} else {
kUpar = new nca(0);
}
////////////////////////////
// //
// <d/dx Tparh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kTpar) {
kTpar = new nca(grids_->NxNyNz, grids_->Nx);
kTpar->write_v_time = true;
kTpar -> time_dims[0] = time_dim;
kTpar -> time_dims[1] = x_dim;
kTpar -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kTpar_xt", NC_FLOAT, 2, kTpar->time_dims, &kTpar->time)) ERR(retval);
kTpar -> time_count[1] = grids_->Nx;
kTpar -> xdata = true;
kTpar -> dx = true;
kTpar -> adj = sqrtf(2.0);
} else {
kTpar = new nca(0);
}
////////////////////////////
// //
// <d/dx Tperph>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kTperp) {
kTperp = new nca(grids_->NxNyNz, grids_->Nx);
kTperp -> write_v_time = true;
kTperp -> time_dims[0] = time_dim;
kTperp -> time_dims[1] = x_dim;
kTperp -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kTperp_xt", NC_FLOAT, 2, kTperp->time_dims, &kTperp->time)) ERR(retval);
kTperp -> time_count[1] = grids_->Nx;
kTperp -> xdata = true;
kTperp -> dx = true;
} else {
kTperp = new nca(0);
}
////////////////////////////
// //
// <d/dx qparh>_y,z (x) //
// //
////////////////////////////
if (pars_->write_kqpar) {
kqpar = new nca(grids_->NxNyNz, grids_->Nx);
kqpar -> write_v_time = true;
kqpar -> time_dims[0] = time_dim;
kqpar -> time_dims[1] = x_dim;
kqpar -> file = nc_zonal;
if (retval = nc_def_var(nc_zonal, "kqpar_xt", NC_FLOAT, 2, kqpar -> time_dims, &kqpar->time)) ERR(retval);
kqpar -> time_count[1] = grids_->Nx;
kqpar -> xdata = true;
kqpar -> dx = true;
kqpar -> adj = sqrtf(6.0);
} else {
kqpar = new nca(0);
}
////////////////////////////
// Non-zonal //
// <v_ExB> (x, y) //
// //
////////////////////////////
if (pars_->write_xyvEy) {
xyvEy = new nca(grids_->NxNyNz, grids_->NxNy);
xyvEy->write_v_time = true;
xyvEy -> time_dims[0] = ztime_dim;
xyvEy -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyvEy -> time_dims[2] = zx_dim;
xyvEy -> file = z_file;
if (retval = nc_def_var(z_file, "vEy_xyt", NC_FLOAT, 3, xyvEy -> time_dims, &xyvEy->time)) ERR(retval);
xyvEy -> time_count[1] = grids_->Ny;
xyvEy -> time_count[2] = grids_->Nx;
xyvEy -> xydata = true;
xyvEy -> dx = true;
} else {
xyvEy = new nca(0);
}
////////////////////////////
// Non-zonal //
// <d/dx v_ExB,y> (x, y) //
// //
////////////////////////////
if (pars_ -> write_xykxvEy) {
xykxvEy = new nca(grids_->NxNyNz, grids_->NxNy);
xykxvEy -> write_v_time = true;
xykxvEy -> time_dims[0] = ztime_dim;
xykxvEy -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xykxvEy -> time_dims[2] = zx_dim;
xykxvEy -> file = z_file;
if (retval = nc_def_var(z_file, "kxvEy_xyt", NC_FLOAT, 3, xykxvEy -> time_dims, &xykxvEy->time)) ERR(retval);
xykxvEy -> time_count[1] = grids_->Ny;
xykxvEy -> time_count[2] = grids_->Nx;
xykxvEy -> xydata = true;
xykxvEy -> d2x = true;
} else {
xykxvEy = new nca(0);
}
////////////////////////////
// Non-zonal //
// <den> (x, y) //
// //
////////////////////////////
if (pars_->write_xyden) {
xyden = new nca(grids_->NxNyNz, grids_->NxNy);
xyden->write_v_time = true;
xyden -> time_dims[0] = ztime_dim;
xyden -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyden -> time_dims[2] = zx_dim;
xyden -> file = z_file;
if (retval = nc_def_var(z_file, "den_xyt", NC_FLOAT, 3, xyden -> time_dims, &xyden->time)) ERR(retval);
xyden -> time_count[1] = grids_->Ny;
xyden -> time_count[2] = grids_->Nx;
xyden -> xydata = true;
} else {
xyden = new nca(0);
}
////////////////////////////
// Non-zonal //
// <Upar> (x, y) //
// //
////////////////////////////
if (pars_->write_xyUpar) {
xyUpar = new nca(grids_->NxNyNz, grids_->NxNy);
xyUpar->write_v_time = true;
xyUpar -> time_dims[0] = ztime_dim;
xyUpar -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyUpar -> time_dims[2] = zx_dim;
xyUpar -> file = z_file;
if (retval = nc_def_var(z_file, "upar_xyt", NC_FLOAT, 3, xyUpar -> time_dims, &xyUpar->time)) ERR(retval);
xyUpar -> time_count[1] = grids_->Ny;
xyUpar -> time_count[2] = grids_->Nx;
xyUpar -> xydata = true;
} else {
xyUpar = new nca(0);
}
////////////////////////////
// Non-zonal //
// <Tpar> (x, y) //
// //
////////////////////////////
if (pars_->write_xyTpar) {
xyTpar = new nca(grids_->NxNyNz, grids_->NxNy);
xyTpar->write_v_time = true;
xyTpar -> time_dims[0] = ztime_dim;
xyTpar -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyTpar -> time_dims[2] = zx_dim;
xyTpar -> file = z_file;
if (retval = nc_def_var(z_file, "Tpar_xyt", NC_FLOAT, 3, xyTpar -> time_dims, &xyTpar->time)) ERR(retval);
xyTpar -> time_count[1] = grids_->Ny;
xyTpar -> time_count[2] = grids_->Nx;
xyTpar -> xydata = true;
xyTpar -> adj = sqrtf(2.0);
} else {
xyTpar = new nca(0);
}
////////////////////////////
// Non-zonal //
// <Tperp> (x, y) //
// //
////////////////////////////
if (pars_->write_xyTperp) {
xyTperp = new nca(grids_->NxNyNz, grids_->NxNy);
xyTperp -> write_v_time = true;
xyTperp -> time_dims[0] = ztime_dim;
xyTperp -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyTperp -> time_dims[2] = zx_dim;
xyTperp -> file = z_file;
if (retval = nc_def_var(z_file, "Tperp_xyt", NC_FLOAT, 3, xyTperp -> time_dims, &xyTperp->time)) ERR(retval);
xyTperp -> time_count[1] = grids_->Ny;
xyTperp -> time_count[2] = grids_->Nx;
xyTperp -> xydata = true;
} else {
xyTperp = new nca(0);
}
////////////////////////////
// Non-zonal //
// <qpar> (x, y) //
// //
////////////////////////////
if (pars_->write_xyqpar) {
xyqpar = new nca(grids_->NxNyNz, grids_->NxNy);
xyqpar->write_v_time = true;
xyqpar -> time_dims[0] = ztime_dim;
xyqpar -> time_dims[1] = zy_dim; // Transpose to accommodate ncview
xyqpar -> time_dims[2] = zx_dim;
xyqpar -> file = z_file;
if (retval = nc_def_var(z_file, "qpar_xyt", NC_FLOAT, 3, xyqpar -> time_dims, &xyqpar->time)) ERR(retval);
xyqpar -> time_count[1] = grids_->Ny;
xyqpar -> time_count[2] = grids_->Nx;
xyqpar -> xydata = true;
xyqpar -> adj = sqrtf(6.0);
} else {
xyqpar = new nca(0);
}
if (pars_->ks && pars_->ResWrite) {
r_y = new nca(pars_->ResQ * grids_->NxNyNz * grids_->Nmoms);
r_y -> write_v_time = true;
r_y -> time_dims[0] = rtime_dim;
r_y -> time_dims[1] = res_dim;
r_y -> file = r_file;
if (retval = nc_def_var(r_file, "r", NC_DOUBLE, 2, r_y -> time_dims, &r_y -> time)) ERR(retval);
r_y -> time_count[1] = pars_->ResQ * grids_->NxNyNz*grids_->Nmoms;
} else {
r_y = new nca(0);
}
////////////////////////////
// //
// g(y) for K-S eqn //
// //
////////////////////////////
if (pars_->ks && pars_->write_ks) {
g_y = new nca(grids_->Ny);
g_y -> write_v_time = true;
g_y -> time_dims[0] = time_dim;
g_y -> time_dims[1] = y_dim;
g_y -> file = nc_special;
if (retval = nc_def_var(nc_special, "g_yt", NC_FLOAT, 2, g_y -> time_dims, &g_y -> time)) ERR(retval);
g_y -> time_count[1] = grids_->Ny;
int nbatch = 1;
grad_perp = new GradPerp(grids_, nbatch, grids_->Nyc);
} else {
g_y = new nca(0);
}
////////////////////////////
// //
// Free energy //
// //
////////////////////////////
if (pars_->write_free_energy) {
Wtot = new nca(0);
Wtot -> write_v_time = true;
Wtot -> time_dims[0] = time_dim;
Wtot -> file = nc_sp;
if (retval = nc_def_var(nc_sp, "W", NC_FLOAT, 1, Wtot -> time_dims, &Wtot -> time)) ERR(retval);
totW = 0.;
} else {
Wtot = new nca(0);
}
////////////////////////////
// //
// Heat fluxes //
// //
////////////////////////////
if (pars_->write_fluxes ) {
qs = new nca(nS);
qs -> write_v_time = true;
qs -> time_dims[0] = time_dim;
qs -> time_dims[1] = s_dim;
qs -> file = nc_flux;
if (retval = nc_def_var(nc_flux, "qflux", NC_FLOAT, 2, qs -> time_dims, &qs -> time)) ERR(retval);
qs -> time_count[1] = grids_->Nspecies;
all_red = new Species_Reduce(nR, nS); cudaDeviceSynchronize(); CUDA_DEBUG("Reductions: %s \n");
} else {
qs = new nca(0);
}
DEBUGPRINT("ncdf: ending definition mode for NetCDF \n");
if (retval = nc_enddef(file)) ERR(retval);
if (pars_->write_xymom) {
if (retval = nc_enddef(z_file)) ERR(retval);
}
if (pars_->ResWrite) {
if (retval = nc_enddef(r_file)) ERR(retval);
}
///////////////////////////////////
// //
// x //
// //
///////////////////////////////////
x_start[0] = 0;
x_count[0] = grids_->Nx;
if (retval = nc_put_vara(file, x, x_start, x_count, grids_->x_h)) ERR(retval);
if (pars_->write_xymom) {
if (retval = nc_put_vara(z_file, zx, x_start, x_count, grids_->x_h)) ERR(retval);
}
///////////////////////////////////
// //
// y //
// //
///////////////////////////////////
y_start[0] = 0;
y_count[0] = grids_->Ny;
if (retval = nc_put_vara(file, y, y_start, y_count, grids_->y_h)) ERR(retval);
if (pars_->write_xymom) {
if (retval = nc_put_vara(z_file, zy, y_start, y_count, grids_->y_h)) ERR(retval);
}
///////////////////////////////////
// //
// z //
// //
///////////////////////////////////
z_start[0] = 0;
z_count[0] = grids_->Nz;
// if (retval = nc_put_vara(file, z, z_start, z_count, z_h)) ERR(retval);
///////////////////////////////////
// //
// kz //
// //
///////////////////////////////////
kz_start[0] = 0;
kz_count[0] = grids_->Nz;
for (int i=0; i<grids_->Nz; i++) grids_->kpar_outh[i] = geo_->gradpar*grids_->kz_outh[i];
if (retval = nc_put_vara(file, kz, kz_start, kz_count, grids_->kpar_outh)) ERR(retval);
///////////////////////////////////
// //
// ky //
// //
///////////////////////////////////
ky_start[0] = 0;
ky_count[0] = grids_->Naky;
if (retval = nc_put_vara(file, ky, ky_start, ky_count, grids_->ky_h)) ERR(retval);
///////////////////////////////////
// //
// kx //
// //
///////////////////////////////////
kx_start[0] = 0;
kx_count[0] = grids_->Nakx;
if (retval = nc_put_vara(file, kx, kx_start, kx_count, grids_->kx_outh)) ERR(retval);
///////////////////////////////////
// //
// geometric information //
// //
///////////////////////////////////
geo_start[0] = 0;
geo_count[0] = grids_->Nz;
if (retval = nc_put_vara(file, theta, geo_start, geo_count, geo_->z_h)) ERR(retval);
if (linked && false) {
int Nx = grids_->Nx;
int Ny = grids_->Ny;
int Nz = grids_->Nz;
int Naky = grids_->Naky;
zkxky_count[0] = Nz;
zkxky_count[1] = 1;
zkxky_count[2] = 1;
size_t size = sizeof(float)*Nz;
cudaMallocHost((void**) &theta_extended, size);
float th0;
for (int i=0; i<(Nx-1)/3+1; i++) {
for (int j=0; j<(Ny-1)/3+1; j++) {
if (j==0) {th0 = 0.;} else {th0 = grids_->kx_h[i]/(grids_->ky_h[j]*pars_->shat);}
for (int k=0; k<Nz; k++) {
theta_extended[k] = geo_->z_h[k] - th0;
}
zkxky_start[0] = 0;
zkxky_start[1] = i;
zkxky_start[2] = j;
if (retval = nc_put_vara(nc_geo, theta_x, zkxky_start, zkxky_count, theta_extended)) ERR(retval);
}
}
for(int i=2*Nx/3+1; i<Nx; i++) {
for(int j=0; j<Naky; j++) {
if (j==0) {th0 = 0.;} else {th0 = grids_->kx_h[i]/(grids_->ky_h[j]*pars_->shat);}
for (int k=0; k<Nz; k++) {
theta_extended[k] = geo_->z_h[k] - th0;
}
zkxky_start[0] = 0;
zkxky_start[1] = i-2*Nx/3+(Nx-1)/3;
zkxky_start[2] = j;
if (retval = nc_put_vara(nc_geo, theta_x, zkxky_start, zkxky_count, theta_extended)) ERR(retval);
}
}
if (theta_extended) cudaFreeHost(theta_extended);
}
// if (retval = nc_put_vara(file, theta, geo_start, geo_count, geo_->z_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, bmag, geo_start, geo_count, geo_->bmag_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, bgrad, geo_start, geo_count, geo_->bgrad_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gbdrift, geo_start, geo_count, geo_->gbdrift_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gbdrift0, geo_start, geo_count, geo_->gbdrift0_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, cvdrift, geo_start, geo_count, geo_->cvdrift_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, cvdrift0, geo_start, geo_count, geo_->cvdrift0_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gds2, geo_start, geo_count, geo_->gds2_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gds21, geo_start, geo_count, geo_->gds21_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, gds22, geo_start, geo_count, geo_->gds22_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, grho, geo_start, geo_count, geo_->grho_h)) ERR(retval);
if (retval = nc_put_vara(nc_geo, jacobian, geo_start, geo_count, geo_->jacobian_h)) ERR(retval);
if (retval = nc_put_var (nc_geo, ivar, &geo_->gradpar)) ERR(retval);
idum = pars_->boundary_option_periodic ? 1 : 0;
if (retval = nc_put_var(file, periodic, &idum)) ERR(retval);
idum = pars_->local_limit ? 1 : 0;
if (retval = nc_put_var(file, local_limit, &idum)) ERR(retval);
}
NetCDF_ids::~NetCDF_ids() {
  // Release pinned host buffers (allocated with cudaMallocHost for the PZT
  // diagnostics); guards assume unallocated pointers are null.
  if (primary)   cudaFreeHost ( primary );
  if (secondary) cudaFreeHost ( secondary );
  if (tertiary)  cudaFreeHost ( tertiary );
  // Release device buffers (allocated with cudaMalloc).
  if (amom)  cudaFree ( amom );
  if (df)    cudaFree ( df );
  if (favg)  cudaFree ( favg );
  // t_bar is cudaMalloc'd when write_pzt is set; it was previously leaked here.
  if (t_bar) cudaFree ( t_bar );
  // Reduction helper objects.
  if (red)     delete red;
  if (pot)     delete pot;
  if (ph2)     delete ph2;
  if (all_red) delete all_red;
  // NOTE(review): the nca objects (qs, Pky, Wkxky, ...) and grad_perp are
  // new'd in the constructor but not deleted here — confirm whether they are
  // intentionally leaked at shutdown or freed elsewhere.
}
// Write the zonal datum held in D->zonal to NetCDF.
// The snapshot variable (D->idx) is written only at the end of the run;
// the time-history variable (D->time) is written on every call when enabled.
// Always advances D's time-record counter afterwards.
void NetCDF_ids::write_zonal_nc(nca *D, bool endrun) {
  int retval;
  const bool final_snapshot = D->write && endrun;
  if (final_snapshot) {
    if (retval = nc_put_vara(D->file, D->idx,  D->start,      D->count,      &D->zonal)) ERR(retval);
  }
  if (D->write_v_time) {
    if (retval = nc_put_vara(D->file, D->time, D->time_start, D->time_count, &D->zonal)) ERR(retval);
  }
  D->increment_ts();
}
// Flush the host-side buffer D->cpu to NetCDF: the snapshot variable is
// written only at end-of-run, the time-history variable on every call when
// enabled. Advances D's time-record counter afterwards.
void NetCDF_ids::write_nc(nca *D, bool endrun) {
  int retval;
  const bool final_snapshot = D->write && endrun;
  if (final_snapshot) {
    if (retval = nc_put_vara(D->file, D->idx,  D->start,      D->count,      D->cpu)) ERR(retval);
  }
  if (D->write_v_time) {
    if (retval = nc_put_vara(D->file, D->time, D->time_start, D->time_count, D->cpu)) ERR(retval);
  }
  D->increment_ts();
}
// Scalar (double) overload: write a single value instead of D->cpu.
// Snapshot variable only at end-of-run; time-history variable every call.
void NetCDF_ids::write_nc(nca *D, double data, bool endrun) {
  int retval;
  const bool final_snapshot = D->write && endrun;
  if (final_snapshot) {
    if (retval = nc_put_vara(D->file, D->idx,  D->start,      D->count,      &data)) ERR(retval);
  }
  if (D->write_v_time) {
    if (retval = nc_put_vara(D->file, D->time, D->time_start, D->time_count, &data)) ERR(retval);
  }
  D->increment_ts();
}
void NetCDF_ids::write_nc(nca *D, float data, bool endrun) {
int retval;
if (D->write && endrun) {if (retval=nc_put_vara(D->file, D->idx, D->start, D->count, &data)) ERR(retval);}
if (D->write_v_time) {if (retval=nc_put_vara(D->file, D->time, D->time_start, D->time_count, &data)) ERR(retval);}
D->increment_ts();
}
/*
void NetCDF_ids::pzt(MomentsG* G, Fields* f)
{
int threads=256;
int blocks=(grids_->NxNycNz+threads-1)/threads;
primary[0]=0.; secondary[0]=0.; tertiary[0]=0.;
Tbar <<<blocks, threads>>> (t_bar, G->G(), f->phi, geo_->kperp2);
get_pzt <<<blocks, threads>>> (&primary[0], &secondary[0], &tertiary[0], f->phi, t_bar);
}
*/
// Reduce P2 to a spectrum vs ky for each species, drop the padded tail of the
// ky axis (Nyc -> Naky), and write the result.
void NetCDF_ids::write_Pky(float* P2, bool endrun)
{
  if (!(Pky->write_v_time || (Pky->write && endrun))) return;

  const int nElements = grids_->Nyc * grids_->Nspecies;
  pot->Sum(P2, Pky->data, PSPECTRA_ky);
  CP_TO_CPU(Pky->tmp, Pky->data, sizeof(float) * nElements);

  // Repack: host scratch is laid out (Nyc, Nspecies); the output keeps only
  // the first Naky entries of each species' row.
  for (int is = 0; is < grids_->Nspecies; is++) {
    for (int ik = 0; ik < grids_->Naky; ik++) {
      Pky->cpu[ik + is * grids_->Naky] = Pky->tmp[ik + is * grids_->Nyc];
    }
  }
  write_nc(Pky, endrun);
}
// P spectrum vs kx per species. The reduced kx axis arrives in FFT order
// (0, +kx ..., padding, then -kx wrapping down from Nx-1); it is repacked
// into a monotonically increasing axis of length Nakx with kx = 0 at index NK.
void NetCDF_ids::write_Pkx(float* P2, bool endrun)
{
  if (Pkx -> write_v_time || (Pkx -> write && endrun)) {
    int i = grids_->Nx*grids_->Nspecies;  // elements in the reduced (Nx, Nspecies) result
    int NK = grids_->Nakx/2;              // number of positive (de-aliased) kx modes
    int NX = grids_->Nx;
    // Reduce on the GPU, then copy the result to the host scratch buffer.
    pot->Sum(P2, Pkx->data, PSPECTRA_kx); CP_TO_CPU(Pkx->tmp, Pkx->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 mode maps to the center of the output axis.
      int it = 0;
      int itp = it + NK;
      Pkx->cpu[itp + is*grids_->Nakx] = Pkx->tmp[it + is*grids_->Nx];
      for (int it = 1; it < NK+1; it++) {
        int itp = NK + it;   // destination of the +kx mode
        int itn = NK - it;   // destination of the -kx mode
        int itm = NX - it;   // source of the -kx mode in FFT ordering
        Pkx->cpu[itp + is*grids_->Nakx] = Pkx->tmp[it + is*grids_->Nx];
        Pkx->cpu[itn + is*grids_->Nakx] = Pkx->tmp[itm + is*grids_->Nx];
      }
    }
    write_nc(Pkx, endrun);
  }
}
// Reduce P2 to a spectrum vs z for each species and write it directly
// (the z grid needs no reordering).
void NetCDF_ids::write_Pz(float* P2, bool endrun)
{
  if (!(Pz->write_v_time || (Pz->write && endrun))) return;

  const int nElements = grids_->Nz * grids_->Nspecies;
  pot->Sum(P2, Pz->data, PSPECTRA_z);
  CP_TO_CPU(Pz->cpu, Pz->data, sizeof(float) * nElements);
  write_nc(Pz, endrun);
}
// P spectrum vs kz per species. For Nz > 1 the FFT-ordered kz axis is rotated
// by Nz/2 + 1 so the written axis is monotonic; Nz == 1 copies straight through.
void NetCDF_ids::write_Pkz(float* P2, bool endrun)
{
  if (Pkz -> write_v_time || (Pkz -> write && endrun)) {
    int i = grids_->Nz*grids_->Nspecies; int Nz = grids_->Nz;
    pot->Sum(P2, Pkz->data, PSPECTRA_kz); CP_TO_CPU(Pkz->tmp, Pkz->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      if (Nz>1) {
        // Rotate the FFT ordering so negative kz comes first.
        for (int i = 0; i < Nz; i++) Pkz->cpu[i + is*Nz] = Pkz->tmp[ (i + Nz/2 + 1) % Nz + is*Nz ];
      } else {
        for (int i = 0; i < Nz; i++) Pkz->cpu[i + is*Nz] = Pkz->tmp[ i + is*Nz ];
      }
    }
    write_nc(Pkz, endrun);
  }
}
// P spectrum vs (kx, ky) per species: reduce on the GPU, then repack from the
// internal layout (padded ky axis Nyc, FFT-ordered kx axis Nx) into the output
// (Nakx, Naky) layout with a centered, monotonic kx axis.
void NetCDF_ids::write_Pkxky(float* P2, bool endrun)
{
  if (Pkxky -> write_v_time || (Pkxky -> write && endrun)) {
    int i = grids_->Nyc*grids_->Nx*grids_->Nspecies;
    int NK = grids_->Nakx/2;   // positive kx modes kept after de-aliasing
    int NX = grids_->Nx;
    pot->Sum(P2, Pkxky->data, PSPECTRA_kxky);
    CP_TO_CPU(Pkxky->tmp, Pkxky->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 column maps to the center (index NK) of the output axis.
      int it = 0;
      int itp = it + NK;
      for (int ik = 0; ik < grids_->Naky; ik++) {
        int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;  // output index
        int Rp = ik + it*grids_->Nyc + is*grids_->Nyc *grids_->Nx;      // source index
        Pkxky->cpu[Qp] = Pkxky->tmp[Rp];
      }
      for (int it = 1; it < NK+1; it++) {
        int itp = NK + it;   // +kx destination
        int itn = NK - it;   // -kx destination
        int itm = NX - it;   // -kx source (FFT ordering)
        for (int ik = 0; ik < grids_->Naky; ik++) {
          int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;
          int Rp = ik + it*grids_->Nyc + is*grids_->Nyc * NX;
          int Qn = itn + ik *grids_->Nakx + is*grids_->Naky*grids_->Nakx;
          int Rm = ik + itm*grids_->Nyc + is*grids_->Nyc * NX;
          Pkxky->cpu[Qp] = Pkxky->tmp[Rp];
          Pkxky->cpu[Qn] = Pkxky->tmp[Rm];
        }
      }
    }
    write_nc(Pkxky, endrun);
  }
}
// W spectrum vs z per species: reduce G2 over all other dimensions and write.
void NetCDF_ids::write_Wz(float *G2, bool endrun)
{
  if (Wz -> write_v_time || (Wz -> write && endrun)) {
    int i = grids_->Nz*grids_->Nspecies;
    red->Sum(G2, Wz->data, WSPECTRA_z); CP_TO_CPU(Wz->cpu, Wz->data, sizeof(float)*i);
    write_nc(Wz, endrun);
  }
}

// W spectrum vs kz per species. For Nz > 1 the FFT-ordered kz axis is rotated
// by Nz/2 + 1 so the written axis is monotonic.
void NetCDF_ids::write_Wkz(float *G2, bool endrun)
{
  if (Wkz -> write_v_time || (Wkz -> write && endrun)) {
    int i = grids_->Nz*grids_->Nspecies; int Nz = grids_->Nz;
    red->Sum(G2, Wkz->data, WSPECTRA_kz); CP_TO_CPU(Wkz->tmp, Wkz->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      if (Nz>1) {
        for (int i = 0; i < Nz; i++) Wkz->cpu[i+is*Nz] = Wkz->tmp[ (i + Nz/2 + 1) % Nz + is*Nz ];
      } else {
        for (int i = 0; i < Nz; i++) Wkz->cpu[i+is*Nz] = Wkz->tmp[ i + is*Nz ];
      }
    }
    write_nc (Wkz, endrun);
  }
}

// Per-species W totals: reduce G2 to one value per species, write, and add
// each value into totW (the running total later emitted by write_Wtot()).
// NOTE(review): totW is zeroed in write_Ps() and reset after write_Wtot();
// correctness relies on the caller invoking these in that order -- confirm.
void NetCDF_ids::write_Ws(float* G2, bool endrun)
{
  if (Ws -> write_v_time) {
    red->Sum(G2, Ws->data, WSPECTRA_species); CP_TO_CPU(Ws->cpu, Ws->data, sizeof(float)*grids_->Nspecies);
    write_nc(Ws, endrun);
    if (Wtot -> write_v_time) {
      for (int is=0; is < grids_->Nspecies; is++) totW += Ws->cpu[is];
    }
  }
}
// W spectrum vs ky per species: reduce, trim the padded ky axis (Nyc -> Naky),
// and write.
void NetCDF_ids::write_Wky(float* G2, bool endrun)
{
  if (!(Wky->write_v_time || (Wky->write && endrun))) return;

  const int nElements = grids_->Nyc * grids_->Nspecies;
  red->Sum(G2, Wky->data, WSPECTRA_ky);
  CP_TO_CPU(Wky->tmp, Wky->data, sizeof(float) * nElements);

  for (int is = 0; is < grids_->Nspecies; is++) {
    for (int ik = 0; ik < grids_->Naky; ik++) {
      Wky->cpu[ik + is * grids_->Naky] = Wky->tmp[ik + is * grids_->Nyc];
    }
  }
  write_nc(Wky, endrun);
}
// W spectrum vs kx per species: reduce, then reorder the FFT-ordered kx axis
// (0, +kx ..., padding, -kx wrapping from Nx-1) into a centered, monotonic
// axis of length Nakx.
void NetCDF_ids::write_Wkx(float* G2, bool endrun)
{
  if (Wkx -> write_v_time || (Wkx -> write && endrun)) {
    int i = grids_->Nx*grids_->Nspecies;
    int NX = grids_->Nx;
    int NK = grids_->Nakx/2;   // positive kx modes kept
    red->Sum(G2, Wkx->data, WSPECTRA_kx); CP_TO_CPU(Wkx->tmp, Wkx->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 goes to the center of the output axis.
      int it = 0;
      int itp = it + NK;
      Wkx->cpu[itp + is*grids_->Nakx] = Wkx->tmp[it + is*grids_->Nx];
      for (int it = 1; it < NK+1; it++) {
        int itp = NK + it;   // +kx destination
        int itn = NK - it;   // -kx destination
        int itm = NX - it;   // -kx source (FFT ordering)
        Wkx->cpu[itp + is*grids_->Nakx] = Wkx->tmp[it + is*grids_->Nx];
        Wkx->cpu[itn + is*grids_->Nakx] = Wkx->tmp[itm + is*grids_->Nx];
      }
    }
    write_nc(Wkx, endrun);
  }
}

// W spectrum vs (kx, ky) per species: same kx reordering as write_Wkx, with
// the padded ky axis (Nyc) trimmed to Naky.
void NetCDF_ids::write_Wkxky(float* G2, bool endrun)
{
  if (Wkxky -> write_v_time || (Wkxky -> write && endrun)) {
    int i = grids_->Nyc*grids_->Nx*grids_->Nspecies; // int NK = (grids_->Nx-1)/3+1;
    int NK = grids_->Nakx/2;
    int NX = grids_->Nx;
    red->Sum(G2, Wkxky->data, WSPECTRA_kxky); CP_TO_CPU(Wkxky->tmp, Wkxky->data, sizeof(float)*i);
    for (int is = 0; is < grids_->Nspecies; is++) {
      // kx = 0 column maps to the center (index NK) of the output axis.
      int it = 0;
      int itp = it + NK;
      for (int ik = 0; ik < grids_->Naky; ik++) {
        int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;  // output index
        int Rp = ik + it*grids_->Nyc + is*grids_->Nyc *grids_->Nx;      // source index
        Wkxky->cpu[Qp] = Wkxky->tmp[Rp];
      }
      for (int it = 1; it < NK+1; it++) {
        int itp = NK + it;
        int itn = NK - it;
        int itm = NX - it;
        for (int ik = 0; ik < grids_->Naky; ik++) {
          int Qp = itp + ik*grids_->Nakx + is*grids_->Naky*grids_->Nakx;
          int Rp = ik + it*grids_->Nyc + is*grids_->Nyc * NX;
          int Qn = itn + ik *grids_->Nakx + is*grids_->Naky*grids_->Nakx;
          int Rm = ik + itm*grids_->Nyc + is*grids_->Nyc * NX;
          Wkxky->cpu[Qp] = Wkxky->tmp[Rp];
          Wkxky->cpu[Qn] = Wkxky->tmp[Rm];
        }
      }
    }
    write_nc(Wkxky, endrun);
  }
}
// W spectrum vs Hermite index m, per species.
void NetCDF_ids::write_Wm(float* G2, bool endrun)
{
  if (!(Wm->write_v_time || (Wm->write && endrun))) return;

  const int nElements = grids_->Nm * grids_->Nspecies;
  red->Sum(G2, Wm->data, WSPECTRA_m);
  CP_TO_CPU(Wm->cpu, Wm->data, sizeof(float) * nElements);
  write_nc(Wm, endrun);
}

// W spectrum vs the combined (l, m) moment index, per species.
void NetCDF_ids::write_Wlm(float* G2, bool endrun)
{
  if (!(Wlm->write_v_time || (Wlm->write && endrun))) return;

  const int nElements = grids_->Nmoms * grids_->Nspecies;
  red->Sum(G2, Wlm->data, WSPECTRA_lm);
  CP_TO_CPU(Wlm->cpu, Wlm->data, sizeof(float) * nElements);
  write_nc(Wlm, endrun);
}

// W spectrum vs Laguerre index l, per species.
void NetCDF_ids::write_Wl(float* G2, bool endrun)
{
  if (!(Wl->write_v_time || (Wl->write && endrun))) return;

  const int nElements = grids_->Nl * grids_->Nspecies;
  red->Sum(G2, Wl->data, WSPECTRA_l);
  CP_TO_CPU(Wl->cpu, Wl->data, sizeof(float) * nElements);
  write_nc(Wl, endrun);
}
// Per-species P totals: reduce P2 to one value per species and write. When the
// total-energy diagnostic is enabled, totW is reset here and seeded with these
// values; write_Ws()/write_As() then add their contributions before
// write_Wtot() emits and re-zeros the total (caller must preserve that order).
void NetCDF_ids::write_Ps(float* P2, bool endrun)
{
  if (Ps -> write_v_time) {
    pot->Sum(P2, Ps->data, PSPECTRA_species); CP_TO_CPU(Ps->cpu, Ps->data, sizeof(float)*grids_->Nspecies);
    write_nc(Ps, endrun);
    if (Wtot -> write_v_time) {
      totW = 0.;
      for (int is=0; is < grids_->Nspecies; is++) totW += Ps->cpu[is];
    }
  }
}
// A spectrum vs ky (no species dimension): reduce and write.
void NetCDF_ids::write_Aky(float* P2, bool endrun)
{
  if (!(Aky->write_v_time || (Aky->write && endrun))) return;

  ph2->Sum(P2, Aky->data, ASPECTRA_ky);
  CP_TO_CPU(Aky->cpu, Aky->data, sizeof(float) * grids_->Naky);
  write_nc(Aky, endrun);
}

// A spectrum vs z: reduce and write directly.
void NetCDF_ids::write_Az(float* P2, bool endrun)
{
  if (!(Az->write_v_time || (Az->write && endrun))) return;

  ph2->Sum(P2, Az->data, ASPECTRA_z);
  CP_TO_CPU(Az->cpu, Az->data, sizeof(float) * grids_->Nz);
  write_nc(Az, endrun);
}
// A spectrum vs kz: for Nz > 1 rotate the FFT-ordered kz axis by Nz/2 + 1 so
// the output axis is monotonic; Nz == 1 copies straight through.
void NetCDF_ids::write_Akz(float* P2, bool endrun)
{
  if (Akz -> write_v_time || (Akz -> write && endrun)) {
    int Nz = grids_->Nz;
    ph2->Sum(P2, Akz->data, ASPECTRA_kz); CP_TO_CPU(Akz->tmp, Akz->data, sizeof(float)*Nz);
    if (Nz>1) {
      for (int i = 0; i < Nz; i++) Akz->cpu[i] = Akz->tmp[ (i + Nz/2 + 1) % Nz ];
    } else {
      for (int i = 0; i < Nz; i++) Akz->cpu[i] = Akz->tmp[ i ];
    }
    write_nc(Akz, endrun);
  }
}
// A spectrum vs kx (no species dimension). The reduced kx axis arrives in FFT
// order (0, +kx ..., padding, -kx wrapping down from Nx-1) and is repacked
// into a monotonically increasing axis of length Nakx with kx = 0 at index NK.
// (Cleanup: removed stray empty statements ";;" left on three lines.)
void NetCDF_ids::write_Akx(float* P2, bool endrun)
{
  if (Akx -> write_v_time || (Akx -> write && endrun)) {
    int NX = grids_->Nx;
    int NK = grids_->Nakx/2;   // number of positive (de-aliased) kx modes
    ph2->Sum(P2, Akx->data, ASPECTRA_kx); CP_TO_CPU(Akx->tmp, Akx->data, sizeof(float)*NX);
    // kx = 0 mode maps to the center of the output axis.
    int it = 0;
    int itp = it + NK;
    Akx->cpu[itp] = Akx->tmp[it];
    for (int it = 1; it < NK+1; it++) {
      int itp = NK + it;   // +kx destination
      int itn = NK - it;   // -kx destination
      int itm = NX - it;   // -kx source (FFT ordering)
      Akx->cpu[itp] = Akx->tmp[it];
      Akx->cpu[itn] = Akx->tmp[itm];
    }
    write_nc(Akx, endrun);
  }
}
// A spectrum vs (kx, ky), no species dimension: same repacking as the P and W
// kxky writers (trim padded ky axis, recenter the FFT-ordered kx axis).
void NetCDF_ids::write_Akxky(float* P2, bool endrun)
{
  if (Akxky -> write_v_time || (Akxky -> write && endrun)) {
    int i = grids_->Nyc*grids_->Nx; int NK = grids_->Nakx/2; int NX = grids_->Nx;
    ph2->Sum(P2, Akxky->data, ASPECTRA_kxky); CP_TO_CPU(Akxky->tmp, Akxky->data, sizeof(float)*i);
    // kx = 0 column maps to the center (index NK) of the output axis.
    int it = 0;
    int itp = it + NK;
    for (int ik = 0; ik < grids_->Naky; ik++) {
      int Qp = itp + ik*grids_->Nakx ;   // output index
      int Rp = ik + it*grids_->Nyc ;     // source index
      Akxky->cpu[Qp] = Akxky->tmp[Rp];
    }
    for (int it = 1; it < NK+1; it++) {
      int itp = NK + it;   // +kx destination
      int itn = NK - it;   // -kx destination
      int itm = NX - it;   // -kx source (FFT ordering)
      for (int ik = 0; ik < grids_->Naky; ik++) {
        int Qp = itp + ik*grids_->Nakx ;
        int Rp = ik + it*grids_->Nyc ;
        int Qn = itn + ik *grids_->Nakx ;
        int Rm = ik + itm*grids_->Nyc ;
        Akxky->cpu[Qp] = Akxky->tmp[Rp];
        Akxky->cpu[Qn] = Akxky->tmp[Rm];
      }
    }
    write_nc(Akxky, endrun);
  }
}

// A-field total: reduce P2 to a single number and write it; also added into
// totW when the total-energy diagnostic is enabled (see write_Ps/write_Wtot).
void NetCDF_ids::write_As(float *P2, bool endrun)
{
  if (As -> write_v_time) {
    ph2->Sum(P2, As->data, ASPECTRA_species); CP_TO_CPU (As->cpu, As->data, sizeof(float));
    write_nc(As, endrun);
    if (Wtot -> write_v_time) totW += *As->cpu;
  }
}
// Reduce the Q diagnostic to one value per species, write it, and echo the
// per-species values to stdout.
void NetCDF_ids::write_Q (float* Q, bool endrun)
{
  if (!qs->write_v_time) return;

  all_red->Sum(Q, qs->data);
  CP_TO_CPU (qs->cpu, qs->data, sizeof(float) * grids_->Nspecies);
  write_nc(qs, endrun);

  for (int is = 0; is < grids_->Nspecies; is++) printf ("%e \t ", qs->cpu[is]);
  printf("\n");
}

// Copy the complex frequency estimate W from the device, repack it into the
// de-aliased (ky, kx) output layout via reduce2k, and append to the time series.
void NetCDF_ids::write_omg(cuComplex *W, bool endrun)
{
  CP_TO_CPU (omg->z_tmp, W, sizeof(cuComplex) * grids_->NxNyc);
  reduce2k(omg->cpu, omg->z_tmp);
  write_nc(omg, endrun);
}
// Emit the accumulated total (totW) and reset the accumulator for the next
// diagnostic interval.
void NetCDF_ids::write_Wtot()
{
  if (!Wtot->write_v_time) return;
  write_nc(Wtot, totW);
  totW = 0.;
}

// Close the main netCDF output file and, when x-y moment output was enabled,
// the auxiliary file as well.
void NetCDF_ids::close_nc_file() {
  int retval;
  if ((retval = nc_close(file))) ERR(retval);
  if (pars_->write_xymom) {
    if ((retval = nc_close(pars_->nczid))) ERR(retval);
  }
}
// Compute and write one diagnostic moment derived from the complex field f.
//
// If D->dx = true, take one derivative in x
// If D->d2x = true, take two derivatives in x
// Multiply by D->adj
// If D->xydata = true, output is function of (x, y) with zonal component subtracted
// If D->xdata = true, output is function of x only
// If D->scalar = true, output is sqrt (sum_kx <<f**2(kx)>>)
//
// NOTE(review): write_nc(D)/write_zonal_nc(D) are called with one argument
// here, so `endrun` presumably has a default value in the header -- confirm.
void NetCDF_ids::write_moment(nca *D, cuComplex *f, float* vol_fac) {
  if (!D->write_v_time) return;

  // Clear the scratch moment buffer. (setval/loop_R are kernel-launch helpers
  // defined elsewhere.)
  cuComplex zz = make_cuComplex(0., 0.); setval loop_R (amom, zz, D->N_);

  // Perform any desired d/dx operations
  if (D->d2x) {
    d2x Gmom (amom, f, grids_->kx);
  } else if (D->dx) {
    ddx Gmom (amom, f, grids_->kx);
  } else {
    CP_ON_GPU (amom, f, sizeof(cuComplex)*grids_->NxNycNz);
  }

  // Hermite -> physical moments
  if (D->adj > 1.0) {
    scale_singlemom_kernel loop_R (amom, amom, D->adj); // loop_R has more elements than required but it is safe
  }

  if (D->xydata) {
    // (x, y) output with the field-line-averaged (zonal) component removed.
    fieldlineaverage GFLA (favg, df, amom, vol_fac); // D->tmp = <<f>>(kx), df = f - <<f>>
    grad_phi -> C2R(df, D->data);
    xytranspose loop_xy (D->data, D->tmp_d); // For now, take the first plane in the z-direction by default
    CP_TO_CPU(D->cpu, D->tmp_d, sizeof(float)*D->Nwrite_);
    write_nc(D);
    return;
  }

  // Otherwise reduce to a function of x by averaging over y and z.
  grad_phi -> C2R(amom, D->data);
  yzavg loop_x (D->data, D->tmp_d, vol_fac);
  CP_TO_CPU (D->cpu, D->tmp_d, sizeof(float)*D->Nwrite_);

  if (D->xdata) {
    write_nc(D);
    return;
  }

  if (D->scalar) {
    // RMS over x of the y-z averaged moment.
    D->zonal = 0.;
    for (int idx = 0; idx<grids_->Nx; idx++) D->zonal += D->cpu[idx] * D->cpu[idx];
    D->zonal = sqrtf(D->zonal/((float) grids_->Nx));
    write_zonal_nc(D);
    return;
  }
}
// KS diagnostic output: transform the complex field G to real space and write.
void NetCDF_ids::write_ks_data(nca *D, cuComplex *G) {
  if (!D->write_v_time) return;

  grad_perp->C2R(G, D->data);
  CP_TO_CPU (D->cpu, D->data, sizeof(float) * D->N_);
  write_nc(D);
}

// KS diagnostic output for data that is already real: copy and write.
void NetCDF_ids::write_ks_data(nca *D, float *G) {
  if (!D->write_v_time) return;

  CP_TO_CPU (D->cpu, G, sizeof(float) * D->N_);
  write_nc(D);
}
// condense a (ky,kx) object for netcdf output, taking into account the mask
// and changing the type from cuComplex to float
//
// Input f uses the internal layout (index ik + it*Nyc) with the kx axis in
// FFT order: 0, +kx ..., then negative modes wrapping down from Nx-1. Output
// fk is a real array of interleaved (re, im) pairs over the de-aliased
// (Nakx, Naky) grid, with kx monotonically increasing and kx = 0 at index NK.
void NetCDF_ids::reduce2k(float *fk, cuComplex* f) {
  int Nx = grids_->Nx;
  int Nakx = grids_->Nakx;
  int Naky = grids_->Naky;
  int Nyc = grids_->Nyc;
  int NK = grids_->Nakx/2;
  // kx = 0 column maps to the center of the output axis.
  int it = 0;
  int itp = it + NK;
  for (int ik=0; ik<Naky; ik++) {
    int Qp = itp + ik*Nakx;   // output index
    int Rp = ik + it*Nyc;     // source index
    fk[2*Qp ] = f[Rp].x;
    fk[2*Qp+1] = f[Rp].y;
  }
  // Remaining columns: +kx from the low FFT indices, -kx from the wrapped
  // high indices.
  for (int it = 1; it < NK+1; it++) {
    int itp = NK + it;   // +kx destination
    int itn = NK - it;   // -kx destination
    int itm = Nx - it;   // -kx source (FFT ordering)
    for (int ik=0; ik<Naky; ik++) {
      int Qp = itp + ik*Nakx;
      int Rp = ik + it*Nyc;
      int Qn = itn + ik*Nakx;
      int Rm = ik + itm*Nyc;
      fk[2*Qp ] = f[Rp].x;
      fk[2*Qp+1] = f[Rp].y;
      fk[2*Qn ] = f[Rm].x;
      fk[2*Qn+1] = f[Rm].y;
    }
  }
}
|
4a605928ccd2d2720115c10d14f9d511d5026254.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
// Grid-stride kernel: for every edge e, mark keepEdges[e] = 1 iff both of its
// endpoints are still unmatched (matches[] == -1); otherwise 0.
// src/dst are the edge endpoint lists; numEdges bounds the loop.
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
  const int stride = blockDim.x * gridDim.x;
  for (int e = blockDim.x * blockIdx.x + threadIdx.x; e < numEdges; e += stride) {
    const bool bothFree = (matches[src[e]] == -1) && (matches[dst[e]] == -1);
    keepEdges[e] = bothFree ? 1 : 0;
  }
}
| 4a605928ccd2d2720115c10d14f9d511d5026254.cu | #include <stdio.h>
#include <stdlib.h>
// Grid-stride kernel: for every edge e, mark keepEdges[e] = 1 iff both of its
// endpoints are still unmatched (matches[] == -1); otherwise 0.
// src/dst are the edge endpoint lists; numEdges bounds the loop.
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
  const int stride = blockDim.x * gridDim.x;
  for (int e = blockDim.x * blockIdx.x + threadIdx.x; e < numEdges; e += stride) {
    const bool bothFree = (matches[src[e]] == -1) && (matches[dst[e]] == -1);
    keepEdges[e] = bothFree ? 1 : 0;
  }
}
|
e77a2148ecabded63e9df2108430a372ab001738.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
// Resize `self` to a contiguous 1-D tensor of length k and fill it with k
// values copied host -> device from `src`.
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k)
{
  int64_t dims[1]    = { k };
  int64_t strides[1] = { 1 };
  THCTensor_(resizeNd)(state, self, 1, dims, strides);
  THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(),
                        src, k * sizeof(real), hipMemcpyHostToDevice));
}

// Resize `self` to an m x n column-major tensor and fill it with m*n values
// copied host -> device from `src` (assumed column-major, as MAGMA produces).
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n)
{
  int64_t dims[2]    = { m, n };
  int64_t strides[2] = { 1, m };
  THCTensor_(resizeNd)(state, self, 2, dims, strides);
  THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(),
                        src, m * n * sizeof(real), hipMemcpyHostToDevice));
}

// Copy a 2-D device tensor into the host buffer `dst` in column-major order:
// transpose, materialize the transposed layout with newContiguous, then do a
// single device -> host memcpy.
static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self)
{
  THAssert(self->dim() == 2);
  size_t nbytes = THCTensor_(nElement)(state, self) * sizeof(real);
  THCTensor *transposed = THCTensor_(newTranspose)(state, self, 0, 1);
  THCTensor *contiguous = THCTensor_(newContiguous)(state, transposed);
  THCudaCheck(hipMemcpy(dst,
                        THCStorage_(data)(state, THTensor_getStoragePtr(contiguous)) + contiguous->storage_offset(),
                        nbytes, hipMemcpyDeviceToHost));
  THCTensor_(free)(state, transposed);
  THCTensor_(free)(state, contiguous);
}
#endif // USE_MAGMA
// Return a column-major (stride (1, rows)) 2-D tensor holding src's data, as
// required by MAGMA. If `self` aliases `src` and is already column-major it is
// retained and returned directly; otherwise a column-major tensor is resized
// and filled by copy. The caller owns the returned reference (released later
// via THCTensor_(free) or THCTensor_(freeCopyTo)).
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
  THAssert(src->dim() == 2);
  // Fast path: already column-major and aliasing src -- just bump the refcount.
  if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
  {
    THCTensor_(retain)(state, self);
    return self;
  }
  // Resizing src in place would clobber the data we still need to copy,
  // so allocate a fresh destination tensor in that case.
  if (self == src)
    self = THCTensor_(new)(state);
  else
    THCTensor_(retain)(state, self);
  int64_t size[2] = { src->size(0), src->size(1) };
  int64_t stride[2] = { 1, src->size(0) };
  THCTensor_(resizeNd)(state, self, 2, size, stride);
  THCTensor_(copy)(state, self, src);
  return self;
}
// Solve the square linear system A X = B with MAGMA's LU-based gesv driver.
// On return rb_ holds the solution X and ra_ the LU factorization of A.
// Raises on illegal arguments (info < 0) or a singular U (info > 0).
THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
  THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
  THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional");
  THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square");
  THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible");

  int64_t n = a_->size(0);
  int64_t nrhs = b_->size(1);

  // MAGMA's _gpu interface wants column-major, GPU-resident matrices.
  THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
  THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
  real *a_data = THCTensor_(data)(state, a);
  real *b_data = THCTensor_(data)(state, b);

  int *ipiv = th_magma_malloc_pinned<int>(n);  // LU pivot indices (pinned host)

  int info;
#if defined(THC_REAL_IS_FLOAT)
  magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#else
  magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#endif

  if (info < 0)
    THError("MAGMA gesv : Argument %d : illegal value", -info);
  else if (info > 0)
    THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);

  magma_free_pinned(ipiv);

  THCTensor_(freeCopyTo)(state, a, ra_);
  THCTensor_(freeCopyTo)(state, b, rb_);
#else
  THError(NoMagma(gesv));
#endif
}

// Triangular solve op(A) X = B via magma_?trsm with alpha = 1. uplo[0] selects
// the 'U'pper or 'L'ower triangle of A, trans[0] == 'N' means no transpose,
// diag[0] == 'U' marks a unit diagonal. rb_ receives X (solved in place in b),
// ra_ receives the column-major copy of A.
THC_API void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_,
                               const char *uplo, const char *trans, const char *diag)
{
#ifdef USE_MAGMA
  THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
  THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional");
  THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square");
  THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible");

  // Translate LAPACK-style character flags into MAGMA enums.
  magma_side_t sz = MagmaLeft;
  magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
  magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans;
  magma_diag_t dg = diag[0] == 'U' ? MagmaUnit : MagmaNonUnit;
  real alpha = 1;

  int64_t n = a_->size(0);
  int64_t nrhs = b_->size(1);

  THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
  THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
  real *a_data = THCTensor_(data)(state, a);
  real *b_data = THCTensor_(data)(state, b);

#if defined(THC_REAL_IS_FLOAT)
  magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n);
#else
  magma_dtrsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n);
#endif

  THCTensor_(freeCopyTo)(state, a, ra_);
  THCTensor_(freeCopyTo)(state, b, rb_);
#else
  THError(NoMagma(trtrs));
#endif
}
// Least-squares solve of the overdetermined system A X = B (m >= n) via
// MAGMA's QR-based gels. rb_ receives the solution, ra_ the factored A.
// Uses the standard LAPACK-style two-pass call: workspace query (lwork = -1)
// followed by the actual solve.
THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
  THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
  THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
  THArgCheck(a_->size(0) == b_->size(0), 2, "Expected A and b to have same size "
      "at dim 0, but they have incompatible sizes");
  THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
      "m >= n. The case for m < n is not implemented yet.");

  THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
  THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
  real *a_data = THCTensor_(data)(state, a);
  real *b_data = THCTensor_(data)(state, b);

  int64_t m = a->size(0);
  int64_t n = a->size(1);
  int64_t nrhs = b->size(1);
  real wkopt;   // receives the optimal workspace size from the query call

  int info;
#if defined(THC_REAL_IS_FLOAT)
  magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
  magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif

  real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
  magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
  magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
  magma_free_pinned(hwork);

  if (info != 0)
    THError("MAGMA gels : Argument %d : illegal value", -info);

  THCTensor_(freeCopyTo)(state, a, ra_);
  THCTensor_(freeCopyTo)(state, b, rb_);
#else
  THError(NoMagma(gels));
#endif
}

// Symmetric eigendecomposition via magma_?syevd_gpu (divide and conquer).
// jobzs[0] == 'N' computes eigenvalues only; anything else also computes
// eigenvectors, which are left in rv_ (the working column-major copy of a).
// uplos selects which triangle of a is read. re_ receives the eigenvalues.
THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
  int64_t n = THTensor_sizeLegacyNoScalars(a, 0);
  int64_t lda = n;

  magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
  magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;

  THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
  real *input_data = THCTensor_(data)(state, input);

  if (n > 0) {
    // eigen values and workspace
    real *w = th_magma_malloc_pinned<real>(n);
    real *wA = th_magma_malloc_pinned<real>(lda * n);

    // compute optimal size of work array (lwork/liwork query with -1)
    int info;
    real lwork;
    int liwork;

#if defined(THC_REAL_IS_FLOAT)
    magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
    magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif

    real *work = th_magma_malloc_pinned<real>((size_t)lwork);
    int *iwork = th_magma_malloc_pinned<int>(liwork);

    // compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
    magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
    magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif

    // copy eigen values from w to re_
    if (info == 0)
      THCTensor_(copyArray1d)(state, re_, w, n);

    magma_free_pinned(iwork);
    magma_free_pinned(work);
    magma_free_pinned(wA);
    magma_free_pinned(w);

    // check error value
    if (info > 0)
      THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
    else if (info < 0)
      THError("MAGMA syev : Argument %d : illegal value", -info);
  }

  THCTensor_(freeCopyTo)(state, input, rv_);
#else
  THError(NoMagma(syev));
#endif
}
// General (non-symmetric) eigendecomposition via magma_?geev (CPU interface).
// Eigenvalues are packed into re_ as an (n, 2) tensor of (real, imaginary)
// pairs; right eigenvectors go to rv_ when jobvrs[0] != 'N'. Left eigenvectors
// are never computed (first argument is MagmaNoVec).
THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
  THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional");
  THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square");

  magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
  int64_t n = a_->size(0);

  // CPU-interface routine: stage A and all outputs in pinned host buffers.
  real *a_data = th_magma_malloc_pinned<real>(n * n);
  THCTensor_(copyTensor2d)(state, a_data, a_);

  real *wr = th_magma_malloc_pinned<real>(n);  // real parts of the eigenvalues
  real *wi = th_magma_malloc_pinned<real>(n);  // imaginary parts

  real *vr_data = NULL;
  int64_t ldvr = 1;
  if (jobvr == MagmaVec)
  {
    vr_data = th_magma_malloc_pinned<real>(n * n);
    ldvr = n;
  }

  real *work_data = nullptr;

  if (n > 0) {
    int info;
    real wkopt;
    // Workspace query (lwork = -1), then the actual decomposition.
#if defined(THC_REAL_IS_FLOAT)
    magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
    magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif

    int lwork = (int) wkopt;
    work_data = th_magma_malloc_pinned<real>(lwork);

#if defined(THC_REAL_IS_FLOAT)
    magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
    magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif

    if (info > 0)
      THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
    else if (info < 0)
      THError("MAGMA geev : Argument %d : illegal value", -info);
  }

  {
    // Pack (wr, wi) into re_: build a (2, n) tensor (wr in row 0, wi in row 1)
    // and transpose it to (n, 2).
    THCTensor_(resize2d)(state, re_, 2, n);
    THCTensor *re = THCTensor_(newContiguous)(state, re_);
    if (n > 0) {
      THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(real), hipMemcpyHostToDevice));
      THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(real), hipMemcpyHostToDevice));
    }
    THCTensor_(freeCopyTo)(state, re, re_);
    THCTensor_(transpose)(state, re_, NULL, 0, 1);
  }

  if (jobvr == MagmaVec)
    THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);

  magma_free_pinned(work_data);
  magma_free_pinned(vr_data);
  magma_free_pinned(wi);
  magma_free_pinned(wr);
  magma_free_pinned(a_data);
#else
  THError(NoMagma(geev));
#endif
}
// SVD convenience wrapper: delegates to gesvd2 and discards the extra output
// tensor (the working copy of A) that gesvd2 produces.
THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
  THCTensor *scratch = THCTensor_(new)(state);
  THCTensor_(gesvd2)(state, ru_, rs_, rv_, scratch, a, jobu);
  THCTensor_(free)(state, scratch);
#else
  THError(NoMagma(gesvd));
#endif
}
// SVD driver via magma_?gesdd (divide and conquer, CPU interface).
// jobus[0]: 'A' all vectors, 'S' some, 'O' overwrite, anything else none.
// Outputs: ru_ = U (m x j), rs_ = singular values (k = min(m, n)), rv_ = right
// singular vectors (MAGMA returns V^T, which is transposed back and, for
// non-'A' jobs, narrowed to its leading jv columns), ra_ = the working copy
// of A after the call.
THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
  THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
  magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;

  int iunused[1];
  int64_t m = a->size(0);
  int64_t n = a->size(1);
  int64_t k = m < n ? m : n;                    // number of singular values
  int64_t j = (jobz == MagmaAllVec) ? m : k;    // columns of U produced
  int64_t jv = (jobz == MagmaAllVec) ? n : k;   // columns of V kept

  // Stage A and the outputs in pinned host buffers for the CPU-interface call.
  real *a_data = th_magma_malloc_pinned<real>(m * n);
  THCTensor_(copyTensor2d)(state, a_data, a);

  real *rs_data = th_magma_malloc_pinned<real>(k);
  real *ru_data = th_magma_malloc_pinned<real>(m * j);
  real *rv_data = th_magma_malloc_pinned<real>(n * n);

  real wkopt;
  int info;

  // Workspace query (lwork = -1), then the actual decomposition.
#if defined(THC_REAL_IS_FLOAT)
  magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#else
  magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#endif

  int lwork = (int) wkopt;
  real *work_data = th_magma_malloc_pinned<real>(lwork);
  int *iwork = th_magma_malloc_pinned<int>(8 * k);

#if defined(THC_REAL_IS_FLOAT)
  magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#else
  magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#endif

  if (info > 0)
    THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info);
  else if (info < 0)
    THError("MAGMA gesdd : Argument %d : illegal value", -info);

  THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
  THCTensor_(transpose)(state, rv_, NULL, 0, 1);   // V^T -> V
  if (jobz != MagmaAllVec)
    THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv);
  THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
  THCTensor_(copyArray1d)(state, rs_, rs_data, k);
  THCTensor_(copyArray2d)(state, ra_, a_data, m, n);

  magma_free_pinned(work_data);
  magma_free_pinned(iwork);
  magma_free_pinned(rv_data);
  magma_free_pinned(ru_data);
  magma_free_pinned(rs_data);
  magma_free_pinned(a_data);
#else
  THError(NoMagma(gesvd2));
#endif
}
// Inverse of a square matrix. With MAGMA: LU factorization (getrf) followed by
// inversion (getri), both on the GPU. Without MAGMA: fall back to the batched
// cuBLAS getrf/getri routines with a batch of size one.
THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a)
{
  THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
  THArgCheck(a->size(0) == a->size(1), 2, "A should be square");

#ifdef USE_MAGMA
  int info;
  int64_t n = a->size(0);
  int lwork = n * magma_get_sgetri_nb(n);   // workspace sized by MAGMA's block size

  THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
  real *input_data = THCTensor_(data)(state, input);

  int *ipiv = th_magma_malloc_pinned<int>(n);   // LU pivot indices

  THCTensor *work = THCTensor_(newWithSize1d)(state, lwork);
  real *work_data = THCTensor_(data)(state, work);

  // Run LU
#if defined(THC_REAL_IS_FLOAT)
  magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
#else
  magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info);
#endif
  if (info > 0)
    THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
  else if (info < 0)
    THError("MAGMA getrf : Argument %d : illegal value", -info);

  // Inverse
#if defined(THC_REAL_IS_FLOAT)
  magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#else
  magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#endif
  if (info > 0)
    THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
  else if (info < 0)
    THError("MAGMA getri : Argument %d : illegal value", -info);

  THCTensor_(free)(state, work);
  magma_free_pinned(ipiv);
  THCTensor_(freeCopyTo)(state, input, ra_);
#else
  int64_t n = a->size(0);

  // input
  THCTensor *input = THCTensor_(newColumnMajor)(state, a, a);
  THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input));

  // The batched cuBLAS interface takes device arrays of matrix pointers.
  real *matrices1[1] = { THCTensor_(data)(state, input) };
  real *matrices2[1] = { THCTensor_(data)(state, ra_) };

  // Copy pointers to device.
  auto d_matrices1 = static_cast<real**>(THCudaMalloc(state, sizeof(real*)));
  auto d_matrices2 = static_cast<real**>(THCudaMalloc(state, sizeof(real*)));

  THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, sizeof(real*),
                             hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
  THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, sizeof(real*),
                             hipMemcpyHostToDevice, THCState_getCurrentStream(state)));

  int info;
  auto info_gpu = static_cast<int*>(THCudaMalloc(state, sizeof(int)));
  auto ipiv_gpu = static_cast<int*>(THCudaMalloc(state, n * sizeof(int)));

  // Run LU
#if defined(THC_REAL_IS_FLOAT)
  THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#else
  THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#endif
  // Batched cuBLAS reports status via device memory; fetch it synchronously.
  THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost));
  if (info > 0)
    THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
  else if (info < 0)
    THError("CUBLAS getrf : Argument %d : illegal value", -info);

  // Inverse
#if defined(THC_REAL_IS_FLOAT)
  THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#else
  THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#endif
  THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost));
  if (info > 0)
    THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
  else if (info < 0)
    THError("CUBLAS getri : Argument %d : illegal value", -info);

  THCudaFree(state, ipiv_gpu);
  THCudaFree(state, info_gpu);
  THCudaFree(state, d_matrices1);
  THCudaFree(state, d_matrices2);
  THCTensor_(free)(state, input);
#endif
}
// Mirror the upper triangle of a column-major n x n matrix (flattened to
// len = n*n) into the lower triangle, in place. Threads stride the flat index
// space by a fixed 65535, which matches the grid cap applied at the launch
// site (see potri). Some indices may be visited by more than one thread, but
// each writes the same value and source entries (r < c) are never modified,
// so the duplication is benign.
__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len)
{
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
    const int r = idx % n;   // row (fast-varying index in column-major layout)
    const int c = idx / n;   // column
    if (r > c) {
      input[idx] = input[r*n + c];   // below-diagonal (r,c) <- mirrored (c,r)
    }
  }
}

// Counterpart of copyUpperSymmetric: mirror the lower triangle into the upper.
__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len)
{
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
    const int r = idx % n;
    const int c = idx / n;
    if (r < c) {
      input[idx] = input[r*n + c];   // above-diagonal (r,c) <- mirrored (c,r)
    }
  }
}
// Inverse of a symmetric positive-definite matrix from its Cholesky factor via
// magma_?potri_gpu. MAGMA fills only the `uplo` triangle of the result, so a
// small kernel then mirrors that triangle across the diagonal to produce a
// full symmetric matrix in ra_.
THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
  THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
  THArgCheck(a->size(0) == a->size(1), 2, "A should be square");

  int64_t n = a->size(0);
  magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;

  THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
  real *input_data = THCTensor_(data)(state, input);

  int info;
#if defined(THC_REAL_IS_FLOAT)
  magma_spotri_gpu(ul, n, input_data, n, &info);
#else
  magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif

  if (info > 0)
    THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
  else if (info < 0)
    THError("MAGMA potri : Argument %d : illegal value", -info);

  // Mirror the computed triangle into the other half. The grid is capped at
  // 65535 blocks; the copy kernels stride by that same constant.
  hipStream_t stream = THCState_getCurrentStream(state);
  const int len = n*n;
  dim3 blocks(::min(DIVUP(len, 128), 65535));
  dim3 threads(128);
  if (uplo[0] == 'U') {
    hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
  } else {
    hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
  }

  THCTensor_(freeCopyTo)(state, input, ra_);
#else
  THError(NoMagma(potri));
#endif
}
THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be (non-empty) 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrf_gpu(ul, n, input_data, n, &info);
#else
magma_dpotrf_gpu(ul, n, input_data, n, &info);
#endif
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
if (uplo[0] == 'U') {
THCTensor_(triu)(state, ra_, input, 0);
} else {
THCTensor_(tril)(state, ra_, input, 0);
}
THCTensor_(free)(state, input);
#else
THError(NoMagma(potrf));
#endif
}
THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
int64_t nrhs = b->size(1);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b);
real *b_data = THCTensor_(data)(state, b_);
THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a);
real *a_data = THCTensor_(data)(state, a_);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#else
magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#endif
// check error value
if (info < 0)
THError("MAGMA potrs : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, b_, rb_);
THCTensor_(free)(state, a_);
#else
THError(NoMagma(potrs));
#endif
}
THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
real *rtau_data = th_magma_malloc_pinned<real>(k);
real *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
real *a_data = THCTensor_(data)(state, a);
real *tau_data = th_magma_malloc_pinned<real>(k);
THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb);
real *work_data = THCTensor_(data)(state, work);
int info;
// We need to call two different versions of ?geqrf:
// ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give
// R properly. Note that the MAGMA documentation for this method is wrong.
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
// ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(narrow)(state, a, a, 0, 0, k);
THCTensor_(triu)(state, rr_, a, 0);
THCTensor_(free)(state, a);
a = THCTensor_(newColumnMajor)(state, rq_, a_);
a_data = THCTensor_(data)(state, a);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#else
magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a);
real *q_data = THCTensor_(data)(state, q);
#if defined(THC_REAL_IS_FLOAT)
magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#else
magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#endif
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCTensor_(free)(state, a);
THCTensor_(free)(state, work);
magma_free_pinned(tau_data);
THCTensor_(narrow)(state, q, q, 1, 0, k);
THCTensor_(freeCopyTo)(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
#endif
#endif
| e77a2148ecabded63e9df2108430a372ab001738.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(real);
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(real);
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(real);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(cudaMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square");
THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible");
int64_t n = a_->size(0);
int64_t nrhs = b_->size(1);
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int *ipiv = th_magma_malloc_pinned<int>(n);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#else
magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#endif
if (info < 0)
THError("MAGMA gesv : Argument %d : illegal value", -info);
else if (info > 0)
THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gesv));
#endif
}
THC_API void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_,
const char *uplo, const char *trans, const char *diag)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square");
THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible");
magma_side_t sz = MagmaLeft;
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans;
magma_diag_t dg = diag[0] == 'U' ? MagmaUnit : MagmaNonUnit;
real alpha = 1;
int64_t n = a_->size(0);
int64_t nrhs = b_->size(1);
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
#if defined(THC_REAL_IS_FLOAT)
magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n);
#else
magma_dtrsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n);
#endif
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(trtrs));
#endif
}
THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
THArgCheck(a_->size(0) == b_->size(0), 2, "Expected A and b to have same size "
"at dim 0, but they have incompatible sizes");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int64_t n = THTensor_sizeLegacyNoScalars(a, 0);
int64_t lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
real *input_data = THCTensor_(data)(state, input);
if (n > 0) {
// eigen values and workspace
real *w = th_magma_malloc_pinned<real>(n);
real *wA = th_magma_malloc_pinned<real>(lda * n);
// compute optimal size of work array
int info;
real lwork;
int liwork;
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
real *work = th_magma_malloc_pinned<real>((size_t)lwork);
int *iwork = th_magma_malloc_pinned<int>(liwork);
// compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
// copy eigen values from w to re_
if (info == 0)
THCTensor_(copyArray1d)(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
}
THCTensor_(freeCopyTo)(state, input, rv_);
#else
THError(NoMagma(syev));
#endif
}
THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int64_t n = a_->size(0);
real *a_data = th_magma_malloc_pinned<real>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
real *wr = th_magma_malloc_pinned<real>(n);
real *wi = th_magma_malloc_pinned<real>(n);
real *vr_data = NULL;
int64_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<real>(n * n);
ldvr = n;
}
real *work_data = nullptr;
if (n > 0) {
int info;
real wkopt;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
}
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
if (n > 0) {
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(real), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(real), cudaMemcpyHostToDevice));
}
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
THCTensor *ra_ = THCTensor_(new)(state);
THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu);
THCTensor_(free)(state, ra_);
#else
THError(NoMagma(gesvd));
#endif
}
THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
int iunused[1];
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = m < n ? m : n;
int64_t j = (jobz == MagmaAllVec) ? m : k;
int64_t jv = (jobz == MagmaAllVec) ? n : k;
real *a_data = th_magma_malloc_pinned<real>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
real *rs_data = th_magma_malloc_pinned<real>(k);
real *ru_data = th_magma_malloc_pinned<real>(m * j);
real *rv_data = th_magma_malloc_pinned<real>(n * n);
real wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
int *iwork = th_magma_malloc_pinned<int>(8 * k);
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#endif
if (info > 0)
THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info);
else if (info < 0)
THError("MAGMA gesdd : Argument %d : illegal value", -info);
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
if (jobz != MagmaAllVec)
THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(iwork);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a)
{
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
#ifdef USE_MAGMA
int info;
int64_t n = a->size(0);
int lwork = n * magma_get_sgetri_nb(n);
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int *ipiv = th_magma_malloc_pinned<int>(n);
THCTensor *work = THCTensor_(newWithSize1d)(state, lwork);
real *work_data = THCTensor_(data)(state, work);
// Run LU
#if defined(THC_REAL_IS_FLOAT)
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
#else
magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info);
#endif
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#else
magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCTensor_(free)(state, work);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, input, ra_);
#else
int64_t n = a->size(0);
// input
THCTensor *input = THCTensor_(newColumnMajor)(state, a, a);
THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input));
real *matrices1[1] = { THCTensor_(data)(state, input) };
real *matrices2[1] = { THCTensor_(data)(state, ra_) };
// Copy pointers to device.
auto d_matrices1 = static_cast<real**>(THCudaMalloc(state, sizeof(real*)));
auto d_matrices2 = static_cast<real**>(THCudaMalloc(state, sizeof(real*)));
THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(real*),
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(real*),
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
auto info_gpu = static_cast<int*>(THCudaMalloc(state, sizeof(int)));
auto ipiv_gpu = static_cast<int*>(THCudaMalloc(state, n * sizeof(int)));
// Run LU
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#else
THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#endif
THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#else
THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#endif
THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaFree(state, ipiv_gpu);
THCudaFree(state, info_gpu);
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCTensor_(free)(state, input);
#endif
}
__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
cudaStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
} else {
THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be (non-empty) 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrf_gpu(ul, n, input_data, n, &info);
#else
magma_dpotrf_gpu(ul, n, input_data, n, &info);
#endif
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
if (uplo[0] == 'U') {
THCTensor_(triu)(state, ra_, input, 0);
} else {
THCTensor_(tril)(state, ra_, input, 0);
}
THCTensor_(free)(state, input);
#else
THError(NoMagma(potrf));
#endif
}
THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
int64_t nrhs = b->size(1);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b);
real *b_data = THCTensor_(data)(state, b_);
THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a);
real *a_data = THCTensor_(data)(state, a_);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#else
magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#endif
// check error value
if (info < 0)
THError("MAGMA potrs : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, b_, rb_);
THCTensor_(free)(state, a_);
#else
THError(NoMagma(potrs));
#endif
}
THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
real *rtau_data = th_magma_malloc_pinned<real>(k);
real *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
real *a_data = THCTensor_(data)(state, a);
real *tau_data = th_magma_malloc_pinned<real>(k);
THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb);
real *work_data = THCTensor_(data)(state, work);
int info;
// We need to call two different versions of ?geqrf:
// ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give
// R properly. Note that the MAGMA documentation for this method is wrong.
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
// ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(narrow)(state, a, a, 0, 0, k);
THCTensor_(triu)(state, rr_, a, 0);
THCTensor_(free)(state, a);
a = THCTensor_(newColumnMajor)(state, rq_, a_);
a_data = THCTensor_(data)(state, a);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#else
magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a);
real *q_data = THCTensor_(data)(state, q);
#if defined(THC_REAL_IS_FLOAT)
magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#else
magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#endif
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCTensor_(free)(state, a);
THCTensor_(free)(state, work);
magma_free_pinned(tau_data);
THCTensor_(narrow)(state, q, q, 1, 0, k);
THCTensor_(freeCopyTo)(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
#endif
#endif
|
8bf2e48d85ff7e79b29c85035c0d49f1fc9de211.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
__global__ void kernel_update_pos( particule_t *p, vector_t *acc, int size ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < size)
{
p[i].vx += acc[i].x*MASSFACTOR*DAMP;
p[i].vy += acc[i].y*MASSFACTOR*DAMP;
p[i].vz += acc[i].z*MASSFACTOR*DAMP;
p[i].x += (p[i].vx)*DT;
p[i].y += (p[i].vy)*DT;
p[i].z += (p[i].vz)*DT;
}
}
__global__ void kernel_update_acc( particule_t *p, vector_t *acc, int size ) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//int stride = gridDim.x * blockDim.x;
int i=0;
float dx,dy,dz,d,fact;
if ( j < size ) {
//for(j=index;j<size; j+= stride){
acc[j].x = 0.0f;
acc[j].y = 0.0f;
acc[j].z = 0.0f;
for (i = 0; i < size; ++i)
{
dx = p[i].x-p[j].x;
dy = p[i].y-p[j].y;
dz = p[i].z-p[j].z;
d = dx*dx+dy*dy+dz*dz;
if ( d < 1.0 ) d = 1.0;
fact=p[i].m/(d*sqrtf(d));
acc[j].x += dx*fact;
acc[j].y += dy*fact;
acc[j].z += dz*fact;
}
}
}
void update_acc( int nblocks, int nthreads, particule_t *p, vector_t *acc, int size) {
hipLaunchKernelGGL(( kernel_update_acc), dim3(nblocks), dim3(nthreads), 0, 0, p, acc, size);
}
void update_position( int nblocks, int nthreads, particule_t *p, vector_t *acc, int size) {
hipLaunchKernelGGL(( kernel_update_acc), dim3(nblocks), dim3(nthreads), 0, 0, p, acc, size);
//hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_update_pos), dim3(nblocks), dim3(nthreads), 0, 0, p, acc, size);
}
| 8bf2e48d85ff7e79b29c85035c0d49f1fc9de211.cu | #include "cuda.h"
#include "header.h"
__global__ void kernel_update_pos( particule_t *p, vector_t *acc, int size ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < size)
{
p[i].vx += acc[i].x*MASSFACTOR*DAMP;
p[i].vy += acc[i].y*MASSFACTOR*DAMP;
p[i].vz += acc[i].z*MASSFACTOR*DAMP;
p[i].x += (p[i].vx)*DT;
p[i].y += (p[i].vy)*DT;
p[i].z += (p[i].vz)*DT;
}
}
__global__ void kernel_update_acc( particule_t *p, vector_t *acc, int size ) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//int stride = gridDim.x * blockDim.x;
int i=0;
float dx,dy,dz,d,fact;
if ( j < size ) {
//for(j=index;j<size; j+= stride){
acc[j].x = 0.0f;
acc[j].y = 0.0f;
acc[j].z = 0.0f;
for (i = 0; i < size; ++i)
{
dx = p[i].x-p[j].x;
dy = p[i].y-p[j].y;
dz = p[i].z-p[j].z;
d = dx*dx+dy*dy+dz*dz;
if ( d < 1.0 ) d = 1.0;
fact=p[i].m/(d*sqrtf(d));
acc[j].x += dx*fact;
acc[j].y += dy*fact;
acc[j].z += dz*fact;
}
}
}
void update_acc( int nblocks, int nthreads, particule_t *p, vector_t *acc, int size) {
kernel_update_acc<<<nblocks, nthreads>>>( p, acc, size);
}
void update_position( int nblocks, int nthreads, particule_t *p, vector_t *acc, int size) {
kernel_update_acc<<<nblocks, nthreads>>>( p, acc, size);
//cudaDeviceSynchronize();
kernel_update_pos<<<nblocks, nthreads>>>( p, acc, size);
}
|
ae66fc35e6bbefb20e84066114e756ec034cb3f3.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <string.h>
#include "../serializer/serial.h"
int main(int argc,char** argv){
FILE* fp;
fp = fopen("rawData/test.rawdata","rb");
fseek(fp,0,SEEK_END);
size_t size = ftell(fp);
fseek(fp,0,SEEK_SET);
printf("fsize:%d\n",(int)size);
void *dat = malloc(size);
fread(dat,1,size,fp);
void *now = dat;
void *tmp = now;
float *target;
int nElem;
for (int j=0;j<5;j++){
now = NextBuf(now);
tmp = DescendBuf(now);
printf("%s\n",NameBuf(tmp));
target = (float*)DescendBuf(tmp);
nElem = SizeBuf(tmp)/sizeof(float);
printf("<nELem:%d>",nElem);
for (int i=0;i<10;i++){
printf("%f,",target[i]);
}
printf("\n");
}
return 0;
} | ae66fc35e6bbefb20e84066114e756ec034cb3f3.cu | #pragma once
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <string.h>
#include "../serializer/serial.h"
int main(int argc,char** argv){
FILE* fp;
fp = fopen("rawData/test.rawdata","rb");
fseek(fp,0,SEEK_END);
size_t size = ftell(fp);
fseek(fp,0,SEEK_SET);
printf("fsize:%d\n",(int)size);
void *dat = malloc(size);
fread(dat,1,size,fp);
void *now = dat;
void *tmp = now;
float *target;
int nElem;
for (int j=0;j<5;j++){
now = NextBuf(now);
tmp = DescendBuf(now);
printf("%s\n",NameBuf(tmp));
target = (float*)DescendBuf(tmp);
nElem = SizeBuf(tmp)/sizeof(float);
printf("<nELem:%d>",nElem);
for (int i=0;i<10;i++){
printf("%f,",target[i]);
}
printf("\n");
}
return 0;
} |
08d5b1e910d79000d2c25d025faa7c254d8aff4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//monteCarloEngine.cu
//Scott Grauer-Gray
//May 10, 2012
//Function for running Monte Carlo on the GPU
//needed for the monte carlo GPU kernels
#include "monteCarloKernels.cu"
//needed for the monte carlo CPU kernels
#include "monteCarloKernelsCpu.cu"
//needed for hiprand
#include <hiprand/hiprand_kernel.h>
//needed for the thrust library (can be used for summation on GPU)
/* #include <thrust/transform_reduce.h> */
/* #include <thrust/functional.h> */
/* #include <thrust/device_vector.h> */
/* #include <thrust/host_vector.h> */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define RISK_VAL 0.06f
#define DIV_VAL 0.0f
#define VOLT_VAL 0.200f
#define UNDERLYING_VAL 30.0f
#define STRIKE_VAL 40.0f
#define DISCOUNT_VAL 0.94176453358424872f
//initialize the inputs
void initializeInputs(dataType* samplePrices, dataType* sampleWeights, dataType* times)
{
}
//run monte carlo...
void runMonteCarlo()
{
//int nSamplesArray[] = {100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000};//, 1000000, 2000000};//, 5000000};//, 10000000, 20000000};
int numSamples = 400000;
//for (int numTime=0; numTime < 12; numTime++)
{
//int numSamples = nSamplesArray[numTime];
printf("Number of Samples: %d\n\n", numSamples);
//declare and initialize the struct used for the option
monteCarloOptionStruct optionStruct;
optionStruct.riskVal = RISK_VAL;
optionStruct.divVal = DIV_VAL;
optionStruct.voltVal = VOLT_VAL;
optionStruct.underlyingVal = UNDERLYING_VAL;
optionStruct.strikeVal = STRIKE_VAL;
optionStruct.discountVal = DISCOUNT_VAL;
//declare pointers for data on CPU
dataType* samplePrices;
dataType* sampleWeights;
dataType* times;
monteCarloOptionStruct* optionStructs;
//allocate space for data on CPU
samplePrices = (dataType*)malloc(NUM_OPTIONS*numSamples*sizeof(dataType));
sampleWeights = (dataType*)malloc(NUM_OPTIONS*numSamples*sizeof(dataType));
times = (dataType*)malloc(NUM_OPTIONS*numSamples*sizeof(dataType));
optionStructs = (monteCarloOptionStruct*)malloc(NUM_OPTIONS*sizeof(monteCarloOptionStruct));
long seconds, useconds;
dataType mtimeGpu, mtimeCpu;
struct timeval start;
struct timeval end;
for (int optNum = 0; optNum < NUM_OPTIONS; optNum++)
{
optionStructs[optNum] = optionStruct;
}
//initialize values for data on CPU
//declare pointers for data on GPU
dataType* samplePricesGpu;
dataType* sampleWeightsGpu;
dataType* timesGpu;
monteCarloOptionStruct* optionStructsGpu;
//declare what's necessary to use hiprand
hiprandState_t* devStates;
/* Allocate space for prng states on device */
hipMalloc (( void **) & devStates , numSamples * sizeof ( hiprandState_t ) );
//allocate space for data on GPU
hipMalloc(&samplePricesGpu, NUM_OPTIONS*numSamples*sizeof(dataType));
hipMalloc(&sampleWeightsGpu, NUM_OPTIONS*numSamples*sizeof(dataType));
hipMalloc(×Gpu, NUM_OPTIONS*numSamples*sizeof(dataType));
hipMalloc(&optionStructsGpu, NUM_OPTIONS*sizeof(monteCarloOptionStruct));
//transfer data to GPU
hipMemcpy(samplePricesGpu, samplePrices, NUM_OPTIONS*numSamples*sizeof(dataType), hipMemcpyHostToDevice);
hipMemcpy(sampleWeightsGpu, sampleWeights, NUM_OPTIONS*numSamples*sizeof(dataType), hipMemcpyHostToDevice);
hipMemcpy(timesGpu, times, NUM_OPTIONS*numSamples*sizeof(dataType), hipMemcpyHostToDevice);
hipMemcpy(optionStructsGpu, optionStructs, NUM_OPTIONS*sizeof(monteCarloOptionStruct), hipMemcpyHostToDevice);
srand(time(NULL));
//for (int numTime=0; numTime < 100; numTime++)
{
/* initialize random seed: */
srand ( rand());
printf("Run on GPU\n");
gettimeofday(&start, NULL);
// setup execution parameters
dim3 grid1( (size_t)ceil((dataType)numSamples / ((dataType)THREAD_BLOCK_SIZE)), 1, 1);
dim3 threads1( THREAD_BLOCK_SIZE, 1, 1);
//initializes the states for the random number generator
hipLaunchKernelGGL(( setup_kernel) , dim3(grid1), dim3(threads1) , 0, 0, devStates, rand(), numSamples);
hipDeviceSynchronize();
// setup execution parameters
dim3 grid2( (size_t)ceil((dataType)numSamples / (THREAD_BLOCK_SIZE)), 1, 1);
dim3 threads2( THREAD_BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( monteCarloGpuKernel) , dim3(grid2), dim3(threads2) , 0, 0, samplePricesGpu, sampleWeightsGpu, timesGpu, (1.0f / (dataType)SEQUENCE_LENGTH), devStates, optionStructsGpu, numSamples);
hipDeviceSynchronize();
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeGpu = ((seconds) * 1000 + ((dataType)useconds)/1000.0) + 0.5;
printf("Processing time on GPU (CUDA): %f (ms)\n", mtimeGpu);
// remove comments to perform summation on GPU using thrust library
/* //create a pointer to point to the prices */
/* thrust::device_ptr<dataType> devicePointerPrices(samplePricesGpu); */
/* //compute the value for each option */
/* for (int numOpt=0; numOpt < NUM_OPTIONS; numOpt++) */
/* { */
/* //use the reduction function in thrust to retrieve the summation of the prices */
/* dataType sum = thrust::reduce(devicePointerPrices + numOpt*numSamples, devicePointerPrices + (numOpt+1)*(numSamples), (dataType) 0, thrust::plus<dataType>()); */
/* dataType avgPrice = sum / numSamples; */
/* printf("avgPrice: %f\n", avgPrice); */
/* } */
//transfer data back to host
hipMemcpy(samplePrices, samplePricesGpu, numSamples*sizeof(dataType), hipMemcpyDeviceToHost);
hipMemcpy(sampleWeights, sampleWeightsGpu, numSamples*sizeof(dataType), hipMemcpyDeviceToHost);
hipMemcpy(times, timesGpu, numSamples*sizeof(dataType), hipMemcpyDeviceToHost);
//retrieve the average price
dataType cumPrice = 0.0f;
//add all the computed prices together
for (int numSamp = 0; numSamp < numSamples; numSamp++)
{
cumPrice += samplePrices[numSamp];
}
dataType avgPrice = cumPrice / numSamples;
printf("Average Price (GPU computation): %f\n\n", avgPrice);
}
//free memory space on the GPU
hipFree(samplePricesGpu);
hipFree(sampleWeightsGpu);
hipFree(timesGpu);
hipFree(optionStructsGpu);
//free memory space on the CPU
free(samplePrices);
free(sampleWeights);
free(times);
//declare pointers for data on CPU
dataType* samplePricesCpu;
dataType* sampleWeightsCpu;
dataType* timesCpu;
//allocate space for data on CPU
samplePricesCpu = (dataType*)malloc(numSamples*sizeof(dataType));
sampleWeightsCpu = (dataType*)malloc(numSamples*sizeof(dataType));
timesCpu = (dataType*)malloc(numSamples*sizeof(dataType));
gettimeofday(&start, NULL);
monteCarloGpuKernelCpu(samplePricesCpu, sampleWeightsCpu, timesCpu, (1.0f / (dataType)SEQUENCE_LENGTH), optionStructs, numSamples);
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeCpu = ((seconds) * 1000 + ((dataType)useconds)/1000.0) + 0.5;
printf("Run on CPU\n");
printf("Processing time on CPU: %f (ms)\n", mtimeCpu);
//retrieve the average price
dataType cumPrice = 0.0f;
//add all the computed prices together
for (int numSamp = 0; numSamp < numSamples; numSamp++)
{
cumPrice += samplePricesCpu[numSamp];
}
dataType avgPrice = cumPrice / numSamples;
printf("Average Price (CPU computation): %f\n\n", avgPrice);
printf("Speedup on GPU: %f\n", mtimeCpu / mtimeGpu);
//free memory space on the CPU
free(samplePricesCpu);
free(sampleWeightsCpu);
free(timesCpu);
free(optionStructs);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runMonteCarlo();
char c;
c = getchar();
printf("%c\n", c);
}
| 08d5b1e910d79000d2c25d025faa7c254d8aff4d.cu | //monteCarloEngine.cu
//Scott Grauer-Gray
//May 10, 2012
//Function for running Monte Carlo on the GPU
//needed for the monte carlo GPU kernels
#include "monteCarloKernels.cu"
//needed for the monte carlo CPU kernels
#include "monteCarloKernelsCpu.cu"
//needed for curand
#include <curand_kernel.h>
//needed for the thrust library (can be used for summation on GPU)
/* #include <thrust/transform_reduce.h> */
/* #include <thrust/functional.h> */
/* #include <thrust/device_vector.h> */
/* #include <thrust/host_vector.h> */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define RISK_VAL 0.06f
#define DIV_VAL 0.0f
#define VOLT_VAL 0.200f
#define UNDERLYING_VAL 30.0f
#define STRIKE_VAL 40.0f
#define DISCOUNT_VAL 0.94176453358424872f
//initialize the inputs
void initializeInputs(dataType* samplePrices, dataType* sampleWeights, dataType* times)
{
}
//run monte carlo...
void runMonteCarlo()
{
//int nSamplesArray[] = {100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000};//, 1000000, 2000000};//, 5000000};//, 10000000, 20000000};
int numSamples = 400000;
//for (int numTime=0; numTime < 12; numTime++)
{
//int numSamples = nSamplesArray[numTime];
printf("Number of Samples: %d\n\n", numSamples);
//declare and initialize the struct used for the option
monteCarloOptionStruct optionStruct;
optionStruct.riskVal = RISK_VAL;
optionStruct.divVal = DIV_VAL;
optionStruct.voltVal = VOLT_VAL;
optionStruct.underlyingVal = UNDERLYING_VAL;
optionStruct.strikeVal = STRIKE_VAL;
optionStruct.discountVal = DISCOUNT_VAL;
//declare pointers for data on CPU
dataType* samplePrices;
dataType* sampleWeights;
dataType* times;
monteCarloOptionStruct* optionStructs;
//allocate space for data on CPU
samplePrices = (dataType*)malloc(NUM_OPTIONS*numSamples*sizeof(dataType));
sampleWeights = (dataType*)malloc(NUM_OPTIONS*numSamples*sizeof(dataType));
times = (dataType*)malloc(NUM_OPTIONS*numSamples*sizeof(dataType));
optionStructs = (monteCarloOptionStruct*)malloc(NUM_OPTIONS*sizeof(monteCarloOptionStruct));
long seconds, useconds;
dataType mtimeGpu, mtimeCpu;
struct timeval start;
struct timeval end;
for (int optNum = 0; optNum < NUM_OPTIONS; optNum++)
{
optionStructs[optNum] = optionStruct;
}
//initialize values for data on CPU
//declare pointers for data on GPU
dataType* samplePricesGpu;
dataType* sampleWeightsGpu;
dataType* timesGpu;
monteCarloOptionStruct* optionStructsGpu;
//declare what's necessary to use curand
curandState* devStates;
/* Allocate space for prng states on device */
cudaMalloc (( void **) & devStates , numSamples * sizeof ( curandState ) );
//allocate space for data on GPU
cudaMalloc(&samplePricesGpu, NUM_OPTIONS*numSamples*sizeof(dataType));
cudaMalloc(&sampleWeightsGpu, NUM_OPTIONS*numSamples*sizeof(dataType));
cudaMalloc(×Gpu, NUM_OPTIONS*numSamples*sizeof(dataType));
cudaMalloc(&optionStructsGpu, NUM_OPTIONS*sizeof(monteCarloOptionStruct));
//transfer data to GPU
cudaMemcpy(samplePricesGpu, samplePrices, NUM_OPTIONS*numSamples*sizeof(dataType), cudaMemcpyHostToDevice);
cudaMemcpy(sampleWeightsGpu, sampleWeights, NUM_OPTIONS*numSamples*sizeof(dataType), cudaMemcpyHostToDevice);
cudaMemcpy(timesGpu, times, NUM_OPTIONS*numSamples*sizeof(dataType), cudaMemcpyHostToDevice);
cudaMemcpy(optionStructsGpu, optionStructs, NUM_OPTIONS*sizeof(monteCarloOptionStruct), cudaMemcpyHostToDevice);
srand(time(NULL));
//for (int numTime=0; numTime < 100; numTime++)
{
/* initialize random seed: */
srand ( rand());
printf("Run on GPU\n");
gettimeofday(&start, NULL);
// setup execution parameters
dim3 grid1( (size_t)ceil((dataType)numSamples / ((dataType)THREAD_BLOCK_SIZE)), 1, 1);
dim3 threads1( THREAD_BLOCK_SIZE, 1, 1);
//initializes the states for the random number generator
setup_kernel <<< grid1, threads1 >>> (devStates, rand(), numSamples);
cudaDeviceSynchronize();
// setup execution parameters
dim3 grid2( (size_t)ceil((dataType)numSamples / (THREAD_BLOCK_SIZE)), 1, 1);
dim3 threads2( THREAD_BLOCK_SIZE, 1, 1);
monteCarloGpuKernel <<< grid2, threads2 >>>(samplePricesGpu, sampleWeightsGpu, timesGpu, (1.0f / (dataType)SEQUENCE_LENGTH), devStates, optionStructsGpu, numSamples);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeGpu = ((seconds) * 1000 + ((dataType)useconds)/1000.0) + 0.5;
printf("Processing time on GPU (CUDA): %f (ms)\n", mtimeGpu);
// remove comments to perform summation on GPU using thrust library
/* //create a pointer to point to the prices */
/* thrust::device_ptr<dataType> devicePointerPrices(samplePricesGpu); */
/* //compute the value for each option */
/* for (int numOpt=0; numOpt < NUM_OPTIONS; numOpt++) */
/* { */
/* //use the reduction function in thrust to retrieve the summation of the prices */
/* dataType sum = thrust::reduce(devicePointerPrices + numOpt*numSamples, devicePointerPrices + (numOpt+1)*(numSamples), (dataType) 0, thrust::plus<dataType>()); */
/* dataType avgPrice = sum / numSamples; */
/* printf("avgPrice: %f\n", avgPrice); */
/* } */
//transfer data back to host
cudaMemcpy(samplePrices, samplePricesGpu, numSamples*sizeof(dataType), cudaMemcpyDeviceToHost);
cudaMemcpy(sampleWeights, sampleWeightsGpu, numSamples*sizeof(dataType), cudaMemcpyDeviceToHost);
cudaMemcpy(times, timesGpu, numSamples*sizeof(dataType), cudaMemcpyDeviceToHost);
//retrieve the average price
dataType cumPrice = 0.0f;
//add all the computed prices together
for (int numSamp = 0; numSamp < numSamples; numSamp++)
{
cumPrice += samplePrices[numSamp];
}
dataType avgPrice = cumPrice / numSamples;
printf("Average Price (GPU computation): %f\n\n", avgPrice);
}
//free memory space on the GPU
cudaFree(samplePricesGpu);
cudaFree(sampleWeightsGpu);
cudaFree(timesGpu);
cudaFree(optionStructsGpu);
//free memory space on the CPU
free(samplePrices);
free(sampleWeights);
free(times);
//declare pointers for data on CPU
dataType* samplePricesCpu;
dataType* sampleWeightsCpu;
dataType* timesCpu;
//allocate space for data on CPU
samplePricesCpu = (dataType*)malloc(numSamples*sizeof(dataType));
sampleWeightsCpu = (dataType*)malloc(numSamples*sizeof(dataType));
timesCpu = (dataType*)malloc(numSamples*sizeof(dataType));
gettimeofday(&start, NULL);
monteCarloGpuKernelCpu(samplePricesCpu, sampleWeightsCpu, timesCpu, (1.0f / (dataType)SEQUENCE_LENGTH), optionStructs, numSamples);
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeCpu = ((seconds) * 1000 + ((dataType)useconds)/1000.0) + 0.5;
printf("Run on CPU\n");
printf("Processing time on CPU: %f (ms)\n", mtimeCpu);
//retrieve the average price
dataType cumPrice = 0.0f;
//add all the computed prices together
for (int numSamp = 0; numSamp < numSamples; numSamp++)
{
cumPrice += samplePricesCpu[numSamp];
}
dataType avgPrice = cumPrice / numSamples;
printf("Average Price (CPU computation): %f\n\n", avgPrice);
printf("Speedup on GPU: %f\n", mtimeCpu / mtimeGpu);
//free memory space on the CPU
free(samplePricesCpu);
free(sampleWeightsCpu);
free(timesCpu);
free(optionStructs);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runMonteCarlo();
char c;
c = getchar();
printf("%c\n", c);
}
|
409794380c1e8627302d60d26a65322660d85636.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VectorQuantization.hh"
#define OUTPUT
#include "helper.hh"
namespace pqt {
/** default constructor */
VectorQuantization::VectorQuantization(uint _dim) :
d_dim(_dim), d_codeBook(NULL) {
}
VectorQuantization::~VectorQuantization() {
if (d_codeBook)
hipFree(d_codeBook);
}
//__device__ float sqr(const float &x) {
// return x * x;
//}
/** for all vectors in A compute distance to all vectors in B of dimension _dim */
__global__ void calcDistKernel(float* _res, const float * _A, const float * _B,
uint _Arows, uint _Brows, uint _dim) {
extern __shared__ float shm[];
float *Ablock = shm;
float *Bblock = shm + blockDim.x * blockDim.y;
float *AB = Bblock + blockDim.x * blockDim.y;
uint id = threadIdx.x + threadIdx.y * blockDim.x;
uint arow = threadIdx.y + blockIdx.y * blockDim.y;
uint brow = threadIdx.y + blockIdx.x * blockDim.x;
uint ocol = threadIdx.x + blockIdx.x * blockDim.x;
uint AOffs = threadIdx.y * blockDim.x;
uint BOffs = threadIdx.x * blockDim.x;
AB[id] = 0.;
int j = blockIdx.z;
{
// for (int j = 0; j < _Acols; j += blockDim.x) {
// load block of A and B
uint col = threadIdx.x + j * blockDim.x;
Bblock[id] = 0.;
Ablock[id] = 0.;
if (col < _dim) {
if (brow < _Brows)
Bblock[id] = _B[brow * _dim + col];
if (arow < _Arows)
Ablock[id] = _A[arow * _dim + col];
}
__syncthreads();
// if ((col < _Acols) && (arow < _Arows))
// printf(" A B %i %f %f \n", id, Ablock[id], Bblock[id]);
// compute partial differences
for (int i = 0; i < blockDim.x; i++) {
AB[id] += sqr(Ablock[AOffs + i] - Bblock[BOffs + i]);
}
__syncthreads();
}
// write out the result
if ((arow < _Arows) && (ocol < _Brows)) {
// _res[arow][ocol] += AB[id];
atomicAdd(_res + (arow * _Brows + ocol), AB[id]);
// printf(" AB %i %i %i %f \n", id, arow, ocol, AB[id]);
}
}
void VectorQuantization::calcDist(float* _res, const float* _A, const float* _B,
uint _Arows, uint _Brows, uint _dim) const {
const uint blockSize = 16;
dim3 block(blockSize, blockSize, 1);
uint shmSize = (blockSize * blockSize * 3) * sizeof(float);
//std::cout << "requested shm: " << shmSize << std::endl;
hipMemset(_res, 0, _Arows * _Brows * sizeof(float));
dim3 grid(idiv(_Brows, blockSize), idiv(_Arows, blockSize),
idiv(_dim, blockSize));
hipLaunchKernelGGL(( calcDistKernel), dim3(grid), dim3(block), shmSize, 0, _res, _A, _B, _Arows, _Brows,
_dim);
checkCudaErrors(hipDeviceSynchronize());
// outputMat("dist: ", _res, 10, 10);
}
/** blockd Id.x corresponds to the cluster center, blockId.y is used to span multiple kernels
* will update the codebook vector of this center. As each y block is only adding some of the input vectors the last block is normalizing the vector
*/__global__ void avgClusterKernel(float* _codebook, float * _count,
uint *_retirementCount, uint _yElem, uint _dim, const float * _A,
uint _N, uint* _assignd) {
__shared__ bool amLast;
extern __shared__ float shm[];
float count = 0;
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
shm[i] = 0.;
}
int stop = (blockIdx.y + 1) * _yElem;
stop = (stop < _N) ? stop : _N;
// accumulate the vectors that belong to this cluster center
for (int n = blockIdx.y * _yElem; n < stop; n++) {
if (_assignd[n] == blockIdx.x) {
const float *v = _A + n * _dim;
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
shm[i] += v[i];
}
if (threadIdx.x == 0)
count++;
}
}
// store the result
__syncthreads();
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
atomicAdd(_codebook + blockIdx.x * _dim + i, shm[i]);
}
__threadfence();
if (threadIdx.x == 0) {
atomicAdd(_count + blockIdx.x, count);
uint ticket = atomicInc(_retirementCount + blockIdx.x, gridDim.y);
// If the ticket ID is equal to the number of blocks, we are the last block!
amLast = (ticket == gridDim.y - 1);
}
__syncthreads();
// the last block is responsible for dividing by the number of vectors added to this center
if (amLast) {
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
_codebook[blockIdx.x * _dim + i] /= _count[blockIdx.x];
}
// reset retirement count for next iteration
if (threadIdx.x == 0) {
_retirementCount[blockIdx.x] = 0;
}
}
}
/** compute the maximum Radius for each cluster, one block per cluster
*/__global__ void maxRadKernel(float* _maxRad, const uint* _assign, uint _N,
const float* _distMat) {
extern __shared__ float shm[];
float* sharedMax = shm;
uint c = blockIdx.x;
float vMax = 0.;
for (uint i = threadIdx.x; i < _N; i += blockDim.x) {
if (_assign[i] == c) {
float vMax2 = _distMat[i* gridDim.x + c];
if (vMax2 > vMax) {
vMax = vMax2;
}
}
}
sharedMax[threadIdx.x] = vMax;
for (uint stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride) {
float vMax2 = sharedMax[threadIdx.x + stride];
if (vMax2 > vMax) {
vMax = vMax2;
sharedMax[threadIdx.x] = vMax;
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
_maxRad[blockIdx.x] = sharedMax[0];
}
}
void VectorQuantization::getMaxRad(float *_maxRad, uint _nCluster, const uint* _assign, uint _N, const float* _distMat) const {
dim3 block(256, 1, 1);
dim3 grid(_nCluster, 1, 1);
uint shmsize = block.x * sizeof(float);
hipLaunchKernelGGL(( maxRadKernel), dim3(grid), dim3(block), shmsize, 0, _maxRad, _assign, _N, _distMat);
outputVec( "maxDist", _maxRad, _nCluster);
}
__global__ void assignKernel(uint* _assign, uint _N, const float* _distMat,
uint _nClusters) {
extern __shared__ float shm[];
float* sharedMin = shm;
uint* sharedIdx = (uint*) (shm + blockDim.x);
for (int row = blockIdx.x; row < _N; row += gridDim.x) {
// initialize with first element
const float* matRow = _distMat + row * _nClusters;
float vMin = matRow[0];
uint minIdx = 0;
for (uint i = threadIdx.x; i < _nClusters; i += blockDim.x) {
float vMin2 = matRow[i];
if (vMin2 < vMin) {
vMin = vMin2;
minIdx = i;
}
}
sharedMin[threadIdx.x] = vMin;
sharedIdx[threadIdx.x] = minIdx;
for (uint stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride) {
float vMin2 = sharedMin[threadIdx.x + stride];
if (vMin2 < vMin) {
vMin = vMin2;
sharedMin[threadIdx.x] = vMin;
sharedIdx[threadIdx.x] = sharedIdx[threadIdx.x + stride];
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
_assign[row] = sharedIdx[0];
}
}
}
void VectorQuantization::getAssignment(uint* _assignd, const float* _distMat,
uint _N, uint _nClusters) const {
// perform a minimum reduction per vector _N
dim3 block;
setReductionBlocks(block, _nClusters);
uint nBlocks = (_N < MAX_BLOCKS) ? _N : MAX_BLOCKS;
dim3 grid(nBlocks, 1, 1);
uint shmSize = block.x * 2 * sizeof(float);
hipLaunchKernelGGL(( assignKernel), dim3(grid), dim3(block), shmSize, 0, _assignd, _N, _distMat, _nClusters);
checkCudaErrors(hipDeviceSynchronize());
}
__global__ void splitVectorKernel(float* _codeBook, uint _dim, uint _nClusters,
float _epsilon) {
uint idx = blockIdx.x * _dim + threadIdx.x;
float orig = _codeBook[idx];
_codeBook[idx] = orig * (1. + _epsilon);
idx += _nClusters * _dim;
_codeBook[idx] = orig * (1. - _epsilon);
}
void VectorQuantization::splitCodeBook(uint &_nClusters, float _epsilon) {
dim3 block(d_dim, 1, 1);
dim3 grid(_nClusters, 1, 1);
hipLaunchKernelGGL(( splitVectorKernel), dim3(grid), dim3(block), 0, 0, d_codeBook, d_dim, _nClusters, _epsilon);
_nClusters *= 2;
}
void VectorQuantization::createCodeBook(uint _k, const float* _A, uint _N) {
uint *assign = new uint[_N];
uint *old_assign = new uint[_N];
uint *assignd;
float* countd;
uint* retirementCountd;
float* distd;
float* maxRadd;
hipMalloc(&assignd, _N * sizeof(uint));
hipMalloc(&d_codeBook, _k * d_dim * sizeof(float));
hipMalloc(&countd, _k * sizeof(float));
hipMalloc(&retirementCountd, _k * sizeof(uint));
hipMalloc(&distd, _k * _N * sizeof(float));
hipMalloc(&maxRadd, _k * sizeof(float));
uint nClusters = 1;
// initialize to get the first cluster average
hipMemset(assignd, 0, _N * sizeof(uint));
hipMemset(retirementCountd, 0, _k * sizeof(uint));
hipMemset(countd, 0, _k * sizeof(int));
hipMemset(d_codeBook, 0, d_dim * sizeof(float));
uint yElem = 16;
dim3 block(d_dim, 1, 1);
dim3 grid(nClusters, idiv(_N, yElem), 1);
size_t shmSize = d_dim * sizeof(float);
hipLaunchKernelGGL(( avgClusterKernel), dim3(grid), dim3(block), shmSize, 0, d_codeBook, countd,
retirementCountd, yElem, d_dim, _A, _N, assignd);
float epsilon = 0.0001;
while (nClusters < _k) {
splitCodeBook(nClusters, epsilon);
std::cout << "nClusters" << nClusters << std::endl;
uint converged = 0;
do {
hipMemset(countd, 0, _k * sizeof(int));
calcDist(distd, _A, d_codeBook, _N, nClusters, d_dim);
getAssignment(assignd, distd, _N, nClusters);
// getMaxRad(maxRadd, nClusters, assignd, _N, distd);
//outputVecUint("Assign", assignd, _N);
uint yElem = 256;
dim3 block(d_dim, 1, 1);
dim3 grid(nClusters, idiv(_N, yElem), 1);
size_t shmSize = d_dim * sizeof(float);
hipLaunchKernelGGL(( avgClusterKernel), dim3(grid), dim3(block), shmSize, 0, d_codeBook, countd,
retirementCountd, yElem, d_dim, _A, _N, assignd);
std::cout << nClusters << std::endl;
// outputVec("count:", countd, nClusters);
//outputVec("avg: ", d_codeBook, d_dim);
hipMemcpy(assign, assignd, _N * sizeof(uint),
hipMemcpyDeviceToHost);
converged = 0;
for (int i = 0; i < _N; i++) {
if (assign[i] != old_assign[i]) {
converged++;
}
}
memcpy(old_assign, assign, _N * sizeof(uint));
std::cout << "non- converged" << converged << std::endl;
} while (converged > 0.001 * _N);
// outputMat("dist:", distd, _N, nClusters);
// outputMat("codebook", d_codeBook, nClusters, d_dim );
getMaxRad(maxRadd, nClusters, assignd, _N, distd);
}
hipFree(distd);
hipFree(countd);
hipFree(retirementCountd);
hipFree(assignd);
delete[] old_assign;
delete[] assign;
}
} /* namespace */
| 409794380c1e8627302d60d26a65322660d85636.cu | #include "VectorQuantization.hh"
#define OUTPUT
#include "helper.hh"
namespace pqt {
/** default constructor */
VectorQuantization::VectorQuantization(uint _dim) :
d_dim(_dim), d_codeBook(NULL) {
}
VectorQuantization::~VectorQuantization() {
if (d_codeBook)
cudaFree(d_codeBook);
}
//__device__ float sqr(const float &x) {
// return x * x;
//}
/** for all vectors in A compute distance to all vectors in B of dimension _dim */
__global__ void calcDistKernel(float* _res, const float * _A, const float * _B,
uint _Arows, uint _Brows, uint _dim) {
extern __shared__ float shm[];
float *Ablock = shm;
float *Bblock = shm + blockDim.x * blockDim.y;
float *AB = Bblock + blockDim.x * blockDim.y;
uint id = threadIdx.x + threadIdx.y * blockDim.x;
uint arow = threadIdx.y + blockIdx.y * blockDim.y;
uint brow = threadIdx.y + blockIdx.x * blockDim.x;
uint ocol = threadIdx.x + blockIdx.x * blockDim.x;
uint AOffs = threadIdx.y * blockDim.x;
uint BOffs = threadIdx.x * blockDim.x;
AB[id] = 0.;
int j = blockIdx.z;
{
// for (int j = 0; j < _Acols; j += blockDim.x) {
// load block of A and B
uint col = threadIdx.x + j * blockDim.x;
Bblock[id] = 0.;
Ablock[id] = 0.;
if (col < _dim) {
if (brow < _Brows)
Bblock[id] = _B[brow * _dim + col];
if (arow < _Arows)
Ablock[id] = _A[arow * _dim + col];
}
__syncthreads();
// if ((col < _Acols) && (arow < _Arows))
// printf(" A B %i %f %f \n", id, Ablock[id], Bblock[id]);
// compute partial differences
for (int i = 0; i < blockDim.x; i++) {
AB[id] += sqr(Ablock[AOffs + i] - Bblock[BOffs + i]);
}
__syncthreads();
}
// write out the result
if ((arow < _Arows) && (ocol < _Brows)) {
// _res[arow][ocol] += AB[id];
atomicAdd(_res + (arow * _Brows + ocol), AB[id]);
// printf(" AB %i %i %i %f \n", id, arow, ocol, AB[id]);
}
}
void VectorQuantization::calcDist(float* _res, const float* _A, const float* _B,
uint _Arows, uint _Brows, uint _dim) const {
const uint blockSize = 16;
dim3 block(blockSize, blockSize, 1);
uint shmSize = (blockSize * blockSize * 3) * sizeof(float);
//std::cout << "requested shm: " << shmSize << std::endl;
cudaMemset(_res, 0, _Arows * _Brows * sizeof(float));
dim3 grid(idiv(_Brows, blockSize), idiv(_Arows, blockSize),
idiv(_dim, blockSize));
calcDistKernel<<<grid, block, shmSize>>>(_res, _A, _B, _Arows, _Brows,
_dim);
checkCudaErrors(cudaDeviceSynchronize());
// outputMat("dist: ", _res, 10, 10);
}
/** blockd Id.x corresponds to the cluster center, blockId.y is used to span multiple kernels
* will update the codebook vector of this center. As each y block is only adding some of the input vectors the last block is normalizing the vector
*/__global__ void avgClusterKernel(float* _codebook, float * _count,
uint *_retirementCount, uint _yElem, uint _dim, const float * _A,
uint _N, uint* _assignd) {
__shared__ bool amLast;
extern __shared__ float shm[];
float count = 0;
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
shm[i] = 0.;
}
int stop = (blockIdx.y + 1) * _yElem;
stop = (stop < _N) ? stop : _N;
// accumulate the vectors that belong to this cluster center
for (int n = blockIdx.y * _yElem; n < stop; n++) {
if (_assignd[n] == blockIdx.x) {
const float *v = _A + n * _dim;
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
shm[i] += v[i];
}
if (threadIdx.x == 0)
count++;
}
}
// store the result
__syncthreads();
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
atomicAdd(_codebook + blockIdx.x * _dim + i, shm[i]);
}
__threadfence();
if (threadIdx.x == 0) {
atomicAdd(_count + blockIdx.x, count);
uint ticket = atomicInc(_retirementCount + blockIdx.x, gridDim.y);
// If the ticket ID is equal to the number of blocks, we are the last block!
amLast = (ticket == gridDim.y - 1);
}
__syncthreads();
// the last block is responsible for dividing by the number of vectors added to this center
if (amLast) {
for (int i = threadIdx.x; i < _dim; i += blockDim.x) {
_codebook[blockIdx.x * _dim + i] /= _count[blockIdx.x];
}
// reset retirement count for next iteration
if (threadIdx.x == 0) {
_retirementCount[blockIdx.x] = 0;
}
}
}
/** compute the maximum Radius for each cluster, one block per cluster
*/__global__ void maxRadKernel(float* _maxRad, const uint* _assign, uint _N,
const float* _distMat) {
extern __shared__ float shm[];
float* sharedMax = shm;
uint c = blockIdx.x;
float vMax = 0.;
for (uint i = threadIdx.x; i < _N; i += blockDim.x) {
if (_assign[i] == c) {
float vMax2 = _distMat[i* gridDim.x + c];
if (vMax2 > vMax) {
vMax = vMax2;
}
}
}
sharedMax[threadIdx.x] = vMax;
for (uint stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride) {
float vMax2 = sharedMax[threadIdx.x + stride];
if (vMax2 > vMax) {
vMax = vMax2;
sharedMax[threadIdx.x] = vMax;
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
_maxRad[blockIdx.x] = sharedMax[0];
}
}
void VectorQuantization::getMaxRad(float *_maxRad, uint _nCluster, const uint* _assign, uint _N, const float* _distMat) const {
dim3 block(256, 1, 1);
dim3 grid(_nCluster, 1, 1);
uint shmsize = block.x * sizeof(float);
maxRadKernel<<<grid, block, shmsize>>>(_maxRad, _assign, _N, _distMat);
outputVec( "maxDist", _maxRad, _nCluster);
}
__global__ void assignKernel(uint* _assign, uint _N, const float* _distMat,
uint _nClusters) {
extern __shared__ float shm[];
float* sharedMin = shm;
uint* sharedIdx = (uint*) (shm + blockDim.x);
for (int row = blockIdx.x; row < _N; row += gridDim.x) {
// initialize with first element
const float* matRow = _distMat + row * _nClusters;
float vMin = matRow[0];
uint minIdx = 0;
for (uint i = threadIdx.x; i < _nClusters; i += blockDim.x) {
float vMin2 = matRow[i];
if (vMin2 < vMin) {
vMin = vMin2;
minIdx = i;
}
}
sharedMin[threadIdx.x] = vMin;
sharedIdx[threadIdx.x] = minIdx;
for (uint stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride) {
float vMin2 = sharedMin[threadIdx.x + stride];
if (vMin2 < vMin) {
vMin = vMin2;
sharedMin[threadIdx.x] = vMin;
sharedIdx[threadIdx.x] = sharedIdx[threadIdx.x + stride];
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
_assign[row] = sharedIdx[0];
}
}
}
// Assigns every one of the _N input vectors to its nearest cluster by running
// an argmin reduction per row of the precomputed distance matrix.
// _assignd:  device output array of _N cluster indices.
// _distMat:  device _N x _nClusters distance matrix.
void VectorQuantization::getAssignment(uint* _assignd, const float* _distMat,
        uint _N, uint _nClusters) const {
    // perform a minimum reduction per vector _N
    dim3 block;
    setReductionBlocks(block, _nClusters);
    uint nBlocks = (_N < MAX_BLOCKS) ? _N : MAX_BLOCKS;
    dim3 grid(nBlocks, 1, 1);
    // the kernel needs blockDim.x floats (values) plus blockDim.x uints
    // (indices); spell that out instead of assuming sizeof(uint)==sizeof(float)
    uint shmSize = block.x * (sizeof(float) + sizeof(uint));
    assignKernel<<<grid, block, shmSize>>>(_assignd, _N, _distMat, _nClusters);
    checkCudaErrors(cudaGetLastError());        // launch-configuration errors
    checkCudaErrors(cudaDeviceSynchronize());   // async execution errors
}
// LBG codebook split: each existing centre is replaced by two perturbed
// copies c*(1+eps) and c*(1-eps); the second copy is written _nClusters rows
// further down, doubling the codebook.
// Launch layout: one block per existing cluster, one thread per dimension
// (blockDim.x == _dim).
__global__ void splitVectorKernel(float* _codeBook, uint _dim, uint _nClusters,
        float _epsilon) {
    uint idx = blockIdx.x * _dim + threadIdx.x;
    float orig = _codeBook[idx];
    // float literals avoid an implicit promotion of the whole product to double
    _codeBook[idx] = orig * (1.0f + _epsilon);
    idx += _nClusters * _dim;
    _codeBook[idx] = orig * (1.0f - _epsilon);
}
// Doubles the codebook by splitting every centre into a (1 +/- _epsilon) pair
// and updates the caller's cluster count accordingly.
void VectorQuantization::splitCodeBook(uint &_nClusters, float _epsilon) {
    dim3 threads(d_dim, 1, 1);       // one thread per vector component
    dim3 blocks(_nClusters, 1, 1);   // one block per existing centre
    splitVectorKernel<<<blocks, threads>>>(d_codeBook, d_dim, _nClusters, _epsilon);
    _nClusters *= 2; // the kernel wrote two centres for every old one
}
// Builds a codebook of _k cluster centres from the _N input vectors in _A
// using the LBG splitting scheme: start from the mean of all vectors, then
// alternately double the codebook (splitCodeBook) and run Lloyd iterations
// (distances -> assignments -> centre averages) until fewer than 0.1% of the
// assignments change. The resulting centres stay in the d_codeBook member.
void VectorQuantization::createCodeBook(uint _k, const float* _A, uint _N) {
    uint *assign = new uint[_N];
    uint *old_assign = new uint[_N];
    // Initialise to an impossible cluster id so the first convergence test is
    // deterministic (old_assign was previously compared while uninitialised).
    memset(old_assign, 0xFF, _N * sizeof(uint));
    uint *assignd;          // per-vector cluster assignment (device)
    float* countd;          // per-cluster member count (device)
    uint* retirementCountd; // block-retirement counters for avgClusterKernel
    float* distd;           // _N x _k distance matrix (device)
    float* maxRadd;         // per-cluster maximum radius (device)
    checkCudaErrors(cudaMalloc(&assignd, _N * sizeof(uint)));
    checkCudaErrors(cudaMalloc(&d_codeBook, _k * d_dim * sizeof(float)));
    checkCudaErrors(cudaMalloc(&countd, _k * sizeof(float)));
    checkCudaErrors(cudaMalloc(&retirementCountd, _k * sizeof(uint)));
    checkCudaErrors(cudaMalloc(&distd, _k * _N * sizeof(float)));
    checkCudaErrors(cudaMalloc(&maxRadd, _k * sizeof(float)));
    uint nClusters = 1;
    // initialize to get the first cluster average: every vector belongs to
    // cluster 0, centre starts at zero
    cudaMemset(assignd, 0, _N * sizeof(uint));
    cudaMemset(retirementCountd, 0, _k * sizeof(uint));
    cudaMemset(countd, 0, _k * sizeof(float)); // countd is a float array
    cudaMemset(d_codeBook, 0, d_dim * sizeof(float));
    uint yElem = 16;
    dim3 block(d_dim, 1, 1);
    dim3 grid(nClusters, idiv(_N, yElem), 1);
    size_t shmSize = d_dim * sizeof(float);
    avgClusterKernel<<<grid, block, shmSize>>>(d_codeBook, countd,
            retirementCountd, yElem, d_dim, _A, _N, assignd);
    float epsilon = 0.0001f;
    while (nClusters < _k) {
        splitCodeBook(nClusters, epsilon);
        std::cout << "nClusters" << nClusters << std::endl;
        uint converged = 0;
        do {
            cudaMemset(countd, 0, _k * sizeof(float));
            calcDist(distd, _A, d_codeBook, _N, nClusters, d_dim);
            getAssignment(assignd, distd, _N, nClusters);
            // getMaxRad(maxRadd, nClusters, assignd, _N, distd);
            //outputVecUint("Assign", assignd, _N);
            // recompute the centre of every cluster as the mean of its members
            uint yElemInner = 256;
            dim3 blockInner(d_dim, 1, 1);
            dim3 gridInner(nClusters, idiv(_N, yElemInner), 1);
            avgClusterKernel<<<gridInner, blockInner, shmSize>>>(d_codeBook,
                    countd, retirementCountd, yElemInner, d_dim, _A, _N,
                    assignd);
            std::cout << nClusters << std::endl;
            // outputVec("count:", countd, nClusters);
            //outputVec("avg: ", d_codeBook, d_dim);
            cudaMemcpy(assign, assignd, _N * sizeof(uint),
                    cudaMemcpyDeviceToHost);
            // count how many vectors changed cluster since the last pass
            converged = 0;
            for (uint i = 0; i < _N; i++) {
                if (assign[i] != old_assign[i]) {
                    converged++;
                }
            }
            memcpy(old_assign, assign, _N * sizeof(uint));
            std::cout << "non- converged" << converged << std::endl;
        } while (converged > 0.001 * _N);
        // outputMat("dist:", distd, _N, nClusters);
        // outputMat("codebook", d_codeBook, nClusters, d_dim );
        getMaxRad(maxRadd, nClusters, assignd, _N, distd);
    }
    // d_codeBook is a class member and intentionally kept alive.
    cudaFree(maxRadd);   // was previously leaked
    cudaFree(distd);
    cudaFree(countd);
    cudaFree(retirementCountd);
    cudaFree(assignd);
    delete[] old_assign;
    delete[] assign;
}
} /* namespace */
|
2ec6a13b25429d74b69c39f9a8ea769049770846.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "Kernel3d.cuh"
#include "constants.h"
#include "Kernel3dGPU.h"
#include <hip/hip_cooperative_groups.h>
#include <helper_functions.h>
#include <helper_cuda.h>
namespace cg = cooperative_groups;
// Note: If you change the RADIUS, you should also change the unrolling below
// __constant__ float stencil[RADIUS + 1];
// Host-side setter for the __constant__ time step DT used by the kernels.
void writeDT(float val){
	checkCudaErrors(hipMemcpyToSymbol(DT, &val, sizeof(float)));
}
// Host-side setter for the __constant__ grid spacing DS used by the kernels.
void writeDS(float val){
	checkCudaErrors(hipMemcpyToSymbol(DS, &val, sizeof(float)));
}
// Host-side setter for the __constant__ wall impedance ZN used by the kernels.
void writeZN(float val){
	checkCudaErrors(hipMemcpyToSymbol(ZN, &val, sizeof(float)));
}
// Host-side setter for the __constant__ excitor-cell count numExcitor.
void writeNumExcitors(int val){
	checkCudaErrors(hipMemcpyToSymbol(numExcitor, &val, sizeof(int)));
}
// Host-side setter for the __constant__ excitation-mode selector excitorMode.
void writeExcitorMode(int val){
	checkCudaErrors(hipMemcpyToSymbol(excitorMode, &val, sizeof(int)));
}
// Extracts a bit field from a packed word: shift `val` right by `shift`
// positions, then keep only the bits selected by `mask`.
__device__ int getBitsCUDA(int val, int shift, int mask){
	const int shifted = val >> shift;
	return shifted & mask;
}
// Decodes the PML absorption coefficient sigma from the packed aux word.
// The 3-bit field at SIGMA_SHIFT holds the cell's PML layer index; it is
// scaled so the deepest layer reaches 0.5/DT.
__device__ float getSigmaCUDA(int aux){
	// float literal keeps the whole expression in single precision
	float PML_SCALE = (0.5f / DT / PML);
	int sigmaN = getBitsCUDA(aux, SIGMA_SHIFT, THREE_BIT);
	return PML_SCALE * (sigmaN);
}
// Decodes the one-bit beta flag from the packed aux word
// (used throughout as: 1 = regular cell, 0 = wall cell).
__device__ int getBetaCUDA(int aux){
	return getBitsCUDA(aux, BETA_SHIFT, ONE_BIT);
}
// Decodes the one-bit excitor flag from the packed aux word
// (1 = this cell injects the excitation velocity).
__device__ int getExcitorCUDA(int aux){
	return getBitsCUDA(aux, EXCITE_SHIFT, ONE_BIT);
}
// FDTD pressure update for a single cell on the staggered grid:
//   p_next = (p - rho*c^2*dt/ds * div(v)) / (1 + sigma' * dt)
// with sigma' = 1 - beta + sigma, so walls (beta = 0) and PML cells
// (sigma > 0) damp the pressure.
// The six velocity samples are this cell's own components plus the three
// neighbour components needed for the backward-difference divergence.
__device__ float pressureStep(
	float p,
	float v_x,
	float v_x_left,
	float v_y,
	float v_y_up,
	float v_z,
	float v_z_behind,
	int beta,
	float sigma
	)
{
	float COEFF_DIVERGENCE = RHO * Cs * Cs * DT/DS;
	// backward-difference divergence of the velocity field
	float divergence = v_x + v_y + v_z - v_x_left - v_y_up - v_z_behind;
	float p_denom = 1 + (1 - beta + sigma) * DT;
	return (p - COEFF_DIVERGENCE * divergence)/p_denom;
}
// __device__ float pressureStep2(
// int i,
// float4 *input,
// int *aux
// int stride_x,
// int stride_y
// ){
// float p = input[i].x;
// float divergence =
// input[i].y - input[i - 1].y
// + input[i].z - input[i - stride_y].z
// + input[i].w - input[i - stride_z].w;
// float sigma = getSigmaCUDA(aux[i]);
// float beta = getBetaCUDA(aux[i]);
// float sigma_prime = 1 - beta + sigma;
// float p_next = (p - RHO * Cs * Cs * DT * divergence)/(1 + sigma_prime * DT);
// return p_next;
// }
// __device__ float vStep2(
// int i,
// float4 *input,
// int *aux
// int stride
// ){
// float v = input[i].y;
// float beta = min(getBetaCUDA(aux[i]), getBetaCUDA(aux[i + stride]));
// float grad_x =
// }
__global__ void AudioKernel3D(
float4 *input,
float4 *output,
int *inputAux,
float *audioBuffer,
const int dimx,
const int dimy,
const int dimz,
int iter,
int i_global_listener,
int i_global_p_bore,
float p_mouth
)
{
const float COEFF_GRADIENT = (DT/RHO/DS);
const float ADMITTANCE = (1.0/ZN);
// printf("DT %f\n", DT);
// printf("DS %f\n", DS);
// printf("ZN %f\n", ZN);
bool validr = true;
bool validw = true;
const int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
const int gtidy = blockIdx.y * blockDim.y + threadIdx.y;
const int ltidx = threadIdx.x;
const int ltidy = threadIdx.y;
const int workx = blockDim.x;
const int worky = blockDim.y;
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float4 tile[k_blockDimMaxY + 2 * RADIUS][k_blockDimX + 2 * RADIUS];
const int stride_y = dimx + 2 * RADIUS;
const int stride_z = stride_y * (dimy + 2 * RADIUS);
int inputIndex = 0;
int outputIndex = 0;
// Advance inputIndex to start of inner volume
inputIndex += RADIUS * stride_y + RADIUS;
// Advance inputIndex to target element
inputIndex += gtidy * stride_y + gtidx;
float4 infront[RADIUS];
float4 behind[RADIUS];
float4 current;
const int tx = ltidx + RADIUS;
const int ty = ltidy + RADIUS;
// Check in bounds
//can't read from here
if ((gtidx >= dimx + RADIUS) || (gtidy >= dimy + RADIUS))
validr = false;
//can't read or write here
if ((gtidx >= dimx) || (gtidy >= dimy))
validw = false;
//outside dim + radius: can't read or write
//outside dim but within: can't write, but can read!
// Preload the "infront" and "behind" data
//what's this for?
//preload the behind at the beginning
//advance z layer to where it actually starts
//this loop runs RADIUS - 1 times
//cuz we later do another += stride_z to make it RADIUS total
for (int i = RADIUS - 2 ; i >= 0 ; i--)
{
if (validr) //if this is actually a valid read
behind[i] = input[inputIndex];
inputIndex += stride_z; //
}
if (validr)
current = input[inputIndex];
//Note at this point, current is at input index =
//RADIUS * (STRIDE_X = 1) + RADIUS * strideY + (RADIUS - 1) * strideZ
//It's at RADIUS - 1 for strideZ because the for loop starts off with an advance
//so the first iteration will start at the right location!
outputIndex = inputIndex;
inputIndex += stride_z;
//by here input index should be inputIndex += RADIUS * stride_z
//now read the things that are inputIndex += stride_z ahead
for (int i = 0 ; i < RADIUS ; i++)
{
if (validr)
infront[i] = input[inputIndex];
inputIndex += stride_z;
}
//inputIndex is now offset by 2 * RADIUS * stride_z
//inputIndex represents the furthests most position of the infront
// Step through the xy-planes
#pragma unroll 9
for (int iz = 0 ; iz < dimz ; iz++)
{
// Advance the slice (move the thread-front)
for (int i = RADIUS - 1 ; i > 0 ; i--)
behind[i] = behind[i - 1];
behind[0] = current;
current = infront[0];
#pragma unroll 2
for (int i = 0 ; i < RADIUS - 1 ; i++)
infront[i] = infront[i + 1];
if (validr)
infront[RADIUS - 1] = input[inputIndex];
inputIndex += stride_z;
outputIndex += stride_z;
cg::sync(cta);
// Note that for the work items on the boundary of the problem, the
// supplied index when reading the halo (below) may wrap to the
// previous/next row or even the previous/next xy-plane. This is
// acceptable since a) we disable the output write for these work
// items and b) there is at least one xy-plane before/after the
// current plane, so the access will be within bounds.
// Update the data slice in the local tile
// Halo above & below
if (ltidy < RADIUS)
{
tile[ltidy][tx] = input[outputIndex - RADIUS * stride_y];
tile[ltidy + worky + RADIUS][tx] = input[outputIndex + worky * stride_y];
}
// Halo left & right
if (ltidx < RADIUS)
{
tile[ty][ltidx] = input[outputIndex - RADIUS];
tile[ty][ltidx + workx + RADIUS] = input[outputIndex + workx];
}
tile[ty][tx] = current;
cg::sync(cta);
int aux = inputAux[outputIndex];
int auxRight = inputAux[outputIndex + 1];
int auxDown = inputAux[outputIndex + stride_y];
int auxInfront = inputAux[outputIndex + stride_z];
float4 value = current;
float4 valueLeft = tile[ty][tx - 1];
float4 valueRight = tile[ty][tx + 1];
float4 valueUp = tile[ty - 1][tx];
float4 valueDown = tile[ty + 1][tx];
float4 valueInfront = infront[0];
float4 valueBehind = behind[0];
float4 valueRightUp = tile[ty - 1][tx + 1];
float4 valueRightBehind = input[outputIndex + 1 - stride_z]; //fix: expensive global memory load
float4 valueDownLeft = tile[ty + 1][tx - 1];
float4 valueDownBehind = input[outputIndex + stride_y - stride_z]; //fix: expensive global memory load
float4 valueInfrontLeft = input[outputIndex - 1 + stride_z]; //fix
float4 valueInfrontUp = input[outputIndex - stride_y + stride_z]; //fix
float newPressure = pressureStep(
value.x,
value.y, valueLeft.y, //don't be confused, v_x is y oops
value.z, valueUp.z,
value.w, valueBehind.w,
getBetaCUDA(aux),
getSigmaCUDA(aux)
);
// if(abs(input[outputIndex + 1].x - valueRight.x) > 0.00001){
// newPressure = 0;
// }
float newPressureRight = pressureStep(
valueRight.x,
valueRight.y, value.y, //don't be confused, v_x is y oops
valueRight.z, valueRightUp.z,
valueRight.w, valueRightBehind.w,
getBetaCUDA(auxRight),
getSigmaCUDA(auxRight)
);
float newPressureDown = pressureStep(
valueDown.x,
valueDown.y, valueDownLeft.y, //don't be confused, v_x is y oops
valueDown.z, value.z,
valueDown.w, valueDownBehind.w,
getBetaCUDA(auxDown),
getSigmaCUDA(auxDown)
);
float newPressureInfront = pressureStep(
valueInfront.x,
valueInfront.y, valueInfrontLeft.y, //don't be confused, v_x is y oops
valueInfront.z, valueInfrontUp.z,
valueInfront.w, value.w,
getBetaCUDA(auxInfront),
getSigmaCUDA(auxInfront)
);
int isExcitor = getExcitorCUDA(aux);
int isBeta = getBetaCUDA(aux);
//int beta_vx_dir = getBitsCUDA(aux, BETA_VX_LEVEL, TWO_BIT) - 1;
//int beta_vx_n = getBitsCUDA(aux, BETA_VX_NORMALIZE, TWO_BIT);
//int beta_vy_dir = getBitsCUDA(aux, BETA_VY_LEVEL, TWO_BIT) - 1;
//int beta_vy_n = getBitsCUDA(aux, BETA_VY_NORMALIZE, TWO_BIT);
//int beta_vz_dir = getBitsCUDA(aux, BETA_VZ_LEVEL, TWO_BIT) - 1;
//int beta_vz_n = getBitsCUDA(aux, BETA_VZ_NORMALIZE, TWO_BIT);
// if beta_vx_dir = 1, selects first term, if -1 selects other term
// float vb_x = (max(beta_vx_dir, 0) * newPressure + min(beta_vx_dir, 0) * newPressureRight) * ADMITTANCE;
// float vb_y = (max(beta_vy_dir, 0) * newPressure + min(beta_vy_dir, 0) * newPressureDown) * ADMITTANCE;
// float vb_z = (max(beta_vz_dir, 0) * newPressure + min(beta_vz_dir, 0) * newPressureInfront) * ADMITTANCE;
float vb_x = 0;
float vb_y = 0;
float vb_z = 0;
vb_x =
(- newPressureRight * ADMITTANCE) * (1 - getBetaCUDA(aux))
+ (newPressure * ADMITTANCE) * (1 - getBetaCUDA(auxRight));
vb_y =
(- newPressureDown * ADMITTANCE) * (1 - getBetaCUDA(aux))
+ (newPressure * ADMITTANCE) * (1 - getBetaCUDA(auxDown));
vb_z =
(- newPressureInfront * ADMITTANCE) * (1 - getBetaCUDA(aux))
+ (newPressure * ADMITTANCE) * (1 - getBetaCUDA(auxInfront));
// if(getBetaCUDA(aux)){
// vb_x = - newPressure * ADMITTANCE;
// vb_y = - newPressure * ADMITTANCE;
// }
// if(getBetaCUDA(auxDown)){
// vb_x =
// }
if(isExcitor == 1){
// const int ubore_filter_position = 10 * 44100 - 1 - 4; //super hacky use audio buffer as global
if(excitorMode == 0){
float delta_p = max(p_mouth - input[i_global_p_bore].x, 0.0f);
// float delta_p_mod = max(0.05 * DELTA_P_MAX, delta_p);
float shelf = 0.5 + 0.5 * tanh(4 * (-1 + (DELTA_P_MAX - delta_p)/(0.01 * DELTA_P_MAX))); //unclear if DELTA_P_MAX is in the denominator...
// float shelf = 1;
float u_bore = shelf * W_J * H_R * max((1 - delta_p / DELTA_P_MAX), 0.0) * sqrt(2 * delta_p / RHO) ;
float excitation = u_bore / (DS * DS * numExcitor); //hashtag units!
float in = excitation;
//use audio buffer as extra stroage
int zIndex = 13 * 44100 * 5;
float z1 = 0;
float z2 = 0;
float out = 0;
if(iter % 2 == 0){
z1 = audioBuffer[zIndex];
z2 = audioBuffer[zIndex + 1];
out = in * a0 + z1;
audioBuffer[zIndex + 2] = in * a1 + z2 - b1 * out;
audioBuffer[zIndex + 3] = in * a2 - b2 * out;
}
else{
z1 = audioBuffer[zIndex + 2];
z2 = audioBuffer[zIndex + 3];
out = in * a0 + z1;
audioBuffer[zIndex] = in * a1 + z2 - b1 * out;
audioBuffer[zIndex + 1] = in * a2 - b2 * out;
}
//filtered output
vb_z = out;
vb_y = 0;
vb_x = 0;
}else if(excitorMode == 1){
vb_z = sin(iter * DT * 2 * 3.14159265359 * 440);
vb_y = 0;
vb_x = 0;
}else{
}
// printf("delta_p %f, p_mouth %f, p_bore %f, u_bore %f\n", delta_p, p_mouth/3000.0, input[i_global_p_bore].x, u_bore);
}
newPressure = isBeta * newPressure; //hard set wall pressures to zero
//velocity computations next to beta cells should not rely on pressure equation, so pressure doesn't matter
// else{
// vb_z += (max(beta_vz_dir, 0) * value.x + min(beta_vz_dir, 0) * valueInfront.x) * ADMITTANCE;
// }
float sigma = getSigmaCUDA(aux);
int beta_x = min(isBeta, getBetaCUDA(auxRight)); //if beta_vx_dir = -1 or 1, then beta_vx_dir = 0 as expected
float grad_x = newPressureRight - newPressure;
float sigma_prime_dt_x = (1 - beta_x + sigma) * DT;
float current_vx = value.y;
// float new_vx = beta_x * ( current_vx - COEFF_GRADIENT * grad_x + sigma_prime_dt_x * vb_x)/(beta_x + sigma_prime_dt_x);
float new_vx = (beta_x * current_vx - beta_x * COEFF_GRADIENT * grad_x + sigma_prime_dt_x * vb_x)/(beta_x + sigma_prime_dt_x);
int beta_y = min(isBeta, getBetaCUDA(auxDown)); //if beta_vx_dir = -1 or 1, then beta_vx_dir = 0 as expected
float grad_y = newPressureDown - newPressure;
float sigma_prime_dt_y = (1 - beta_y + sigma) * DT;
float current_vy = value.z;
float new_vy = (beta_y * current_vy - beta_y * COEFF_GRADIENT * grad_y + sigma_prime_dt_y * vb_y)/(beta_y + sigma_prime_dt_y);
int beta_z = min(isBeta, getBetaCUDA(auxInfront)); //if beta_vx_dir = -1 or 1, then beta_vx_dir = 0 as expected
float grad_z = newPressureInfront - newPressure;
float sigma_prime_dt_z = (1 - beta_z + sigma) * DT;
float current_vz = value.w;
float new_vz = (beta_z * current_vz - beta_z * COEFF_GRADIENT * grad_z + sigma_prime_dt_z * vb_z)/(beta_z + sigma_prime_dt_z);
value.x = newPressure;
value.y = new_vx;
value.z = new_vy;
value.w = new_vz;
if(validw)
output[outputIndex] = value;
// if(iter % OVERSAMPLE == 0 && outputIndex == i_global_listener){
// audioBuffer[iter/OVERSAMPLE] = newPressure;
// }
if(outputIndex == i_global_listener){
audioBuffer[iter] = newPressure;
}
// // Compute the output value
// float4 value = stencil[0] * current;
// #pragma unroll 4
// for (int i = 1 ; i <= RADIUS ; i++)
// {
// value += stencil[i] * (infront[i-1] + behind[i-1] + tile[ty - i][tx] + tile[ty + i][tx] + tile[ty][tx - i] + tile[ty][tx + i]);
// }
// // Store the output value
// if (validw)
// output[outputIndex] = value;
}
}
| 2ec6a13b25429d74b69c39f9a8ea769049770846.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "Kernel3d.cuh"
#include "constants.h"
#include "Kernel3dGPU.h"
#include <cooperative_groups.h>
#include <helper_functions.h>
#include <helper_cuda.h>
namespace cg = cooperative_groups;
// Note: If you change the RADIUS, you should also change the unrolling below
// __constant__ float stencil[RADIUS + 1];
void writeDT(float val){
checkCudaErrors(cudaMemcpyToSymbol(DT, &val, sizeof(float)));
}
void writeDS(float val){
checkCudaErrors(cudaMemcpyToSymbol(DS, &val, sizeof(float)));
}
void writeZN(float val){
checkCudaErrors(cudaMemcpyToSymbol(ZN, &val, sizeof(float)));
}
void writeNumExcitors(int val){
checkCudaErrors(cudaMemcpyToSymbol(numExcitor, &val, sizeof(int)));
}
void writeExcitorMode(int val){
checkCudaErrors(cudaMemcpyToSymbol(excitorMode, &val, sizeof(int)));
}
__device__ int getBitsCUDA(int val, int shift, int mask){
return (val >> shift) & mask;
}
__device__ float getSigmaCUDA(int aux){
float PML_SCALE = (0.5 / DT / PML);
int sigmaN = getBitsCUDA(aux, SIGMA_SHIFT, THREE_BIT);
return PML_SCALE * (sigmaN);
}
__device__ int getBetaCUDA(int aux){
return getBitsCUDA(aux, BETA_SHIFT, ONE_BIT);
}
__device__ int getExcitorCUDA(int aux){
return getBitsCUDA(aux, EXCITE_SHIFT, ONE_BIT);
}
__device__ float pressureStep(
float p,
float v_x,
float v_x_left,
float v_y,
float v_y_up,
float v_z,
float v_z_behind,
int beta,
float sigma
)
{
float COEFF_DIVERGENCE = RHO * Cs * Cs * DT/DS;
float divergence = v_x + v_y + v_z - v_x_left - v_y_up - v_z_behind;
float p_denom = 1 + (1 - beta + sigma) * DT;
return (p - COEFF_DIVERGENCE * divergence)/p_denom;
}
// __device__ float pressureStep2(
// int i,
// float4 *input,
// int *aux
// int stride_x,
// int stride_y
// ){
// float p = input[i].x;
// float divergence =
// input[i].y - input[i - 1].y
// + input[i].z - input[i - stride_y].z
// + input[i].w - input[i - stride_z].w;
// float sigma = getSigmaCUDA(aux[i]);
// float beta = getBetaCUDA(aux[i]);
// float sigma_prime = 1 - beta + sigma;
// float p_next = (p - RHO * Cs * Cs * DT * divergence)/(1 + sigma_prime * DT);
// return p_next;
// }
// __device__ float vStep2(
// int i,
// float4 *input,
// int *aux
// int stride
// ){
// float v = input[i].y;
// float beta = min(getBetaCUDA(aux[i]), getBetaCUDA(aux[i + stride]));
// float grad_x =
// }
__global__ void AudioKernel3D(
float4 *input,
float4 *output,
int *inputAux,
float *audioBuffer,
const int dimx,
const int dimy,
const int dimz,
int iter,
int i_global_listener,
int i_global_p_bore,
float p_mouth
)
{
const float COEFF_GRADIENT = (DT/RHO/DS);
const float ADMITTANCE = (1.0/ZN);
// printf("DT %f\n", DT);
// printf("DS %f\n", DS);
// printf("ZN %f\n", ZN);
bool validr = true;
bool validw = true;
const int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
const int gtidy = blockIdx.y * blockDim.y + threadIdx.y;
const int ltidx = threadIdx.x;
const int ltidy = threadIdx.y;
const int workx = blockDim.x;
const int worky = blockDim.y;
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float4 tile[k_blockDimMaxY + 2 * RADIUS][k_blockDimX + 2 * RADIUS];
const int stride_y = dimx + 2 * RADIUS;
const int stride_z = stride_y * (dimy + 2 * RADIUS);
int inputIndex = 0;
int outputIndex = 0;
// Advance inputIndex to start of inner volume
inputIndex += RADIUS * stride_y + RADIUS;
// Advance inputIndex to target element
inputIndex += gtidy * stride_y + gtidx;
float4 infront[RADIUS];
float4 behind[RADIUS];
float4 current;
const int tx = ltidx + RADIUS;
const int ty = ltidy + RADIUS;
// Check in bounds
//can't read from here
if ((gtidx >= dimx + RADIUS) || (gtidy >= dimy + RADIUS))
validr = false;
//can't read or write here
if ((gtidx >= dimx) || (gtidy >= dimy))
validw = false;
//outside dim + radius: can't read or write
//outside dim but within: can't write, but can read!
// Preload the "infront" and "behind" data
//what's this for?
//preload the behind at the beginning
//advance z layer to where it actually starts
//this loop runs RADIUS - 1 times
//cuz we later do another += stride_z to make it RADIUS total
for (int i = RADIUS - 2 ; i >= 0 ; i--)
{
if (validr) //if this is actually a valid read
behind[i] = input[inputIndex];
inputIndex += stride_z; //
}
if (validr)
current = input[inputIndex];
//Note at this point, current is at input index =
//RADIUS * (STRIDE_X = 1) + RADIUS * strideY + (RADIUS - 1) * strideZ
//It's at RADIUS - 1 for strideZ because the for loop starts off with an advance
//so the first iteration will start at the right location!
outputIndex = inputIndex;
inputIndex += stride_z;
//by here input index should be inputIndex += RADIUS * stride_z
//now read the things that are inputIndex += stride_z ahead
for (int i = 0 ; i < RADIUS ; i++)
{
if (validr)
infront[i] = input[inputIndex];
inputIndex += stride_z;
}
//inputIndex is now offset by 2 * RADIUS * stride_z
//inputIndex represents the furthests most position of the infront
// Step through the xy-planes
#pragma unroll 9
for (int iz = 0 ; iz < dimz ; iz++)
{
// Advance the slice (move the thread-front)
for (int i = RADIUS - 1 ; i > 0 ; i--)
behind[i] = behind[i - 1];
behind[0] = current;
current = infront[0];
#pragma unroll 2
for (int i = 0 ; i < RADIUS - 1 ; i++)
infront[i] = infront[i + 1];
if (validr)
infront[RADIUS - 1] = input[inputIndex];
inputIndex += stride_z;
outputIndex += stride_z;
cg::sync(cta);
// Note that for the work items on the boundary of the problem, the
// supplied index when reading the halo (below) may wrap to the
// previous/next row or even the previous/next xy-plane. This is
// acceptable since a) we disable the output write for these work
// items and b) there is at least one xy-plane before/after the
// current plane, so the access will be within bounds.
// Update the data slice in the local tile
// Halo above & below
if (ltidy < RADIUS)
{
tile[ltidy][tx] = input[outputIndex - RADIUS * stride_y];
tile[ltidy + worky + RADIUS][tx] = input[outputIndex + worky * stride_y];
}
// Halo left & right
if (ltidx < RADIUS)
{
tile[ty][ltidx] = input[outputIndex - RADIUS];
tile[ty][ltidx + workx + RADIUS] = input[outputIndex + workx];
}
tile[ty][tx] = current;
cg::sync(cta);
int aux = inputAux[outputIndex];
int auxRight = inputAux[outputIndex + 1];
int auxDown = inputAux[outputIndex + stride_y];
int auxInfront = inputAux[outputIndex + stride_z];
float4 value = current;
float4 valueLeft = tile[ty][tx - 1];
float4 valueRight = tile[ty][tx + 1];
float4 valueUp = tile[ty - 1][tx];
float4 valueDown = tile[ty + 1][tx];
float4 valueInfront = infront[0];
float4 valueBehind = behind[0];
float4 valueRightUp = tile[ty - 1][tx + 1];
float4 valueRightBehind = input[outputIndex + 1 - stride_z]; //fix: expensive global memory load
float4 valueDownLeft = tile[ty + 1][tx - 1];
float4 valueDownBehind = input[outputIndex + stride_y - stride_z]; //fix: expensive global memory load
float4 valueInfrontLeft = input[outputIndex - 1 + stride_z]; //fix
float4 valueInfrontUp = input[outputIndex - stride_y + stride_z]; //fix
float newPressure = pressureStep(
value.x,
value.y, valueLeft.y, //don't be confused, v_x is y oops
value.z, valueUp.z,
value.w, valueBehind.w,
getBetaCUDA(aux),
getSigmaCUDA(aux)
);
// if(abs(input[outputIndex + 1].x - valueRight.x) > 0.00001){
// newPressure = 0;
// }
float newPressureRight = pressureStep(
valueRight.x,
valueRight.y, value.y, //don't be confused, v_x is y oops
valueRight.z, valueRightUp.z,
valueRight.w, valueRightBehind.w,
getBetaCUDA(auxRight),
getSigmaCUDA(auxRight)
);
float newPressureDown = pressureStep(
valueDown.x,
valueDown.y, valueDownLeft.y, //don't be confused, v_x is y oops
valueDown.z, value.z,
valueDown.w, valueDownBehind.w,
getBetaCUDA(auxDown),
getSigmaCUDA(auxDown)
);
float newPressureInfront = pressureStep(
valueInfront.x,
valueInfront.y, valueInfrontLeft.y, //don't be confused, v_x is y oops
valueInfront.z, valueInfrontUp.z,
valueInfront.w, value.w,
getBetaCUDA(auxInfront),
getSigmaCUDA(auxInfront)
);
int isExcitor = getExcitorCUDA(aux);
int isBeta = getBetaCUDA(aux);
//int beta_vx_dir = getBitsCUDA(aux, BETA_VX_LEVEL, TWO_BIT) - 1;
//int beta_vx_n = getBitsCUDA(aux, BETA_VX_NORMALIZE, TWO_BIT);
//int beta_vy_dir = getBitsCUDA(aux, BETA_VY_LEVEL, TWO_BIT) - 1;
//int beta_vy_n = getBitsCUDA(aux, BETA_VY_NORMALIZE, TWO_BIT);
//int beta_vz_dir = getBitsCUDA(aux, BETA_VZ_LEVEL, TWO_BIT) - 1;
//int beta_vz_n = getBitsCUDA(aux, BETA_VZ_NORMALIZE, TWO_BIT);
// if beta_vx_dir = 1, selects first term, if -1 selects other term
// float vb_x = (max(beta_vx_dir, 0) * newPressure + min(beta_vx_dir, 0) * newPressureRight) * ADMITTANCE;
// float vb_y = (max(beta_vy_dir, 0) * newPressure + min(beta_vy_dir, 0) * newPressureDown) * ADMITTANCE;
// float vb_z = (max(beta_vz_dir, 0) * newPressure + min(beta_vz_dir, 0) * newPressureInfront) * ADMITTANCE;
float vb_x = 0;
float vb_y = 0;
float vb_z = 0;
vb_x =
(- newPressureRight * ADMITTANCE) * (1 - getBetaCUDA(aux))
+ (newPressure * ADMITTANCE) * (1 - getBetaCUDA(auxRight));
vb_y =
(- newPressureDown * ADMITTANCE) * (1 - getBetaCUDA(aux))
+ (newPressure * ADMITTANCE) * (1 - getBetaCUDA(auxDown));
vb_z =
(- newPressureInfront * ADMITTANCE) * (1 - getBetaCUDA(aux))
+ (newPressure * ADMITTANCE) * (1 - getBetaCUDA(auxInfront));
// if(getBetaCUDA(aux)){
// vb_x = - newPressure * ADMITTANCE;
// vb_y = - newPressure * ADMITTANCE;
// }
// if(getBetaCUDA(auxDown)){
// vb_x =
// }
if(isExcitor == 1){
// const int ubore_filter_position = 10 * 44100 - 1 - 4; //super hacky use audio buffer as global
if(excitorMode == 0){
float delta_p = max(p_mouth - input[i_global_p_bore].x, 0.0f);
// float delta_p_mod = max(0.05 * DELTA_P_MAX, delta_p);
float shelf = 0.5 + 0.5 * tanh(4 * (-1 + (DELTA_P_MAX - delta_p)/(0.01 * DELTA_P_MAX))); //unclear if DELTA_P_MAX is in the denominator...
// float shelf = 1;
float u_bore = shelf * W_J * H_R * max((1 - delta_p / DELTA_P_MAX), 0.0) * sqrt(2 * delta_p / RHO) ;
float excitation = u_bore / (DS * DS * numExcitor); //hashtag units!
float in = excitation;
//use audio buffer as extra stroage
int zIndex = 13 * 44100 * 5;
float z1 = 0;
float z2 = 0;
float out = 0;
if(iter % 2 == 0){
z1 = audioBuffer[zIndex];
z2 = audioBuffer[zIndex + 1];
out = in * a0 + z1;
audioBuffer[zIndex + 2] = in * a1 + z2 - b1 * out;
audioBuffer[zIndex + 3] = in * a2 - b2 * out;
}
else{
z1 = audioBuffer[zIndex + 2];
z2 = audioBuffer[zIndex + 3];
out = in * a0 + z1;
audioBuffer[zIndex] = in * a1 + z2 - b1 * out;
audioBuffer[zIndex + 1] = in * a2 - b2 * out;
}
//filtered output
vb_z = out;
vb_y = 0;
vb_x = 0;
}else if(excitorMode == 1){
vb_z = sin(iter * DT * 2 * 3.14159265359 * 440);
vb_y = 0;
vb_x = 0;
}else{
}
// printf("delta_p %f, p_mouth %f, p_bore %f, u_bore %f\n", delta_p, p_mouth/3000.0, input[i_global_p_bore].x, u_bore);
}
newPressure = isBeta * newPressure; //hard set wall pressures to zero
//velocity computations next to beta cells should not rely on pressure equation, so pressure doesn't matter
// else{
// vb_z += (max(beta_vz_dir, 0) * value.x + min(beta_vz_dir, 0) * valueInfront.x) * ADMITTANCE;
// }
float sigma = getSigmaCUDA(aux);
int beta_x = min(isBeta, getBetaCUDA(auxRight)); //if beta_vx_dir = -1 or 1, then beta_vx_dir = 0 as expected
float grad_x = newPressureRight - newPressure;
float sigma_prime_dt_x = (1 - beta_x + sigma) * DT;
float current_vx = value.y;
// float new_vx = beta_x * ( current_vx - COEFF_GRADIENT * grad_x + sigma_prime_dt_x * vb_x)/(beta_x + sigma_prime_dt_x);
float new_vx = (beta_x * current_vx - beta_x * COEFF_GRADIENT * grad_x + sigma_prime_dt_x * vb_x)/(beta_x + sigma_prime_dt_x);
int beta_y = min(isBeta, getBetaCUDA(auxDown)); //if beta_vx_dir = -1 or 1, then beta_vx_dir = 0 as expected
float grad_y = newPressureDown - newPressure;
float sigma_prime_dt_y = (1 - beta_y + sigma) * DT;
float current_vy = value.z;
float new_vy = (beta_y * current_vy - beta_y * COEFF_GRADIENT * grad_y + sigma_prime_dt_y * vb_y)/(beta_y + sigma_prime_dt_y);
int beta_z = min(isBeta, getBetaCUDA(auxInfront)); //if beta_vx_dir = -1 or 1, then beta_vx_dir = 0 as expected
float grad_z = newPressureInfront - newPressure;
float sigma_prime_dt_z = (1 - beta_z + sigma) * DT;
float current_vz = value.w;
float new_vz = (beta_z * current_vz - beta_z * COEFF_GRADIENT * grad_z + sigma_prime_dt_z * vb_z)/(beta_z + sigma_prime_dt_z);
value.x = newPressure;
value.y = new_vx;
value.z = new_vy;
value.w = new_vz;
if(validw)
output[outputIndex] = value;
// if(iter % OVERSAMPLE == 0 && outputIndex == i_global_listener){
// audioBuffer[iter/OVERSAMPLE] = newPressure;
// }
if(outputIndex == i_global_listener){
audioBuffer[iter] = newPressure;
}
// // Compute the output value
// float4 value = stencil[0] * current;
// #pragma unroll 4
// for (int i = 1 ; i <= RADIUS ; i++)
// {
// value += stencil[i] * (infront[i-1] + behind[i-1] + tile[ty - i][tx] + tile[ty + i][tx] + tile[ty][tx - i] + tile[ty][tx + i]);
// }
// // Store the output value
// if (validw)
// output[outputIndex] = value;
}
}
|
87edb516bd6afc0e5d717138cffafbc14776e971.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* ogreen@gatech.edu
* @date August, 2017
* @version v2
*
* @copyright Copyright 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
using length_t = int;
namespace hornets_nest {
/// TODO - changed hostKatzdata to pointer so that I can try to inherit it in
// the streaming case.
// Sets up all device-side state for iterative (bounded) Katz Centrality.
//
// @param hornet        graph to analyze (also handed to the load balancer)
// @param max_iteration upper bound on iterations; must be > 0
// @param K             number of top-ranked vertices to separate out
// @param max_degree    maximum vertex degree; used to derive alpha
// @param is_static     true  -> ping-pong two path-count buffers (static graph)
//                      false -> keep one path-count buffer per iteration
//                               (presumably needed by the streaming variant --
//                               see the TODO above this class)
KatzCentrality::KatzCentrality(HornetGraph& hornet, int max_iteration, int K,
                               int max_degree, bool is_static) :
                                  StaticAlgorithm(hornet),
                                  load_balacing(hornet),
                                  is_static(is_static) {
    if (max_iteration <= 0)
        ERROR("Number of max iterations should be greater than zero")
    hd_katzdata().nV            = hornet.nV();
    hd_katzdata().K             = K;
    hd_katzdata().max_degree    = max_degree;
    // alpha = 1/(max_degree + 1) < 1/max_degree, which keeps the Katz
    // series convergent.
    hd_katzdata().alpha         = 1.0 / (static_cast<double>(max_degree) + 1.0);
    hd_katzdata().max_iteration = max_iteration;
    auto nV = hornet.nV();
    if (is_static) {
        // One allocation split into two nV-sized halves: previous / current.
        gpu::allocate(hd_katzdata().num_paths_data, nV * 2);
        hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
        hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + nV;
        hd_katzdata().num_paths      = nullptr;
        h_paths_ptr                  = nullptr;
    }
    else {
        // One nV-sized slice per iteration, plus a device-side table of
        // per-iteration slice pointers (num_paths) mirroring h_paths_ptr.
        gpu::allocate(hd_katzdata().num_paths_data, nV * max_iteration);
        gpu::allocate(hd_katzdata().num_paths, max_iteration);
        host::allocate(h_paths_ptr, max_iteration);
        for(int i = 0; i < max_iteration; i++)
            h_paths_ptr[i] = hd_katzdata().num_paths_data + nV * i;
        hd_katzdata().num_paths_prev = h_paths_ptr[0];
        hd_katzdata().num_paths_curr = h_paths_ptr[1];
        host::copyToDevice(h_paths_ptr, max_iteration, hd_katzdata().num_paths);
    }
    // Per-vertex working arrays used by run().
    gpu::allocate(hd_katzdata().KC, nV);
    gpu::allocate(hd_katzdata().lower_bound, nV);
    gpu::allocate(hd_katzdata().upper_bound, nV);
    gpu::allocate(hd_katzdata().is_active, nV);
    gpu::allocate(hd_katzdata().vertex_array_sorted, nV);
    gpu::allocate(hd_katzdata().vertex_array_unsorted, nV);
    gpu::allocate(hd_katzdata().lower_bound_sorted, nV);
    gpu::allocate(hd_katzdata().lower_bound_unsorted, nV);
    reset();
}
// Frees all buffers acquired by the constructor; see release().
KatzCentrality::~KatzCentrality() {
    release();
}
// Rewinds the algorithm to its initial state so run() can be executed again:
// iteration counter back to 1 and the previous/current path-count pointers
// back to their first two slots.
void KatzCentrality::reset() {
    auto& data = hd_katzdata();
    data.iteration = 1;
    if (is_static) {
        // Static mode: the two halves of the single contiguous allocation.
        auto* base = data.num_paths_data;
        data.num_paths_prev = base;
        data.num_paths_curr = base + hornet.nV();
    }
    else {
        // Per-iteration mode: slots 0 and 1 of the slice table.
        data.num_paths_prev = h_paths_ptr[0];
        data.num_paths_curr = h_paths_ptr[1];
    }
}
// Releases every device/host buffer owned by this instance.
// Fix: is_active is allocated in the constructor but was never freed here,
// leaking one nV-sized device array per KatzCentrality instance.
void KatzCentrality::release(){
    gpu::free(hd_katzdata().num_paths_data);
    gpu::free(hd_katzdata().num_paths);
    gpu::free(hd_katzdata().KC);
    gpu::free(hd_katzdata().lower_bound);
    gpu::free(hd_katzdata().upper_bound);
    gpu::free(hd_katzdata().is_active);  // was missing -> device memory leak
    gpu::free(hd_katzdata().vertex_array_sorted);
    gpu::free(hd_katzdata().vertex_array_unsorted);
    gpu::free(hd_katzdata().lower_bound_sorted);
    gpu::free(hd_katzdata().lower_bound_unsorted);
    host::free(h_paths_ptr);
}
// Runs bounded Katz Centrality until the top-K vertices are separated from
// the rest (num_active <= K) or the iteration budget is exhausted.
void KatzCentrality::run() {
    forAllnumV(hornet, Init { hd_katzdata });
    hd_katzdata().iteration  = 1;
    hd_katzdata().num_active = hornet.nV();  // all vertices start active
    while (hd_katzdata().num_active > hd_katzdata().K &&
           hd_katzdata().iteration < hd_katzdata().max_iteration) {
        // Per-iteration constants: alpha^i scales this iteration's path
        // counts; the two geometric-series tails bound the final centrality
        // from below and above.
        hd_katzdata().alphaI = ::pow(hd_katzdata().alpha,
                                     hd_katzdata().iteration);
        hd_katzdata().lower_bound_const = ::pow(hd_katzdata().alpha,
                                                hd_katzdata().iteration + 1) /
                                                (1.0 - hd_katzdata().alpha);
        hd_katzdata().upper_bound_const = ::pow(hd_katzdata().alpha,
                                                hd_katzdata().iteration + 1) /
                                               (1.0 - hd_katzdata().alpha *
                                 static_cast<double>(hd_katzdata().max_degree));
        hd_katzdata().num_active = 0; // Each iteration the number of active
                                      // vertices is set to zero.
        forAllnumV (hornet, InitNumPathsPerIteration { hd_katzdata } );
        forAllEdges(hornet, UpdatePathCount          { hd_katzdata },
                    load_balacing);
        forAllnumV (hornet, UpdateKatzAndBounds      { hd_katzdata } );
        hd_katzdata.sync();  // publish device-side counters back to the host
        hd_katzdata().iteration++;
        if(is_static) {
            // Ping-pong the two halves of the static allocation.
            std::swap(hd_katzdata().num_paths_curr,
                      hd_katzdata().num_paths_prev);
        }
        else {
            // Advance to this iteration's dedicated slice.
            auto iter = hd_katzdata().iteration;
            hd_katzdata().num_paths_prev = h_paths_ptr[iter - 1];
            hd_katzdata().num_paths_curr = h_paths_ptr[iter - 0];
        }
        auto old_active_count = hd_katzdata().num_active;
        hd_katzdata().num_prev_active = hd_katzdata().num_active;
        hd_katzdata().num_active = 0; // Resetting active vertices for
                                      // sorting
        // Note: the sort orders the vertices in an *incremental* order based
        // on the lower bounds, while the algorithm requires a decremental
        // order. As such, we use the num_prev_active variable to store the
        // number of previously active vertices so we can find the K-th from
        // last vertex (i.e. walk from the tail of the array).
        xlib::CubSortByKey<double, vid_t>::srun
            (hd_katzdata().lower_bound_unsorted,
             hd_katzdata().vertex_array_unsorted,
             old_active_count, hd_katzdata().lower_bound_sorted,
             hd_katzdata().vertex_array_sorted);
        forAllnumV(hornet, CountActive { hd_katzdata } );
        hd_katzdata.sync();
    }
}
// This function should only be used directly within run() and is currently
// unused due to its large execution overhead.
// Debug helper: prints the K highest-ranked vertices, walking the sorted
// array from its tail (see the ordering note in run()). Columns printed per
// vertex: id, KC score, upper bound, and the upper-lower bound gap.
//
// Fixes: the ulong_t num_paths_curr/num_paths_prev host buffers and the
// vertex_array_unsorted copy were allocated/copied but never read -- removed;
// the output row was missing a separator between upper_bound[j] and the gap,
// so the two numbers printed fused together -- added "\t\t".
void KatzCentrality::printKMostImportant() {
    int*    vertex_array;
    double* KC;
    double* lower_bound;
    double* upper_bound;
    auto nV = hornet.nV();
    host::allocate(vertex_array, nV);
    host::allocate(KC, nV);
    host::allocate(lower_bound, nV);
    host::allocate(upper_bound, nV);
    gpu::copyToHost(hd_katzdata().lower_bound, nV, lower_bound);
    gpu::copyToHost(hd_katzdata().upper_bound, nV, upper_bound);
    gpu::copyToHost(hd_katzdata().KC, nV, KC);
    gpu::copyToHost(hd_katzdata().vertex_array_sorted, nV, vertex_array);
    if (hd_katzdata().num_prev_active > hd_katzdata().K) {
        for (int i = hd_katzdata().num_prev_active - 1;
                 i >= hd_katzdata().num_prev_active - hd_katzdata().K; i--) {
            vid_t j = vertex_array[i];
            std::cout << j << "\t\t" << KC[j] << "\t\t" << upper_bound[j]
                      << "\t\t" << upper_bound[j] - lower_bound[j] << "\n";
        }
    }
    std::cout << std::endl;
    host::free(vertex_array);
    host::free(KC);
    host::free(lower_bound);
    host::free(upper_bound);
}
// Returns the number of iterations executed so far (1-based; see run()).
int KatzCentrality::get_iteration_count() {
    return hd_katzdata().iteration;
}
// Stub: no host-side verification is implemented, so this unconditionally
// reports success.
bool KatzCentrality::validate() {
    return true;
}
} // namespace hornets_nest
| 87edb516bd6afc0e5d717138cffafbc14776e971.cu | /**
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* ogreen@gatech.edu
* @date August, 2017
* @version v2
*
* @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
using length_t = int;
namespace hornets_nest {
/// TODO - changed hostKatzdata to pointer so that I can try to inherit it in
// the streaming case.
// Sets up all device-side state for iterative (bounded) Katz Centrality.
//
// @param hornet        graph to analyze (also handed to the load balancer)
// @param max_iteration upper bound on iterations; must be > 0
// @param K             number of top-ranked vertices to separate out
// @param max_degree    maximum vertex degree; used to derive alpha
// @param is_static     true  -> ping-pong two path-count buffers (static graph)
//                      false -> keep one path-count buffer per iteration
//                               (presumably needed by the streaming variant --
//                               see the TODO above this class)
KatzCentrality::KatzCentrality(HornetGraph& hornet, int max_iteration, int K,
                               int max_degree, bool is_static) :
                                  StaticAlgorithm(hornet),
                                  load_balacing(hornet),
                                  is_static(is_static) {
    if (max_iteration <= 0)
        ERROR("Number of max iterations should be greater than zero")
    hd_katzdata().nV            = hornet.nV();
    hd_katzdata().K             = K;
    hd_katzdata().max_degree    = max_degree;
    // alpha = 1/(max_degree + 1) < 1/max_degree, which keeps the Katz
    // series convergent.
    hd_katzdata().alpha         = 1.0 / (static_cast<double>(max_degree) + 1.0);
    hd_katzdata().max_iteration = max_iteration;
    auto nV = hornet.nV();
    if (is_static) {
        // One allocation split into two nV-sized halves: previous / current.
        gpu::allocate(hd_katzdata().num_paths_data, nV * 2);
        hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
        hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + nV;
        hd_katzdata().num_paths      = nullptr;
        h_paths_ptr                  = nullptr;
    }
    else {
        // One nV-sized slice per iteration, plus a device-side table of
        // per-iteration slice pointers (num_paths) mirroring h_paths_ptr.
        gpu::allocate(hd_katzdata().num_paths_data, nV * max_iteration);
        gpu::allocate(hd_katzdata().num_paths, max_iteration);
        host::allocate(h_paths_ptr, max_iteration);
        for(int i = 0; i < max_iteration; i++)
            h_paths_ptr[i] = hd_katzdata().num_paths_data + nV * i;
        hd_katzdata().num_paths_prev = h_paths_ptr[0];
        hd_katzdata().num_paths_curr = h_paths_ptr[1];
        host::copyToDevice(h_paths_ptr, max_iteration, hd_katzdata().num_paths);
    }
    // Per-vertex working arrays used by run().
    gpu::allocate(hd_katzdata().KC, nV);
    gpu::allocate(hd_katzdata().lower_bound, nV);
    gpu::allocate(hd_katzdata().upper_bound, nV);
    gpu::allocate(hd_katzdata().is_active, nV);
    gpu::allocate(hd_katzdata().vertex_array_sorted, nV);
    gpu::allocate(hd_katzdata().vertex_array_unsorted, nV);
    gpu::allocate(hd_katzdata().lower_bound_sorted, nV);
    gpu::allocate(hd_katzdata().lower_bound_unsorted, nV);
    reset();
}
// Frees all buffers acquired by the constructor; see release().
KatzCentrality::~KatzCentrality() {
    release();
}
// Rewinds the algorithm to its initial state so run() can be executed again:
// iteration counter back to 1 and the previous/current path-count pointers
// back to their first two slots.
void KatzCentrality::reset() {
    auto& data = hd_katzdata();
    data.iteration = 1;
    if (is_static) {
        // Static mode: the two halves of the single contiguous allocation.
        auto* base = data.num_paths_data;
        data.num_paths_prev = base;
        data.num_paths_curr = base + hornet.nV();
    }
    else {
        // Per-iteration mode: slots 0 and 1 of the slice table.
        data.num_paths_prev = h_paths_ptr[0];
        data.num_paths_curr = h_paths_ptr[1];
    }
}
// Releases every device/host buffer owned by this instance.
// Fix: is_active is allocated in the constructor but was never freed here,
// leaking one nV-sized device array per KatzCentrality instance.
void KatzCentrality::release(){
    gpu::free(hd_katzdata().num_paths_data);
    gpu::free(hd_katzdata().num_paths);
    gpu::free(hd_katzdata().KC);
    gpu::free(hd_katzdata().lower_bound);
    gpu::free(hd_katzdata().upper_bound);
    gpu::free(hd_katzdata().is_active);  // was missing -> device memory leak
    gpu::free(hd_katzdata().vertex_array_sorted);
    gpu::free(hd_katzdata().vertex_array_unsorted);
    gpu::free(hd_katzdata().lower_bound_sorted);
    gpu::free(hd_katzdata().lower_bound_unsorted);
    host::free(h_paths_ptr);
}
// Runs bounded Katz Centrality until the top-K vertices are separated from
// the rest (num_active <= K) or the iteration budget is exhausted.
void KatzCentrality::run() {
    forAllnumV(hornet, Init { hd_katzdata });
    hd_katzdata().iteration  = 1;
    hd_katzdata().num_active = hornet.nV();  // all vertices start active
    while (hd_katzdata().num_active > hd_katzdata().K &&
           hd_katzdata().iteration < hd_katzdata().max_iteration) {
        // Per-iteration constants: alpha^i scales this iteration's path
        // counts; the two geometric-series tails bound the final centrality
        // from below and above.
        hd_katzdata().alphaI = std::pow(hd_katzdata().alpha,
                                        hd_katzdata().iteration);
        hd_katzdata().lower_bound_const = std::pow(hd_katzdata().alpha,
                                                hd_katzdata().iteration + 1) /
                                                (1.0 - hd_katzdata().alpha);
        hd_katzdata().upper_bound_const = std::pow(hd_katzdata().alpha,
                                                hd_katzdata().iteration + 1) /
                                               (1.0 - hd_katzdata().alpha *
                                 static_cast<double>(hd_katzdata().max_degree));
        hd_katzdata().num_active = 0; // Each iteration the number of active
                                      // vertices is set to zero.
        forAllnumV (hornet, InitNumPathsPerIteration { hd_katzdata } );
        forAllEdges(hornet, UpdatePathCount          { hd_katzdata },
                    load_balacing);
        forAllnumV (hornet, UpdateKatzAndBounds      { hd_katzdata } );
        hd_katzdata.sync();  // publish device-side counters back to the host
        hd_katzdata().iteration++;
        if(is_static) {
            // Ping-pong the two halves of the static allocation.
            std::swap(hd_katzdata().num_paths_curr,
                      hd_katzdata().num_paths_prev);
        }
        else {
            // Advance to this iteration's dedicated slice.
            auto iter = hd_katzdata().iteration;
            hd_katzdata().num_paths_prev = h_paths_ptr[iter - 1];
            hd_katzdata().num_paths_curr = h_paths_ptr[iter - 0];
        }
        auto old_active_count = hd_katzdata().num_active;
        hd_katzdata().num_prev_active = hd_katzdata().num_active;
        hd_katzdata().num_active = 0; // Resetting active vertices for
                                      // sorting
        // Note: the sort orders the vertices in an *incremental* order based
        // on the lower bounds, while the algorithm requires a decremental
        // order. As such, we use the num_prev_active variable to store the
        // number of previously active vertices so we can find the K-th from
        // last vertex (i.e. walk from the tail of the array).
        xlib::CubSortByKey<double, vid_t>::srun
            (hd_katzdata().lower_bound_unsorted,
             hd_katzdata().vertex_array_unsorted,
             old_active_count, hd_katzdata().lower_bound_sorted,
             hd_katzdata().vertex_array_sorted);
        forAllnumV(hornet, CountActive { hd_katzdata } );
        hd_katzdata.sync();
    }
}
// This function should only be used directly within run() and is currently
// unused due to its large execution overhead.
// Debug helper: prints the K highest-ranked vertices, walking the sorted
// array from its tail (see the ordering note in run()). Columns printed per
// vertex: id, KC score, upper bound, and the upper-lower bound gap.
//
// Fixes: the ulong_t num_paths_curr/num_paths_prev host buffers and the
// vertex_array_unsorted copy were allocated/copied but never read -- removed;
// the output row was missing a separator between upper_bound[j] and the gap,
// so the two numbers printed fused together -- added "\t\t".
void KatzCentrality::printKMostImportant() {
    int*    vertex_array;
    double* KC;
    double* lower_bound;
    double* upper_bound;
    auto nV = hornet.nV();
    host::allocate(vertex_array, nV);
    host::allocate(KC, nV);
    host::allocate(lower_bound, nV);
    host::allocate(upper_bound, nV);
    gpu::copyToHost(hd_katzdata().lower_bound, nV, lower_bound);
    gpu::copyToHost(hd_katzdata().upper_bound, nV, upper_bound);
    gpu::copyToHost(hd_katzdata().KC, nV, KC);
    gpu::copyToHost(hd_katzdata().vertex_array_sorted, nV, vertex_array);
    if (hd_katzdata().num_prev_active > hd_katzdata().K) {
        for (int i = hd_katzdata().num_prev_active - 1;
                 i >= hd_katzdata().num_prev_active - hd_katzdata().K; i--) {
            vid_t j = vertex_array[i];
            std::cout << j << "\t\t" << KC[j] << "\t\t" << upper_bound[j]
                      << "\t\t" << upper_bound[j] - lower_bound[j] << "\n";
        }
    }
    std::cout << std::endl;
    host::free(vertex_array);
    host::free(KC);
    host::free(lower_bound);
    host::free(upper_bound);
}
// Returns the number of iterations executed so far (1-based; see run()).
int KatzCentrality::get_iteration_count() {
    return hd_katzdata().iteration;
}
// Stub: no host-side verification is implemented, so this unconditionally
// reports success.
bool KatzCentrality::validate() {
    return true;
}
} // namespace hornets_nest
|
b54e9feab54b24ac92f68e4efffa3f246668ac30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Numerically stable logistic sigmoid 1 / (1 + e^-x).
// Splitting on the sign of x keeps the expf() argument non-positive, so it
// can never overflow for large |x|.
// Fix: use float literals (1.0f) -- the previous `1.0` promoted the division
// to double and truncated back to float, needlessly running fp64 math in a
// device function.
__device__ inline float stableSigmoid(float x) {
    if(x >= 0.0f) {
        float z = expf(-x);
        return 1.0f / (1.0f + z);
    } else {
        float z = expf(x);
        return z / (1.0f + z);
    }
}
// Fused GRU forward step for a batch of `rows` hidden states of width `cols`.
//
// xW and sU hold the three pre-computed gate projections per row, packed as
// [reset | update | candidate] (3 * cols values per row); b is the packed
// bias. `mask` may be null; otherwise mask[j] gates row j (e.g. padded
// sequence positions -- TODO confirm against caller). `final` selects which
// of the two candidate formulas is used (bias inside vs. outside the reset
// product).
// Launch shape: blocks stride over rows, threads stride over columns, so any
// grid/block configuration is valid.
__global__ void gGRUFastForward(float* out, const float* state, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols, bool final) {
    for(int bid = 0; bid < rows; bid += gridDim.x) {
        int j = bid + blockIdx.x;
        if(j < rows) {
            // Row mask: 1.0 when mask is absent, otherwise mask[j].
            float m = !mask || mask[j];
            float* rowOut = out + j * cols;
            const float* rowState = state + j * cols;
            const float* xWrow = xW + j * cols * 3;
            const float* sUrow = sU + j * cols * 3;
            for(int tid = 0; tid < cols; tid += blockDim.x) {
                int i = tid + threadIdx.x;
                if(i < cols) {
                    // r: reset gate (offset 0), z: update gate (offset cols).
                    float r = stableSigmoid(xWrow[i] + sUrow[i] + b[i]);
                    int k = i + cols;
                    float z = stableSigmoid(xWrow[k] + sUrow[k] + b[k]);
                    // Candidate activation at offset 2*cols.
                    int l = i + 2 * cols;
                    float h;
                    if(final)
                        h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
                    else
                        h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
                    // Blend candidate with previous state; this local `out`
                    // intentionally shadows the output pointer parameter.
                    float out = (1.0f - z) * h + z * rowState[i];
                    // Masked rows pass the previous state through unchanged.
                    rowOut[i] = m * out + (1 - m) * rowState[i];
                }
            }
        }
    }
} | b54e9feab54b24ac92f68e4efffa3f246668ac30.cu | #include "includes.h"
// Numerically stable logistic sigmoid 1 / (1 + e^-x).
// Splitting on the sign of x keeps the expf() argument non-positive, so it
// can never overflow for large |x|.
// Fix: use float literals (1.0f) -- the previous `1.0` promoted the division
// to double and truncated back to float, needlessly running fp64 math in a
// device function.
__device__ inline float stableSigmoid(float x) {
    if(x >= 0.0f) {
        float z = expf(-x);
        return 1.0f / (1.0f + z);
    } else {
        float z = expf(x);
        return z / (1.0f + z);
    }
}
// Fused GRU forward step for a batch of `rows` hidden states of width `cols`.
//
// xW and sU hold the three pre-computed gate projections per row, packed as
// [reset | update | candidate] (3 * cols values per row); b is the packed
// bias. `mask` may be null; otherwise mask[j] gates row j (e.g. padded
// sequence positions -- TODO confirm against caller). `final` selects which
// of the two candidate formulas is used (bias inside vs. outside the reset
// product).
// Launch shape: blocks stride over rows, threads stride over columns, so any
// grid/block configuration is valid.
__global__ void gGRUFastForward(float* out, const float* state, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols, bool final) {
    for(int bid = 0; bid < rows; bid += gridDim.x) {
        int j = bid + blockIdx.x;
        if(j < rows) {
            // Row mask: 1.0 when mask is absent, otherwise mask[j].
            float m = !mask || mask[j];
            float* rowOut = out + j * cols;
            const float* rowState = state + j * cols;
            const float* xWrow = xW + j * cols * 3;
            const float* sUrow = sU + j * cols * 3;
            for(int tid = 0; tid < cols; tid += blockDim.x) {
                int i = tid + threadIdx.x;
                if(i < cols) {
                    // r: reset gate (offset 0), z: update gate (offset cols).
                    float r = stableSigmoid(xWrow[i] + sUrow[i] + b[i]);
                    int k = i + cols;
                    float z = stableSigmoid(xWrow[k] + sUrow[k] + b[k]);
                    // Candidate activation at offset 2*cols.
                    int l = i + 2 * cols;
                    float h;
                    if(final)
                        h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
                    else
                        h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
                    // Blend candidate with previous state; this local `out`
                    // intentionally shadows the output pointer parameter.
                    float out = (1.0f - z) * h + z * rowState[i];
                    // Masked rows pass the previous state through unchanged.
                    rowOut[i] = m * out + (1 - m) * rowState[i];
                }
            }
        }
    }
} |
c9578c0f17c69ef923db9be5460c9ff879e2e0eb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Parquet reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace experimental {
namespace io {
namespace detail {
namespace parquet {
// Import functionality that's independent of legacy code
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Parquet datatype to cuDF type enum
*/
/**
 * @brief Function that translates Parquet datatype to cuDF type enum
 *
 * The logical (converted) type takes precedence over the physical type.
 * DECIMAL maps to FLOAT64 unless it has zero scale and plain INT32/INT64
 * storage, in which case it falls through and is read as plain integers.
 * Returns type_id::EMPTY when no mapping exists.
 */
constexpr type_id to_type_id(parquet::Type physical,
                             parquet::ConvertedType logical,
                             bool strings_to_categorical,
                             type_id timestamp_type_id, int32_t decimal_scale) {
  // Logical type used for actual data interpretation; the legacy converted
  // type is superceded by 'logical' type whenever available.
  switch (logical) {
    case parquet::UINT_8:
    case parquet::INT_8:
      return type_id::INT8;
    case parquet::UINT_16:
    case parquet::INT_16:
      return type_id::INT16;
    case parquet::DATE:
      return type_id::TIMESTAMP_DAYS;
    // Timestamps honor a user-requested override type when one is set.
    case parquet::TIMESTAMP_MICROS:
      return (timestamp_type_id != type_id::EMPTY)
                 ? timestamp_type_id
                 : type_id::TIMESTAMP_MICROSECONDS;
    case parquet::TIMESTAMP_MILLIS:
      return (timestamp_type_id != type_id::EMPTY)
                 ? timestamp_type_id
                 : type_id::TIMESTAMP_MILLISECONDS;
    case parquet::DECIMAL:
      if (decimal_scale != 0 ||
          (physical != parquet::INT32 && physical != parquet::INT64)) {
        return type_id::FLOAT64;
      }
      break;  // scale-0 int-backed decimals decode via the physical type below
    default:
      break;
  }

  // Physical storage type supported by Parquet; controls the on-disk storage
  // format in combination with the encoding type.
  switch (physical) {
    case parquet::BOOLEAN:
      return type_id::BOOL8;
    case parquet::INT32:
      return type_id::INT32;
    case parquet::INT64:
      return type_id::INT64;
    case parquet::FLOAT:
      return type_id::FLOAT32;
    case parquet::DOUBLE:
      return type_id::FLOAT64;
    case parquet::BYTE_ARRAY:
    case parquet::FIXED_LEN_BYTE_ARRAY:
      // Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring)
      return strings_to_categorical ? type_id::INT32 : type_id::STRING;
    case parquet::INT96:
      return (timestamp_type_id != type_id::EMPTY)
                 ? timestamp_type_id
                 : type_id::TIMESTAMP_NANOSECONDS;
    default:
      break;
  }

  return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to Parquet clock frequency
*/
/**
 * @brief Function that translates cuDF time unit to Parquet clock frequency
 *
 * Returns ticks-per-second for the given timestamp type, or 0 for any
 * non-timestamp type (callers treat 0 as "no conversion needed").
 */
constexpr int32_t to_clockrate(type_id timestamp_type_id) {
  if (timestamp_type_id == type_id::TIMESTAMP_SECONDS) { return 1; }
  if (timestamp_type_id == type_id::TIMESTAMP_MILLISECONDS) { return 1000; }
  if (timestamp_type_id == type_id::TIMESTAMP_MICROSECONDS) { return 1000000; }
  if (timestamp_type_id == type_id::TIMESTAMP_NANOSECONDS) {
    return 1000000000;
  }
  return 0;
}
/**
* @brief Function that returns the required the number of bits to store a value
*/
/**
 * @brief Function that returns the required the number of bits to store a value
 *
 * Thin wrapper over CompactProtocolReader::NumRequiredBits, narrowed to T
 * (uint8_t by default).
 */
template <typename T = uint8_t>
T required_bits(uint32_t max_level) {
  return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
/**
 * @brief Derives the (type_width, clock_rate, converted_type) triple used by
 * the GPU decode kernels for one column.
 *
 * @param column_type_id    cuDF type the column decodes to
 * @param timestamp_type_id requested timestamp override (EMPTY for none)
 * @param physical          Parquet physical storage type
 * @param converted         Parquet converted (logical) type
 * @param length            element width for FIXED_LEN_BYTE_ARRAY columns
 */
std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
                                                     type_id timestamp_type_id,
                                                     parquet::Type physical,
                                                     int8_t converted,
                                                     int32_t length) {
  // Fixed-length byte arrays carry their element width in the schema;
  // everything else defaults to 0 (width implied by the decoded type).
  int32_t type_width = 0;
  if (physical == parquet::FIXED_LEN_BYTE_ARRAY) { type_width = length; }

  int32_t clock_rate = 0;
  switch (column_type_id) {
    case type_id::INT8:  type_width = 1; break;  // I32 -> I8
    case type_id::INT16: type_width = 2; break;  // I32 -> I16
    case type_id::INT32: type_width = 4; break;  // str -> hash32
    default:
      if (is_timestamp(data_type{column_type_id})) {
        clock_rate = to_clockrate(timestamp_type_id);
      }
      break;
  }

  // DECIMAL is only honored when decoding to float64; drop it otherwise.
  int8_t converted_type = converted;
  if (converted_type == parquet::DECIMAL &&
      column_type_id != type_id::FLOAT64) {
    converted_type = parquet::UNKNOWN;  // Not converting to float64
  }
  return std::make_tuple(type_width, clock_rate, converted_type);
}
} // namespace
/**
* @brief Class for parsing dataset metadata
*/
/**
 * @brief Class for parsing dataset metadata
 *
 * Parses the Parquet footer of `source` into this FileMetaData and provides
 * row-group / column selection helpers for the reader.
 */
struct metadata : public FileMetaData {
  // Validates the magic numbers and footer length, then Thrift-decodes the
  // footer and initializes the flattened schema.
  explicit metadata(datasource *source) {
    constexpr auto header_len = sizeof(file_header_s);
    constexpr auto ender_len = sizeof(file_ender_s);

    const auto len = source->size();
    const auto header_buffer = source->get_buffer(0, header_len);
    const auto header = (const file_header_s *)header_buffer->data();
    const auto ender_buffer = source->get_buffer(len - ender_len, ender_len);
    const auto ender = (const file_ender_s *)ender_buffer->data();
    CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
    CUDF_EXPECTS(
        header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC,
        "Corrupted header or footer");
    CUDF_EXPECTS(ender->footer_len != 0 &&
                     ender->footer_len <= (len - header_len - ender_len),
                 "Incorrect footer length");

    const auto buffer = source->get_buffer(len - ender->footer_len - ender_len,
                                           ender->footer_len);
    CompactProtocolReader cp(buffer->data(), ender->footer_len);
    CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
    CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
  }

  inline int get_total_rows() const { return num_rows; }
  inline int get_num_row_groups() const { return row_groups.size(); }
  // NOTE(review): indexes row_groups[0] unconditionally -- presumably the
  // file always has at least one row group; verify against callers.
  inline int get_num_columns() const { return row_groups[0].columns.size(); }

  // Joins a nested schema path into a single dotted column name.
  std::string get_column_name(const std::vector<std::string> &path_in_schema) {
    std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : "";
    for (size_t i = 1; i < path_in_schema.size(); i++) {
      s += "." + path_in_schema[i];
    }
    return s;
  }

  // Column names in schema order, taken from the first row group (empty when
  // there are no row groups).
  std::vector<std::string> get_column_names() {
    std::vector<std::string> all_names;
    if (row_groups.size() != 0) {
      for (const auto &chunk : row_groups[0].columns) {
        all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema));
      }
    }
    return all_names;
  }

  /**
   * @brief Extracts the column name used for the row indexes in a dataframe
   *
   * PANDAS adds its own metadata to the key_value section when writing out the
   * dataframe to a file to aid in exact reconstruction. The JSON-formatted
   * metadata contains the index column(s) and PANDA-specific datatypes.
   *
   * @return std::string Name of the index column, or "" when no pandas
   * metadata (or no index entry) is present
   */
  std::string get_pandas_index_name() {
    auto it =
        std::find_if(key_value_metadata.begin(), key_value_metadata.end(),
                     [](const auto &item) { return item.key == "pandas"; });

    if (it != key_value_metadata.end()) {
      // Lightweight string scan of the JSON payload: pull the first entry of
      // the "index_columns" array without a full JSON parse.
      const auto pos = it->value.find("index_columns");
      if (pos != std::string::npos) {
        const auto begin = it->value.find('[', pos);
        const auto end = it->value.find(']', begin);
        if ((end - begin) > 1) {
          return it->value.substr(begin + 2, end - begin - 3);
        }
      }
    }
    return "";
  }

  /**
   * @brief Filters and reduces down to a selection of row groups
   *
   * @param row_group Index of the row group to select (-1 selects by row
   * range instead)
   * @param row_start Starting row of the selection (updated in place)
   * @param row_count Total number of rows selected (updated in place;
   * -1 means "all remaining")
   *
   * @return List of row group indexes and its starting row
   */
  auto select_row_groups(int row_group, int &row_start, int &row_count) {
    std::vector<std::pair<int, int>> selection;

    if (row_group != -1) {
      CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group");
      for (int i = 0; i < row_group; ++i) {
        row_start += row_groups[i].num_rows;
      }
      selection.emplace_back(row_group, row_start);
      row_count = row_groups[row_group].num_rows;
    } else {
      row_start = std::max(row_start, 0);
      if (row_count == -1) {
        row_count = get_total_rows();
      }
      CUDF_EXPECTS(row_count >= 0, "Invalid row count");
      CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");

      // Walk row groups accumulating rows until the requested window
      // [row_start, row_start + row_count) is covered.
      for (int i = 0, count = 0; i < (int)row_groups.size(); ++i) {
        count += row_groups[i].num_rows;
        if (count > row_start || count == 0) {
          selection.emplace_back(i, count - row_groups[i].num_rows);
        }
        if (count >= (row_start + row_count)) {
          break;
        }
      }
    }

    return selection;
  }

  /**
   * @brief Filters and reduces down to a selection of columns
   *
   * @param use_names List of column names to select (empty selects all)
   * @param include_index Whether to always include the PANDAS index column
   * @param pandas_index Name of the PANDAS index column
   *
   * @return List of (schema index, column name) pairs
   */
  auto select_columns(std::vector<std::string> use_names, bool include_index,
                      const std::string &pandas_index) {
    std::vector<std::pair<int, std::string>> selection;

    const auto names = get_column_names();
    if (use_names.empty()) {
      // No columns specified; include all in the dataset
      for (const auto &name : names) {
        selection.emplace_back(selection.size(), name);
      }
    } else {
      // Load subset of columns; include PANDAS index unless excluded
      if (include_index) {
        if (std::find(use_names.begin(), use_names.end(), pandas_index) ==
            use_names.end()) {
          use_names.push_back(pandas_index);
        }
      }
      // Preserve the caller's requested order, not schema order.
      for (const auto &use_name : use_names) {
        for (size_t i = 0; i < names.size(); ++i) {
          if (names[i] == use_name) {
            selection.emplace_back(i, names[i]);
            break;
          }
        }
      }
    }

    return selection;
  }
};
/**
 * @brief Returns the total number of pages (data + dictionary) across chunks.
 *
 * Uploads the chunk descriptors, decodes the page headers on the device
 * (which fills in per-chunk page counts), copies the descriptors back, and
 * sums the counts on the host. `stream` is synchronized before the sum.
 */
size_t reader::impl::count_page_headers(
    const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
    hipStream_t stream) {
  size_t total_pages = 0;

  CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
                          chunks.memory_size(), hipMemcpyHostToDevice,
                          stream));
  CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
  CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(),
                          chunks.memory_size(), hipMemcpyDeviceToHost,
                          stream));
  CUDA_TRY(hipStreamSynchronize(stream));

  for (size_t c = 0; c < chunks.size(); c++) {
    total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
  }

  return total_pages;
}
/**
 * @brief Decodes all page headers into `pages`.
 *
 * Each chunk is given a contiguous slice of the device-side page array
 * (max_num_pages entries, laid out in chunk order); the device pass then
 * fills in the per-page metadata, which is copied back to the host.
 * Assumes `pages` was sized by a prior count_page_headers() call.
 */
void reader::impl::decode_page_headers(
    const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
    const hostdevice_vector<gpu::PageInfo> &pages, hipStream_t stream) {
  // Assign each chunk its slice of the shared page-info array.
  for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
    chunks[c].max_num_pages =
        chunks[c].num_data_pages + chunks[c].num_dict_pages;
    chunks[c].page_info = pages.device_ptr(page_count);
    page_count += chunks[c].max_num_pages;
  }
  CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
                          chunks.memory_size(), hipMemcpyHostToDevice,
                          stream));
  CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
  CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
                          pages.memory_size(), hipMemcpyDeviceToHost,
                          stream));
  CUDA_TRY(hipStreamSynchronize(stream));
}
/**
 * @brief Decompresses all compressed page data into one device buffer.
 *
 * Pages are grouped by codec (GZIP / SNAPPY / BROTLI) and dispatched in
 * per-codec batches; each page's page_data pointer is redirected into the
 * returned buffer, which the caller must keep alive while pages are in use.
 */
rmm::device_buffer reader::impl::decompress_page_data(
    const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
    const hostdevice_vector<gpu::PageInfo> &pages, hipStream_t stream) {
  // Invokes `f` with the global page index of every page belonging to a
  // chunk compressed with `codec`.
  auto for_each_codec_page = [&](parquet::Compression codec,
                                 const std::function<void(size_t)> &f) {
    for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
      const auto page_stride = chunks[c].max_num_pages;
      if (chunks[c].codec == codec) {
        for (int k = 0; k < page_stride; k++) {
          f(page_count + k);
        }
      }
      page_count += page_stride;
    }
  };

  // Brotli scratch memory for decompressing
  rmm::device_vector<uint8_t> debrotli_scratch;

  // Count the exact number of compressed pages
  size_t num_comp_pages = 0;
  size_t total_decomp_size = 0;
  std::array<std::pair<parquet::Compression, size_t>, 3> codecs{
      std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0),
      std::make_pair(parquet::BROTLI, 0)};

  for (auto &codec : codecs) {
    for_each_codec_page(codec.first, [&](size_t page) {
      total_decomp_size += pages[page].uncompressed_page_size;
      codec.second++;
      num_comp_pages++;
    });
    if (codec.first == parquet::BROTLI && codec.second > 0) {
      debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second));
    }
  }

  // Dispatch batches of pages to decompress for each codec
  rmm::device_buffer decomp_pages(total_decomp_size, stream);
  hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_comp_pages, stream);
  hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_comp_pages,
                                                      stream);

  size_t decomp_offset = 0;  // running offset into decomp_pages
  int32_t argc = 0;          // running index into inflate_in/inflate_out
  for (const auto &codec : codecs) {
    if (codec.second > 0) {
      int32_t start_pos = argc;

      // Build the src/dst argument list for this codec's batch, and point
      // each page at its slot in the output buffer.
      for_each_codec_page(codec.first, [&](size_t page) {
        auto dst_base = static_cast<uint8_t *>(decomp_pages.data());
        inflate_in[argc].srcDevice = pages[page].page_data;
        inflate_in[argc].srcSize = pages[page].compressed_page_size;
        inflate_in[argc].dstDevice = dst_base + decomp_offset;
        inflate_in[argc].dstSize = pages[page].uncompressed_page_size;

        inflate_out[argc].bytes_written = 0;
        inflate_out[argc].status = static_cast<uint32_t>(-1000);
        inflate_out[argc].reserved = 0;

        pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice;
        decomp_offset += inflate_in[argc].dstSize;
        argc++;
      });

      CUDA_TRY(hipMemcpyAsync(
          inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos),
          sizeof(decltype(inflate_in)::value_type) * (argc - start_pos),
          hipMemcpyHostToDevice, stream));
      CUDA_TRY(hipMemcpyAsync(
          inflate_out.device_ptr(start_pos), inflate_out.host_ptr(start_pos),
          sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
          hipMemcpyHostToDevice, stream));
      switch (codec.first) {
        case parquet::GZIP:
          CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos),
                              inflate_out.device_ptr(start_pos),
                              argc - start_pos, 1, stream))
          break;
        case parquet::SNAPPY:
          CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos),
                              inflate_out.device_ptr(start_pos),
                              argc - start_pos, stream));
          break;
        case parquet::BROTLI:
          CUDA_TRY(gpu_debrotli(
              inflate_in.device_ptr(start_pos),
              inflate_out.device_ptr(start_pos), debrotli_scratch.data().get(),
              debrotli_scratch.size(), argc - start_pos, stream));
          break;
        default:
          CUDF_EXPECTS(false, "Unexpected decompression dispatch");
          break;
      }
      CUDA_TRY(hipMemcpyAsync(
          inflate_out.host_ptr(start_pos), inflate_out.device_ptr(start_pos),
          sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
          hipMemcpyDeviceToHost, stream));
    }
  }
  CUDA_TRY(hipStreamSynchronize(stream));

  // Update the page information in device memory with the updated value of
  // page_data; it now points to the uncompressed data buffer
  CUDA_TRY(hipMemcpyAsync(pages.device_ptr(), pages.host_ptr(),
                          pages.memory_size(), hipMemcpyHostToDevice,
                          stream));

  return decomp_pages;
}
/**
 * @brief Decodes the page data into the output column buffers.
 *
 * Builds string-dictionary indexes where needed, points every chunk at its
 * destination column buffer (via chunk_map), runs the device decode for rows
 * [min_row, min_row + total_rows), and accumulates per-column null counts
 * from the per-page valid counts.
 */
void reader::impl::decode_page_data(
    const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
    const hostdevice_vector<gpu::PageInfo> &pages, size_t min_row,
    size_t total_rows, const std::vector<int> &chunk_map,
    std::vector<column_buffer> &out_buffers, hipStream_t stream) {
  auto is_dict_chunk = [](const gpu::ColumnChunkDesc &chunk) {
    return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
  };

  // Count the number of string dictionary entries
  // NOTE: Assumes first page in the chunk is always the dictionary page
  size_t total_str_dict_indexes = 0;
  for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
    if (is_dict_chunk(chunks[c])) {
      total_str_dict_indexes += pages[page_count].num_values;
    }
    page_count += chunks[c].max_num_pages;
  }

  // Build index for string dictionaries since they can't be indexed
  // directly due to variable-sized elements
  rmm::device_vector<gpu::nvstrdesc_s> str_dict_index;
  if (total_str_dict_indexes > 0) {
    str_dict_index.resize(total_str_dict_indexes);
  }

  // Update chunks with pointers to column data
  for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
    if (is_dict_chunk(chunks[c])) {
      chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs;
      str_ofs += pages[page_count].num_values;
    }
    chunks[c].column_data_base = out_buffers[chunk_map[c]].data();
    chunks[c].valid_map_base = out_buffers[chunk_map[c]].null_mask();
    page_count += chunks[c].max_num_pages;
  }
  CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
                          chunks.memory_size(), hipMemcpyHostToDevice,
                          stream));
  if (total_str_dict_indexes > 0) {
    CUDA_TRY(gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(),
                                             stream));
  }
  CUDA_TRY(gpu::DecodePageData(pages.device_ptr(), pages.size(),
                               chunks.device_ptr(), chunks.size(), total_rows,
                               min_row, stream));
  CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
                          pages.memory_size(), hipMemcpyDeviceToHost,
                          stream));
  CUDA_TRY(hipStreamSynchronize(stream));

  // null count = decoded rows minus rows that passed validity decoding.
  for (size_t i = 0; i < pages.size(); i++) {
    if (pages[i].num_rows > 0) {
      const size_t c = pages[i].chunk_idx;
      if (c < chunks.size()) {
        out_buffers[chunk_map[c]].null_count() +=
            pages[i].num_rows - pages[i].valid_count;
      }
    }
  }
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr) {
// Open and parse the source dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(
options.columns, options.use_pandas_metadata, _pandas_index);
// Store the index column (PANDAS-specific)
_pandas_index = _metadata->get_pandas_index_name();
// Override output timestamp resolution if requested
if (options.timestamp_type.id() != EMPTY) {
_timestamp_type = options.timestamp_type;
}
// Strings may be returned as either string or categorical columns
_strings_to_categorical = options.strings_to_categorical;
}
table_with_metadata reader::impl::read(int skip_rows, int num_rows, int row_group,
hipStream_t stream) {
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only row groups required
const auto selected_row_groups =
_metadata->select_row_groups(row_group, skip_rows, num_rows);
if (selected_row_groups.size() != 0 && _selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto &col_schema =
_metadata->schema[_metadata->row_groups[selected_row_groups[0].first]
.columns[col.first]
.schema_idx];
auto col_type = to_type_id(col_schema.type, col_schema.converted_type,
_strings_to_categorical, _timestamp_type.id(),
col_schema.decimal_scale);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
// Descriptors for all the chunks that make up the selected columns
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_row_groups.size() * num_columns;
hostdevice_vector<gpu::ColumnChunkDesc> chunks(0, num_chunks, stream);
// Association between each column chunk and its column
std::vector<int> chunk_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> page_data(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
for (const auto &rg : selected_row_groups) {
auto row_group = _metadata->row_groups[rg.first];
auto row_group_start = rg.second;
auto row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
for (size_t i = 0; i < num_columns; ++i) {
auto col = _selected_columns[i];
auto &col_meta = row_group.columns[col.first].meta_data;
auto &col_schema =
_metadata->schema[row_group.columns[col.first].schema_idx];
// Spec requires each row group to contain exactly one chunk for every
// column. If there are too many or too few, continue with best effort
if (col.second != _metadata->get_column_name(col_meta.path_in_schema)) {
std::cerr << "Detected mismatched column chunk" << std::endl;
continue;
}
if (chunks.size() >= chunks.max_size()) {
std::cerr << "Detected too many column chunks" << std::endl;
continue;
}
int32_t type_width;
int32_t clock_rate;
int8_t converted_type;
std::tie(type_width, clock_rate, converted_type) = conversion_info(
column_types[i].id(), _timestamp_type.id(), col_schema.type,
col_schema.converted_type, col_schema.type_length);
uint8_t *d_compdata = nullptr;
if (col_meta.total_compressed_size != 0) {
const auto offset = (col_meta.dictionary_page_offset != 0)
? ::min(col_meta.data_page_offset,
col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
auto buffer =
_source->get_buffer(offset, col_meta.total_compressed_size);
page_data[chunks.size()] = rmm::device_buffer(buffer->data(), buffer->size(), stream);
d_compdata = static_cast<uint8_t *>(page_data[chunks.size()].data());
}
chunks.insert(gpu::ColumnChunkDesc(
col_meta.total_compressed_size, d_compdata, col_meta.num_values,
col_schema.type, type_width, row_group_start, row_group_rows,
col_schema.max_definition_level, col_schema.max_repetition_level,
required_bits(col_schema.max_definition_level),
required_bits(col_schema.max_repetition_level), col_meta.codec,
converted_type, col_schema.decimal_scale, clock_rate));
// Map each column chunk to its column index
chunk_map[chunks.size() - 1] = i;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
remaining_rows -= row_group.num_rows;
}
assert(remaining_rows <= 0);
// Process dataset chunk pages into output columns
const auto total_pages = count_page_headers(chunks, stream);
if (total_pages > 0) {
hostdevice_vector<gpu::PageInfo> pages(total_pages, total_pages, stream);
rmm::device_buffer decomp_page_data;
decode_page_headers(chunks, pages, stream);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages, stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) {
page_data[c].resize(0);
page_data[c].shrink_to_fit();
}
}
}
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
out_buffers.emplace_back(column_types[i], num_rows, stream, _mr);
}
decode_page_data(chunks, pages, skip_rows, num_rows, chunk_map,
out_buffers, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(column_types[i], num_rows,
out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _selected_columns[i].second;
}
// Return user metadata
for (const auto& kv : _metadata->key_value_metadata) {
out_metadata.user_data.insert({kv.key, kv.value});
}
return { std::make_unique<table>(std::move(out_columns)), std::move(out_metadata) };
}
// Forward to implementation
reader::reader(std::string filepath, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) {
}
// Forward to implementation
reader::reader(const char *buffer, size_t length, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(buffer, length), options,
mr)) {}
// Forward to implementation
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
std::string reader::get_pandas_index() { return _impl->get_pandas_index(); }
// Forward to implementation
table_with_metadata reader::read_all(hipStream_t stream) {
return _impl->read(0, -1, -1, stream);
}
// Forward to implementation
table_with_metadata reader::read_row_group(size_type row_group,
hipStream_t stream) {
return _impl->read(0, -1, row_group, stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows,
size_type num_rows,
hipStream_t stream) {
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, stream);
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace experimental
} // namespace cudf
| c9578c0f17c69ef923db9be5460c9ff879e2e0eb.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Parquet reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace experimental {
namespace io {
namespace detail {
namespace parquet {
// Import functionality that's independent of legacy code
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Parquet datatype to cuDF type enum
*/
constexpr type_id to_type_id(parquet::Type physical,
parquet::ConvertedType logical,
bool strings_to_categorical,
type_id timestamp_type_id, int32_t decimal_scale) {
// Logical type used for actual data interpretation; the legacy converted type
// is superceded by 'logical' type whenever available.
switch (logical) {
case parquet::UINT_8:
case parquet::INT_8:
return type_id::INT8;
case parquet::UINT_16:
case parquet::INT_16:
return type_id::INT16;
case parquet::DATE:
return type_id::TIMESTAMP_DAYS;
case parquet::TIMESTAMP_MICROS:
return (timestamp_type_id != type_id::EMPTY)
? timestamp_type_id
: type_id::TIMESTAMP_MICROSECONDS;
case parquet::TIMESTAMP_MILLIS:
return (timestamp_type_id != type_id::EMPTY)
? timestamp_type_id
: type_id::TIMESTAMP_MILLISECONDS;
case parquet::DECIMAL:
if (decimal_scale != 0 ||
(physical != parquet::INT32 && physical != parquet::INT64)) {
return type_id::FLOAT64;
}
break;
default:
break;
}
// Physical storage type supported by Parquet; controls the on-disk storage
// format in combination with the encoding type.
switch (physical) {
case parquet::BOOLEAN:
return type_id::BOOL8;
case parquet::INT32:
return type_id::INT32;
case parquet::INT64:
return type_id::INT64;
case parquet::FLOAT:
return type_id::FLOAT32;
case parquet::DOUBLE:
return type_id::FLOAT64;
case parquet::BYTE_ARRAY:
case parquet::FIXED_LEN_BYTE_ARRAY:
// Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring)
return strings_to_categorical ? type_id::INT32 : type_id::STRING;
case parquet::INT96:
return (timestamp_type_id != type_id::EMPTY)
? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
default:
break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to Parquet clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id) {
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS:
return 1;
case type_id::TIMESTAMP_MILLISECONDS:
return 1000;
case type_id::TIMESTAMP_MICROSECONDS:
return 1000000;
case type_id::TIMESTAMP_NANOSECONDS:
return 1000000000;
default:
return 0;
}
}
/**
* @brief Function that returns the required the number of bits to store a value
*/
template <typename T = uint8_t>
T required_bits(uint32_t max_level) {
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length) {
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_timestamp(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL &&
column_type_id != type_id::FLOAT64) {
converted_type = parquet::UNKNOWN; // Not converting to float64
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
} // namespace
/**
* @brief Class for parsing dataset metadata
*/
struct metadata : public FileMetaData {
explicit metadata(datasource *source) {
constexpr auto header_len = sizeof(file_header_s);
constexpr auto ender_len = sizeof(file_ender_s);
const auto len = source->size();
const auto header_buffer = source->get_buffer(0, header_len);
const auto header = (const file_header_s *)header_buffer->data();
const auto ender_buffer = source->get_buffer(len - ender_len, ender_len);
const auto ender = (const file_ender_s *)ender_buffer->data();
CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
CUDF_EXPECTS(
header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC,
"Corrupted header or footer");
CUDF_EXPECTS(ender->footer_len != 0 &&
ender->footer_len <= (len - header_len - ender_len),
"Incorrect footer length");
const auto buffer = source->get_buffer(len - ender->footer_len - ender_len,
ender->footer_len);
CompactProtocolReader cp(buffer->data(), ender->footer_len);
CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
}
inline int get_total_rows() const { return num_rows; }
inline int get_num_row_groups() const { return row_groups.size(); }
inline int get_num_columns() const { return row_groups[0].columns.size(); }
std::string get_column_name(const std::vector<std::string> &path_in_schema) {
std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : "";
for (size_t i = 1; i < path_in_schema.size(); i++) {
s += "." + path_in_schema[i];
}
return s;
}
std::vector<std::string> get_column_names() {
std::vector<std::string> all_names;
if (row_groups.size() != 0) {
for (const auto &chunk : row_groups[0].columns) {
all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema));
}
}
return all_names;
}
/**
* @brief Extracts the column name used for the row indexes in a dataframe
*
* PANDAS adds its own metadata to the key_value section when writing out the
* dataframe to a file to aid in exact reconstruction. The JSON-formatted
* metadata contains the index column(s) and PANDA-specific datatypes.
*
* @return std::string Name of the index column
*/
std::string get_pandas_index_name() {
auto it =
std::find_if(key_value_metadata.begin(), key_value_metadata.end(),
[](const auto &item) { return item.key == "pandas"; });
if (it != key_value_metadata.end()) {
const auto pos = it->value.find("index_columns");
if (pos != std::string::npos) {
const auto begin = it->value.find('[', pos);
const auto end = it->value.find(']', begin);
if ((end - begin) > 1) {
return it->value.substr(begin + 2, end - begin - 3);
}
}
}
return "";
}
/**
* @brief Filters and reduces down to a selection of row groups
*
* @param row_group Index of the row group to select
* @param row_start Starting row of the selection
* @param row_count Total number of rows selected
*
* @return List of row group indexes and its starting row
*/
auto select_row_groups(int row_group, int &row_start, int &row_count) {
std::vector<std::pair<int, int>> selection;
if (row_group != -1) {
CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group");
for (int i = 0; i < row_group; ++i) {
row_start += row_groups[i].num_rows;
}
selection.emplace_back(row_group, row_start);
row_count = row_groups[row_group].num_rows;
} else {
row_start = std::max(row_start, 0);
if (row_count == -1) {
row_count = get_total_rows();
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
for (int i = 0, count = 0; i < (int)row_groups.size(); ++i) {
count += row_groups[i].num_rows;
if (count > row_start || count == 0) {
selection.emplace_back(i, count - row_groups[i].num_rows);
}
if (count >= (row_start + row_count)) {
break;
}
}
}
return selection;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param use_names List of column names to select
* @param include_index Whether to always include the PANDAS index column
* @param pandas_index Name of the PANDAS index column
*
* @return List of column names
*/
auto select_columns(std::vector<std::string> use_names, bool include_index,
const std::string &pandas_index) {
std::vector<std::pair<int, std::string>> selection;
const auto names = get_column_names();
if (use_names.empty()) {
// No columns specified; include all in the dataset
for (const auto &name : names) {
selection.emplace_back(selection.size(), name);
}
} else {
// Load subset of columns; include PANDAS index unless excluded
if (include_index) {
if (std::find(use_names.begin(), use_names.end(), pandas_index) ==
use_names.end()) {
use_names.push_back(pandas_index);
}
}
for (const auto &use_name : use_names) {
for (size_t i = 0; i < names.size(); ++i) {
if (names[i] == use_name) {
selection.emplace_back(i, names[i]);
break;
}
}
}
}
return selection;
}
};
size_t reader::impl::count_page_headers(
const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
cudaStream_t stream) {
size_t total_pages = 0;
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(),
chunks.memory_size(), cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
void reader::impl::decode_page_headers(
const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<gpu::PageInfo> &pages, cudaStream_t stream) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages =
chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
pages.memory_size(), cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
rmm::device_buffer reader::impl::decompress_page_data(
const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<gpu::PageInfo> &pages, cudaStream_t stream) {
auto for_each_codec_page = [&](parquet::Compression codec,
const std::function<void(size_t)> &f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_vector<uint8_t> debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
std::array<std::pair<parquet::Compression, size_t>, 3> codecs{
std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0),
std::make_pair(parquet::BROTLI, 0)};
for (auto &codec : codecs) {
for_each_codec_page(codec.first, [&](size_t page) {
total_decomp_size += pages[page].uncompressed_page_size;
codec.second++;
num_comp_pages++;
});
if (codec.first == parquet::BROTLI && codec.second > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second));
}
}
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(total_decomp_size, stream);
hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_comp_pages, stream);
hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_comp_pages,
stream);
size_t decomp_offset = 0;
int32_t argc = 0;
for (const auto &codec : codecs) {
if (codec.second > 0) {
int32_t start_pos = argc;
for_each_codec_page(codec.first, [&](size_t page) {
auto dst_base = static_cast<uint8_t *>(decomp_pages.data());
inflate_in[argc].srcDevice = pages[page].page_data;
inflate_in[argc].srcSize = pages[page].compressed_page_size;
inflate_in[argc].dstDevice = dst_base + decomp_offset;
inflate_in[argc].dstSize = pages[page].uncompressed_page_size;
inflate_out[argc].bytes_written = 0;
inflate_out[argc].status = static_cast<uint32_t>(-1000);
inflate_out[argc].reserved = 0;
pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice;
decomp_offset += inflate_in[argc].dstSize;
argc++;
});
CUDA_TRY(cudaMemcpyAsync(
inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos),
sizeof(decltype(inflate_in)::value_type) * (argc - start_pos),
cudaMemcpyHostToDevice, stream));
CUDA_TRY(cudaMemcpyAsync(
inflate_out.device_ptr(start_pos), inflate_out.host_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
cudaMemcpyHostToDevice, stream));
switch (codec.first) {
case parquet::GZIP:
CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos, 1, stream))
break;
case parquet::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos, stream));
break;
case parquet::BROTLI:
CUDA_TRY(gpu_debrotli(
inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos), debrotli_scratch.data().get(),
debrotli_scratch.size(), argc - start_pos, stream));
break;
default:
CUDF_EXPECTS(false, "Unexpected decompression dispatch");
break;
}
CUDA_TRY(cudaMemcpyAsync(
inflate_out.host_ptr(start_pos), inflate_out.device_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
cudaMemcpyDeviceToHost, stream));
}
}
CUDA_TRY(cudaStreamSynchronize(stream));
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
CUDA_TRY(cudaMemcpyAsync(pages.device_ptr(), pages.host_ptr(),
pages.memory_size(), cudaMemcpyHostToDevice,
stream));
return decomp_pages;
}
void reader::impl::decode_page_data(
const hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<gpu::PageInfo> &pages, size_t min_row,
size_t total_rows, const std::vector<int> &chunk_map,
std::vector<column_buffer> &out_buffers, cudaStream_t stream) {
auto is_dict_chunk = [](const gpu::ColumnChunkDesc &chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_values;
}
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
rmm::device_vector<gpu::nvstrdesc_s> str_dict_index;
if (total_str_dict_indexes > 0) {
str_dict_index.resize(total_str_dict_indexes);
}
// Update chunks with pointers to column data
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs;
str_ofs += pages[page_count].num_values;
}
chunks[c].column_data_base = out_buffers[chunk_map[c]].data();
chunks[c].valid_map_base = out_buffers[chunk_map[c]].null_mask();
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice,
stream));
if (total_str_dict_indexes > 0) {
CUDA_TRY(gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(),
stream));
}
CUDA_TRY(gpu::DecodePageData(pages.device_ptr(), pages.size(),
chunks.device_ptr(), chunks.size(), total_rows,
min_row, stream));
CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
pages.memory_size(), cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (size_t i = 0; i < pages.size(); i++) {
if (pages[i].num_rows > 0) {
const size_t c = pages[i].chunk_idx;
if (c < chunks.size()) {
out_buffers[chunk_map[c]].null_count() +=
pages[i].num_rows - pages[i].valid_count;
}
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr) {
// Open and parse the source dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(
options.columns, options.use_pandas_metadata, _pandas_index);
// Store the index column (PANDAS-specific)
_pandas_index = _metadata->get_pandas_index_name();
// Override output timestamp resolution if requested
if (options.timestamp_type.id() != EMPTY) {
_timestamp_type = options.timestamp_type;
}
// Strings may be returned as either string or categorical columns
_strings_to_categorical = options.strings_to_categorical;
}
table_with_metadata reader::impl::read(int skip_rows, int num_rows, int row_group,
cudaStream_t stream) {
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only row groups required
const auto selected_row_groups =
_metadata->select_row_groups(row_group, skip_rows, num_rows);
if (selected_row_groups.size() != 0 && _selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto &col_schema =
_metadata->schema[_metadata->row_groups[selected_row_groups[0].first]
.columns[col.first]
.schema_idx];
auto col_type = to_type_id(col_schema.type, col_schema.converted_type,
_strings_to_categorical, _timestamp_type.id(),
col_schema.decimal_scale);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
// Descriptors for all the chunks that make up the selected columns
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_row_groups.size() * num_columns;
hostdevice_vector<gpu::ColumnChunkDesc> chunks(0, num_chunks, stream);
// Association between each column chunk and its column
std::vector<int> chunk_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> page_data(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
for (const auto &rg : selected_row_groups) {
auto row_group = _metadata->row_groups[rg.first];
auto row_group_start = rg.second;
auto row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
for (size_t i = 0; i < num_columns; ++i) {
auto col = _selected_columns[i];
auto &col_meta = row_group.columns[col.first].meta_data;
auto &col_schema =
_metadata->schema[row_group.columns[col.first].schema_idx];
// Spec requires each row group to contain exactly one chunk for every
// column. If there are too many or too few, continue with best effort
if (col.second != _metadata->get_column_name(col_meta.path_in_schema)) {
std::cerr << "Detected mismatched column chunk" << std::endl;
continue;
}
if (chunks.size() >= chunks.max_size()) {
std::cerr << "Detected too many column chunks" << std::endl;
continue;
}
int32_t type_width;
int32_t clock_rate;
int8_t converted_type;
std::tie(type_width, clock_rate, converted_type) = conversion_info(
column_types[i].id(), _timestamp_type.id(), col_schema.type,
col_schema.converted_type, col_schema.type_length);
uint8_t *d_compdata = nullptr;
if (col_meta.total_compressed_size != 0) {
const auto offset = (col_meta.dictionary_page_offset != 0)
? std::min(col_meta.data_page_offset,
col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
auto buffer =
_source->get_buffer(offset, col_meta.total_compressed_size);
page_data[chunks.size()] = rmm::device_buffer(buffer->data(), buffer->size(), stream);
d_compdata = static_cast<uint8_t *>(page_data[chunks.size()].data());
}
chunks.insert(gpu::ColumnChunkDesc(
col_meta.total_compressed_size, d_compdata, col_meta.num_values,
col_schema.type, type_width, row_group_start, row_group_rows,
col_schema.max_definition_level, col_schema.max_repetition_level,
required_bits(col_schema.max_definition_level),
required_bits(col_schema.max_repetition_level), col_meta.codec,
converted_type, col_schema.decimal_scale, clock_rate));
// Map each column chunk to its column index
chunk_map[chunks.size() - 1] = i;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
remaining_rows -= row_group.num_rows;
}
assert(remaining_rows <= 0);
// Process dataset chunk pages into output columns
const auto total_pages = count_page_headers(chunks, stream);
if (total_pages > 0) {
hostdevice_vector<gpu::PageInfo> pages(total_pages, total_pages, stream);
rmm::device_buffer decomp_page_data;
decode_page_headers(chunks, pages, stream);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages, stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) {
page_data[c].resize(0);
page_data[c].shrink_to_fit();
}
}
}
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
out_buffers.emplace_back(column_types[i], num_rows, stream, _mr);
}
decode_page_data(chunks, pages, skip_rows, num_rows, chunk_map,
out_buffers, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(column_types[i], num_rows,
out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _selected_columns[i].second;
}
// Return user metadata
for (const auto& kv : _metadata->key_value_metadata) {
out_metadata.user_data.insert({kv.key, kv.value});
}
return { std::make_unique<table>(std::move(out_columns)), std::move(out_metadata) };
}
// Forward to implementation
reader::reader(std::string filepath, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) {
}
// Forward to implementation
reader::reader(const char *buffer, size_t length, reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(buffer, length), options,
mr)) {}
// Forward to implementation
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
std::string reader::get_pandas_index() { return _impl->get_pandas_index(); }
// Forward to implementation
table_with_metadata reader::read_all(cudaStream_t stream) {
return _impl->read(0, -1, -1, stream);
}
// Forward to implementation
// Read exactly one row group; -1 for num_rows means "all rows in the group".
table_with_metadata reader::read_row_group(size_type row_group,
                                           cudaStream_t stream) {
  return _impl->read(0, -1, row_group, stream);
}
// Forward to implementation
// Read `num_rows` rows starting at `skip_rows`. A num_rows of 0 is treated
// as "read to the end of the file" (the -1 sentinel the impl expects).
table_with_metadata reader::read_rows(size_type skip_rows,
                                      size_type num_rows,
                                      cudaStream_t stream) {
  size_type rows_wanted = -1;
  if (num_rows != 0) {
    rows_wanted = num_rows;
  }
  return _impl->read(skip_rows, rows_wanted, -1, stream);
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace experimental
} // namespace cudf
|
42f4be46d60e48a8088c3d27f962499f95c1a439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std::chrono;
using namespace std;
#define TPB 256
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define N (NUM_PARTICLES/TPB + 1)
// State of one simulated particle: 3D position and 3D velocity.
struct particle {
    float position[3];  // x, y, z
    float velocity[3];  // vx, vy, vz
};

// Per-axis integer seeds for the deterministic pseudo-random generator.
struct seed {
    int x;
    int y;
    int z;
};
// Deterministic pseudo-random value shared by host and device code.
// The same (seed, particle_id, iteration) triple always yields the same
// number, which is what lets the CPU and GPU runs be compared exactly.
__host__ __device__ float gen_random(int seed, int particle_id, int iteration)
{
    const int mixed = seed * particle_id + iteration;
    return static_cast<float>(mixed % NUM_PARTICLES);
}
// Overwrite the particle's velocity with fresh pseudo-random components and
// advance its position by that velocity (implicit unit time step).
__host__ __device__ void updateVelAndPos(particle *particles, seed seed, int iteration, int particle_id)
{
    particle &p = particles[particle_id];
    const int axis_seeds[3] = { seed.x, seed.y, seed.z };
    for (int axis = 0; axis < 3; ++axis) {
        p.velocity[axis] = gen_random(axis_seeds[axis], particle_id, iteration);
        p.position[axis] += p.velocity[axis];
    }
}
// One simulation step on the device: each thread owns exactly one particle.
__global__ void timestepGPU(particle *particles, seed seed, int iteration) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= NUM_PARTICLES) return;  // guard the grid tail
    updateVelAndPos(particles, seed, iteration, gid);
}
// One simulation step on the host: sequentially update every particle.
void timestepCPU(particle *particles, seed seed, int iteration) {
    for (int particle_id = 0; particle_id < NUM_PARTICLES; ++particle_id)
        updateVelAndPos(particles, seed, iteration, particle_id);
}
// Run the same particle simulation on CPU and GPU and report the maximum
// per-component position difference plus wall-clock timings.
int main()
{
    // Fixed seeds so CPU and GPU produce identical "random" sequences.
    seed seed = {5,6,7};
    // Bug fix: value-initialize (trailing "()") so positions/velocities start
    // at zero. updateVelAndPos reads the previous position, so the original
    // default-initialized arrays were read before ever being written (UB).
    particle *particlesCPU = new particle[NUM_PARTICLES]();
    particle *particlesGPU2CPU = new particle[NUM_PARTICLES]();
    // Bug fix: this is a device buffer; the original allocated a host array
    // with new[] and then overwrote the pointer via hipMalloc, leaking it.
    particle *particlesGPU = nullptr;
    //////// CPU calculations ////////
    auto startCPU = high_resolution_clock::now();
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        timestepCPU(particlesCPU, seed, i);
    }
    auto stopCPU = high_resolution_clock::now();
    auto durationCPU = duration_cast<milliseconds>(stopCPU - startCPU);
    cout << "---------------\n";
    //////// GPU calculations ////////
    auto startGPU = high_resolution_clock::now();
    hipMalloc(&particlesGPU, sizeof(particle) * NUM_PARTICLES);
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        // Round-trip the state every iteration (host -> device -> host) so
        // the measured GPU time includes the transfer cost.
        hipMemcpy(particlesGPU, particlesGPU2CPU, sizeof(particle) * NUM_PARTICLES, hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( timestepGPU), dim3(N), dim3(TPB), 0, 0, particlesGPU, seed, i);
        hipDeviceSynchronize();
        hipMemcpy(particlesGPU2CPU, particlesGPU, sizeof(particle) * NUM_PARTICLES, hipMemcpyDeviceToHost);
    }
    auto stopGPU = high_resolution_clock::now();
    auto durationGPU = duration_cast<milliseconds>(stopGPU - startGPU);
    //////// Compare calculations ////////
    float maxError = 0.0f;
    for (int particle_i = 0; particle_i < NUM_PARTICLES; particle_i++) {
        for (int dim = 0; dim < 3; dim++) {
            maxError = fmax(maxError, fabs(
                particlesGPU2CPU[particle_i].position[dim] - particlesCPU[particle_i].position[dim]
            ));
        }
    }
    std::cout << "Max error: " << maxError << std::endl;
    delete[] particlesGPU2CPU;
    hipFree(particlesGPU);
    delete[] particlesCPU;
    //////////////////////////////////
    cout << "CPU duration in milliseconds: " << durationCPU.count() << endl;
    cout << "GPU duration in milliseconds: " << durationGPU.count() << endl;
    return 0;
}
| 42f4be46d60e48a8088c3d27f962499f95c1a439.cu | #include <stdio.h>
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std::chrono;
using namespace std;
#define TPB 256
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define N (NUM_PARTICLES/TPB + 1)
// State of one simulated particle: 3D position and 3D velocity.
struct particle {
    float position[3];  // x, y, z
    float velocity[3];  // vx, vy, vz
};

// Per-axis integer seeds for the deterministic pseudo-random generator.
struct seed {
    int x;
    int y;
    int z;
};
// Deterministic pseudo-random value shared by host and device code.
// The same (seed, particle_id, iteration) triple always yields the same
// number, which is what lets the CPU and GPU runs be compared exactly.
__host__ __device__ float gen_random(int seed, int particle_id, int iteration)
{
    const int mixed = seed * particle_id + iteration;
    return static_cast<float>(mixed % NUM_PARTICLES);
}
// Overwrite the particle's velocity with fresh pseudo-random components and
// advance its position by that velocity (implicit unit time step).
__host__ __device__ void updateVelAndPos(particle *particles, seed seed, int iteration, int particle_id)
{
    particle &p = particles[particle_id];
    const int axis_seeds[3] = { seed.x, seed.y, seed.z };
    for (int axis = 0; axis < 3; ++axis) {
        p.velocity[axis] = gen_random(axis_seeds[axis], particle_id, iteration);
        p.position[axis] += p.velocity[axis];
    }
}
// One simulation step on the device: each thread owns exactly one particle.
__global__ void timestepGPU(particle *particles, seed seed, int iteration) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= NUM_PARTICLES) return;  // guard the grid tail
    updateVelAndPos(particles, seed, iteration, gid);
}
// One simulation step on the host: sequentially update every particle.
void timestepCPU(particle *particles, seed seed, int iteration) {
    for (int particle_id = 0; particle_id < NUM_PARTICLES; ++particle_id)
        updateVelAndPos(particles, seed, iteration, particle_id);
}
// Run the same particle simulation on CPU and GPU and report the maximum
// per-component position difference plus wall-clock timings.
int main()
{
    // Fixed seeds so CPU and GPU produce identical "random" sequences.
    seed seed = {5,6,7};
    // Bug fix: value-initialize (trailing "()") so positions/velocities start
    // at zero. updateVelAndPos reads the previous position, so the original
    // default-initialized arrays were read before ever being written (UB).
    particle *particlesCPU = new particle[NUM_PARTICLES]();
    particle *particlesGPU2CPU = new particle[NUM_PARTICLES]();
    // Bug fix: this is a device buffer; the original allocated a host array
    // with new[] and then overwrote the pointer via cudaMalloc, leaking it.
    particle *particlesGPU = nullptr;
    //////// CPU calculations ////////
    auto startCPU = high_resolution_clock::now();
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        timestepCPU(particlesCPU, seed, i);
    }
    auto stopCPU = high_resolution_clock::now();
    auto durationCPU = duration_cast<milliseconds>(stopCPU - startCPU);
    cout << "---------------\n";
    //////// GPU calculations ////////
    auto startGPU = high_resolution_clock::now();
    cudaMalloc(&particlesGPU, sizeof(particle) * NUM_PARTICLES);
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        // Round-trip the state every iteration (host -> device -> host) so
        // the measured GPU time includes the transfer cost.
        cudaMemcpy(particlesGPU, particlesGPU2CPU, sizeof(particle) * NUM_PARTICLES, cudaMemcpyHostToDevice);
        timestepGPU<<<N, TPB>>>(particlesGPU, seed, i);
        cudaDeviceSynchronize();
        cudaMemcpy(particlesGPU2CPU, particlesGPU, sizeof(particle) * NUM_PARTICLES, cudaMemcpyDeviceToHost);
    }
    auto stopGPU = high_resolution_clock::now();
    auto durationGPU = duration_cast<milliseconds>(stopGPU - startGPU);
    //////// Compare calculations ////////
    float maxError = 0.0f;
    for (int particle_i = 0; particle_i < NUM_PARTICLES; particle_i++) {
        for (int dim = 0; dim < 3; dim++) {
            maxError = fmax(maxError, fabs(
                particlesGPU2CPU[particle_i].position[dim] - particlesCPU[particle_i].position[dim]
            ));
        }
    }
    std::cout << "Max error: " << maxError << std::endl;
    delete[] particlesGPU2CPU;
    cudaFree(particlesGPU);
    delete[] particlesCPU;
    //////////////////////////////////
    cout << "CPU duration in milliseconds: " << durationCPU.count() << endl;
    cout << "GPU duration in milliseconds: " << durationGPU.count() << endl;
    return 0;
}
|
1ea86309bd82be6f1e95ce0d887c9d3324445e87.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <tiny_helper_cuda.h>
#include <vector.cuh>
// Scale every element of A in place by k. One thread per element; threads
// past numElements do nothing.
__global__ void
vectormult_kernel(float *A, float k, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    A[idx] *= k;
}
// Element-wise C = A + B. One thread per element; threads past numElements
// do nothing.
__global__ void
vectoradd_kernel(const float* A, const float* B, float* C, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    C[idx] = A[idx] + B[idx];
}
// Host wrapper: scale `size` device floats by k and block until the kernel
// has finished.
void vectormult(float* dev_vector, float k, int size)
{
    const int threadsPerBlock = 128;
    const int blocks = (size + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    hipLaunchKernelGGL(( vectormult_kernel) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, dev_vector, k, size);
    checkCudaErrors(hipDeviceSynchronize());
    getLastCudaError("vectormult_kernel");
}
// Host wrapper: compute C = A + B over `size` device floats and block until
// the kernel has finished.
void vectoradd(const float* A, const float* B, float* C, int size)
{
    const int n_threads = 128;
    const int n_blocks = (size + n_threads - 1) / n_threads;
    hipLaunchKernelGGL(( vectoradd_kernel) , dim3(n_blocks), dim3(n_threads) , 0, 0, A, B, C, size);
    checkCudaErrors(hipDeviceSynchronize());
    // Bug fix: label previously said "vectormult_kernel", so errors from this
    // launch were misattributed to vectormult().
    getLastCudaError("vectoradd_kernel");
} | 1ea86309bd82be6f1e95ce0d887c9d3324445e87.cu | #include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <tiny_helper_cuda.h>
#include <vector.cuh>
// Scale every element of A in place by k. One thread per element; threads
// past numElements do nothing.
__global__ void
vectormult_kernel(float *A, float k, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    A[idx] *= k;
}
// Element-wise C = A + B. One thread per element; threads past numElements
// do nothing.
__global__ void
vectoradd_kernel(const float* A, const float* B, float* C, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    C[idx] = A[idx] + B[idx];
}
// Host wrapper: scale `size` device floats by k and block until the kernel
// has finished.
void vectormult(float* dev_vector, float k, int size)
{
    const int threadsPerBlock = 128;
    const int blocks = (size + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    vectormult_kernel <<<blocks, threadsPerBlock >>>(dev_vector, k, size);
    checkCudaErrors(cudaDeviceSynchronize());
    getLastCudaError("vectormult_kernel");
}
{
const int n_threads = 128;
const int n_blocks = (size + n_threads - 1) / n_threads;
vectoradd_kernel <<<n_blocks, n_threads >>>(A, B, C, size);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("vectormult_kernel");
} |
9f547764285a1280030200cdb335a6817808e2ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <iostream>
#include "c_cuda_structures.h"
//==========================================================================
// Operations with CUDAVector
//==========================================================================
// result <- a (component-wise copy)
__device__ void mov(const CUDAVector &a, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i];
}
// result <- a + b (component-wise)
__device__ void add(const CUDAVector &a, const CUDAVector& b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] + b.v[i];
}
// result <- a - b (component-wise)
__device__ void sub(const CUDAVector &a, const CUDAVector& b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] - b.v[i];
}
// result <- a * b (component-wise / Hadamard product)
__device__ void mul(const CUDAVector &a, const CUDAVector& b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] * b.v[i];
}
// result <- a * b (scalar multiply; safe when result aliases a, since each
// component is read before it is written)
__device__ void mul(const CUDAVector &a, const myfloat b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] * b;
}
// result <- a / b (scalar divide; no zero check — caller must ensure b != 0)
__device__ void div(const CUDAVector &a, const myfloat b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] / b;
}
// Returns the squared Euclidean norm |a|^2, accumulated in double.
__device__ double square(const CUDAVector &a)
{
    double result = 0;
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result += a.v[i] * a.v[i];
    return result;
}
//==========================================================================
// Operations with CUDAMolecule
//==========================================================================
#define Boltzmann 1.3806488e-23
#define Angstrom 1e-10
#define AtomicMassUnit 1.660538921e-27
#define csigma (2.74*1e-10) //m
#define cepsilon (36.2*1.3806488e-23) //J
#define mass (20.1797 * 1.660538921e-27)
#define maxDistSquare (4.0 * csigma * 4.0 * csigma)
// Lennard-Jones force between two molecules separated by vector r, where
// `square` = |r|^2. The math works on the dimensionless ratio (sigma/|r|)^2
// and folds the potential's derivative into one scale factor applied to r;
// the result is written into F (F may alias r — mul is component-wise).
// NOTE(review): without ODINARY_PRECISION, sigma/epsilon pass through
// `volatile` locals — presumably to block a compiler optimization that
// changed numeric results; confirm before removing.
__device__ inline void d_Force_LennardJones(const CUDAVector& r, myfloat square, CUDAVector& F, myfloat sigma, myfloat epsilon)
{
#ifdef ODINARY_PRECISION
    myfloat _sigma = sigma;
    myfloat _epsilon = epsilon;
#else
    volatile myfloat _sigma = sigma;
    volatile myfloat _epsilon = epsilon;
#endif
    myfloat sigmaSquare = _sigma * _sigma;
    square = sigmaSquare / square;            // now s = (sigma/|r|)^2
    myfloat r4 = square * square;
    myfloat r8 = r4*r4;
    myfloat U = 2.0*r8*r8/square - r8;        // 2*s^14 - s^8
    myfloat c = -24.0 * _epsilon / sigmaSquare;
    mul(r, c * U, F);
}
//==========================================================================
// Operations with CUDAUnderspace
//==========================================================================
// Beeman integrator, position half: for every molecule of the underspace,
// remember the current position in oldr and advance
//   r += v*dt + (4/6)*(F/m)*dt^2 - (1/6)*(oldF/m)*dt^2.
__device__ void d_recalculatePositions_Beeman(CUDAUnderspace *cus, myfloat dt)
{
    auto molecules = GET_POINTER(CUDAMolecule, cus, cus->moleculesShift);
    const myfloat dt2 = dt * dt;
    for (size_t i = 0; i < cus->numberOfMolecules; ++i) {
        CUDAMolecule &m = molecules[i];
        mov(m.r, m.oldr);  // save r(t) before overwriting it
        CUDAVector step;
        mul(m.v, dt, step);
        add(m.r, step, m.r);
        mul(m.F, (4.0 / 6.0) * dt2 / mass, step);
        add(m.r, step, m.r);
        mul(m.oldF, -(1.0 / 6.0) * dt2 / mass, step);
        add(m.r, step, m.r);
    }
}
// Beeman integrator, velocity half, followed by reflecting walls:
//   v += (2/6)*(newF/m)*dt + (5/6)*(F/m)*dt - (1/6)*(oldF/m)*dt
// After the update, any molecule at or beyond a wall of the
// [0, extent] box along an axis has that velocity component pointed
// back into the box.
__device__ void d_recalculateSpeeds_Beeman(CUDAUnderspace *cus, myfloat dt, int width, int height, int depth)
{
    auto molecules = GET_POINTER(CUDAMolecule, cus, cus->moleculesShift);
    const double extents[3] = { width * Angstrom, height * Angstrom, depth * Angstrom };
    for (size_t i = 0; i < cus->numberOfMolecules; ++i) {
        CUDAMolecule &m = molecules[i];
        CUDAVector step;
        mul(m.newF, (2.0 / 6.0) * dt / mass, step);
        add(m.v, step, m.v);
        mul(m.F, (5.0 / 6.0) * dt / mass, step);
        add(m.v, step, m.v);
        mul(m.oldF, -(1.0 / 6.0) * dt / mass, step);
        add(m.v, step, m.v);
        for (int axis = 0; axis < 3; ++axis) {
            if (m.r.v[axis] <= 0)
                m.v.v[axis] = abs(m.v.v[axis]);      // bounce off the low wall
            if (extents[axis] <= m.r.v[axis])
                m.v.v[axis] = -abs(m.v.v[axis]);     // bounce off the high wall
        }
    }
}
// Accumulate forces on every molecule of underspace (nx, ny, nz) from its
// own molecules and from the neighbouring underspaces within one cell in
// every direction (a 3x3x3 neighbourhood, clipped at the grid boundary).
__device__ void d_calculateNewForcesForUnderspace(CUDASpace *cs, int nx, int ny, int nz)
{
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    CUDAUnderspace *centralSpace = &underspaces[LINEAR(cs, nx, ny, nz)];
    const int closest = 1;
    for (int dx = -closest; dx <= closest; ++dx) {
        for (int dy = -closest; dy <= closest; ++dy) {
            for (int dz = -closest; dz <= closest; ++dz) {
                const int x = nx + dx;
                const int y = ny + dy;
                const int z = nz + dz;
                // Skip neighbour coordinates that fall outside the grid.
                if (x < 0 || x >= cs->Nx) continue;
                if (y < 0 || y >= cs->Ny) continue;
                if (z < 0 || z >= cs->Nz) continue;
                d_calculateNewForces(centralSpace, &underspaces[LINEAR(cs, x, y, z)], cs);
            }
        }
    }
}
// Add the Lennard-Jones force exerted by every molecule of cus2 onto every
// molecule of cus1, accumulating into cus1's newF vectors.
// NOTE(review): the sigma/epsilon tables are (re)filled with the same
// constants here, on every call and concurrently from every thread. All
// writers store identical values so the data is consistent, but this
// initialization belongs on the host, done once.
__device__ void d_calculateNewForces(CUDAUnderspace *cus1, CUDAUnderspace *cus2, CUDASpace *cs)
{
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 4; ++j) {
            cs->sigma[i][j] = csigma;
            cs->epsilon[i][j] = cepsilon;
        }
    }
    auto molecules1 = GET_POINTER(CUDAMolecule, cus1, cus1->moleculesShift);
    auto molecules2 = GET_POINTER(CUDAMolecule, cus2, cus2->moleculesShift);
    for (size_t i = 0; i < cus1->numberOfMolecules; ++i) {
        for (size_t j = 0; j < cus2->numberOfMolecules; ++j) {
            //if (i == j && cus1 == cus2) continue;
            //Vector r = (*j).r - (*i).r;
            CUDAVector tmp;
            sub(molecules2[j].r, molecules1[i].r, tmp);
            myfloat sq = square(tmp);
            // Zero separation: the molecule interacting with itself (or an
            // exact overlap) — contributes no force.
            if (sq == 0) continue;
            //if (maxDistSquare < sq) continue;
            //(*i).newF += d_Force_LennardJones(r, sq);
            int type1 = molecules1[i].type;
            int type2 = molecules2[j].type;
            myfloat sigma = cs->sigma[type1][type2];
            myfloat epsilon = cs->epsilon[type1][type2];
            //if (3*sigma < sq) continue;
            //int *p = nullptr;
            //if (epsilon == 0) *p = 1;
            d_Force_LennardJones(tmp, sq, tmp, sigma, epsilon);
            add(molecules1[i].newF, tmp, molecules1[i].newF);
        }
    }
}
//==========================================================================
// Operations with CUDASpace
//==========================================================================
// Each kernel below maps one thread to one underspace cell of the
// Nx x Ny x Nz grid; threads outside the grid return immediately.

// Beeman position update for every molecule of the thread's cell.
__global__ void cuda_recalculatePositions(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    d_recalculatePositions_Beeman(&underspaces[LINEAR(cs, nx, ny, nz)], cs->dt);
}

// Beeman velocity update plus wall reflection for the thread's cell.
__global__ void cuda_recalculateSpeeds(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    d_recalculateSpeeds_Beeman(&underspaces[LINEAR(cs, nx, ny, nz)], cs->dt, cs->width, cs->height, cs->depth);
}

// Lennard-Jones force accumulation from the cell's 3x3x3 neighbourhood.
__global__ void cuda_recalculateForces(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    d_calculateNewForcesForUnderspace(cs, nx, ny, nz);
}

// Rotate the per-molecule force history: oldF <- F, F <- newF.
__global__ void cuda_validate(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    CUDAUnderspace& cus = underspaces[LINEAR(cs, nx, ny, nz)];
    auto molecules = GET_POINTER(CUDAMolecule, &cus, cus.moleculesShift);
    for (size_t i = 0; i < cus.numberOfMolecules; ++i) {
        mov(molecules[i].F, molecules[i].oldF);
        mov(molecules[i].newF, molecules[i].F);
    }
}

// Clear the newF accumulators ahead of the force pass.
__global__ void cuda_dropNewF(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    CUDAUnderspace& cus = underspaces[LINEAR(cs, nx, ny, nz)];
    auto molecules = GET_POINTER(CUDAMolecule, &cus, cus.moleculesShift);
    for (size_t i = 0; i < cus.numberOfMolecules; ++i) {
        for (int axis = 0; axis < 3; ++axis)
            molecules[i].newF.v[axis] = 0;
    }
}
//__global__ void cuda_recalculateForcesForMolecules(CUDASpace *cs)
//{
// auto allMolecules = GET_POINTER(CUDAMolecule, cs, cs->underspacesShift + cs->Nx*cs->Ny*cs->Nz*sizeof(CUDAUnderspace));
// size_t molecule = blockDim.x * blockIdx.x + threadIdx.x;
//
//
//}
// Run one full simulation step on the device:
//   positions -> clear newF -> accumulate forces -> speeds -> rotate history.
// Each phase is a separate launch followed by a device sync so the next
// phase only ever sees completed results.
void cuda_oneStep(CUDASpace *d_cs, int Nx, int Ny, int Nz)
{
    const int numberOfCores = 512;
    // Distribute a block's threads evenly over the three axes.
    const int coresPerDim = int(pow(numberOfCores, 1.0 / 3.0));
    dim3 blocks(coresPerDim, coresPerDim, coresPerDim);
    dim3 grid(Nx / coresPerDim + 1, Ny / coresPerDim + 1, Nz / coresPerDim + 1);
    hipLaunchKernelGGL(( cuda_recalculatePositions) , dim3(grid), dim3(blocks), 0, 0, d_cs);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( cuda_dropNewF) , dim3(grid), dim3(blocks), 0, 0, d_cs);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( cuda_recalculateForces) , dim3(grid), dim3(blocks), 0, 0, d_cs);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( cuda_recalculateSpeeds) , dim3(grid), dim3(blocks), 0, 0, d_cs);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( cuda_validate) , dim3(grid), dim3(blocks), 0, 0, d_cs);
    hipDeviceSynchronize();
}
// Copy a fully packed CUDASpace blob from host to device memory and return
// the device pointer. `wholeSize` may be 0, in which case it is derived
// from the space itself via WHOLE_SIZE_OF_SPACE.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked; an
// allocation failure would only surface at the next kernel fault.
extern CUDASpace* moveFromHost(CUDASpace *h_cs, size_t wholeSize/* = 0*/)
{
    CUDASpace *d_cs;
    if (wholeSize == 0)
        wholeSize = WHOLE_SIZE_OF_SPACE(h_cs);
    hipMalloc(&d_cs, wholeSize); //allocate device memory for all data
    hipMemcpy(d_cs, h_cs, wholeSize, hipMemcpyHostToDevice); //copy all data from h_cs (host) to d_cs (device)
    //delete[] reinterpret_cast<byte*>(h_cs); //delete data from host memory;
    return d_cs;
}
// Copy a packed CUDASpace blob of `wholeSize` bytes back from device memory,
// free the device copy, and return a host pointer to the data. If `h_p` is
// null a new host buffer is allocated (the caller owns it).
// NOTE(review): wholeSize == 0 is unsupported and throws a bare int —
// deriving the size from the device-side header is still a TODO.
CUDASpace* moveFromDevice(CUDASpace *d_cs, size_t wholeSize/* = 0*/, byte *h_p/* = nullptr*/)
{
    if (wholeSize == 0)
        throw 0; //TODO copy CUDASpace only from device and calculate wholeSize
    if (h_p == nullptr)
        h_p = new byte[wholeSize]; //allocate host memory for all data
    hipMemcpy(h_p, d_cs, wholeSize, hipMemcpyDeviceToHost); //copy all data
    auto h_cs = reinterpret_cast<CUDASpace*>(h_p); //get pointer to CUDASpace
    hipFree(d_cs); //delete data from device memory
    return h_cs;
}
/*
void freeDeviceMem(CUDASpace *d_cs)
{
for (size_t i = 0; i < d_cs->Nx; ++i) {
for (size_t j = 0; j < d_cs->Ny; ++j) {
for (size_t k = 0; k < d_cs->Nz; ++k) {
hipFree(d_cs->underspaces[i][j][k].molecules);
}
hipFree(d_cs->underspaces[i][j]);
}
hipFree(d_cs->underspaces[i]);
}
hipFree(d_cs->underspaces);
hipFree(d_cs);
}
void freeHostMem(CUDASpace *h_cs)
{
for (size_t i = 0; i < h_cs->Nx; ++i) {
for (size_t j = 0; j < h_cs->Ny; ++j) {
for (size_t k = 0; k < h_cs->Nz; ++k) {
delete h_cs->underspaces[i][j][k].molecules;
}
delete h_cs->underspaces[i][j];
}
delete h_cs->underspaces[i];
}
delete h_cs->underspaces;
delete h_cs;
}
*/
| 9f547764285a1280030200cdb335a6817808e2ec.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <iostream>
#include "c_cuda_structures.h"
//==========================================================================
// Operations with CUDAVector
//==========================================================================
// result <- a (component-wise copy)
__device__ void mov(const CUDAVector &a, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i];
}
// result <- a + b (component-wise)
__device__ void add(const CUDAVector &a, const CUDAVector& b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] + b.v[i];
}
// result <- a - b (component-wise)
__device__ void sub(const CUDAVector &a, const CUDAVector& b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] - b.v[i];
}
// result <- a * b (component-wise / Hadamard product)
__device__ void mul(const CUDAVector &a, const CUDAVector& b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] * b.v[i];
}
// result <- a * b (scalar multiply; safe when result aliases a, since each
// component is read before it is written)
__device__ void mul(const CUDAVector &a, const myfloat b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] * b;
}
// result <- a / b (scalar divide; no zero check — caller must ensure b != 0)
__device__ void div(const CUDAVector &a, const myfloat b, CUDAVector& result)
{
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result.v[i] = a.v[i] / b;
}
// Returns the squared Euclidean norm |a|^2, accumulated in double.
__device__ double square(const CUDAVector &a)
{
    double result = 0;
    for (size_t i = 0; i < VECTOR_DIMENSION; ++i)
        result += a.v[i] * a.v[i];
    return result;
}
//==========================================================================
// Operations with CUDAMolecule
//==========================================================================
#define Boltzmann 1.3806488e-23
#define Angstrom 1e-10
#define AtomicMassUnit 1.660538921e-27
#define csigma (2.74*1e-10) //m
#define cepsilon (36.2*1.3806488e-23) //J
#define mass (20.1797 * 1.660538921e-27)
#define maxDistSquare (4.0 * csigma * 4.0 * csigma)
// Lennard-Jones force between two molecules separated by vector r, where
// `square` = |r|^2. The math works on the dimensionless ratio (sigma/|r|)^2
// and folds the potential's derivative into one scale factor applied to r;
// the result is written into F (F may alias r — mul is component-wise).
// NOTE(review): without ODINARY_PRECISION, sigma/epsilon pass through
// `volatile` locals — presumably to block a compiler optimization that
// changed numeric results; confirm before removing.
__device__ inline void d_Force_LennardJones(const CUDAVector& r, myfloat square, CUDAVector& F, myfloat sigma, myfloat epsilon)
{
#ifdef ODINARY_PRECISION
    myfloat _sigma = sigma;
    myfloat _epsilon = epsilon;
#else
    volatile myfloat _sigma = sigma;
    volatile myfloat _epsilon = epsilon;
#endif
    myfloat sigmaSquare = _sigma * _sigma;
    square = sigmaSquare / square;            // now s = (sigma/|r|)^2
    myfloat r4 = square * square;
    myfloat r8 = r4*r4;
    myfloat U = 2.0*r8*r8/square - r8;        // 2*s^14 - s^8
    myfloat c = -24.0 * _epsilon / sigmaSquare;
    mul(r, c * U, F);
}
//==========================================================================
// Operations with CUDAUnderspace
//==========================================================================
// Beeman integrator, position half: for every molecule of the underspace,
// remember the current position in oldr and advance
//   r += v*dt + (4/6)*(F/m)*dt^2 - (1/6)*(oldF/m)*dt^2.
__device__ void d_recalculatePositions_Beeman(CUDAUnderspace *cus, myfloat dt)
{
    auto molecules = GET_POINTER(CUDAMolecule, cus, cus->moleculesShift);
    const myfloat dt2 = dt * dt;
    for (size_t i = 0; i < cus->numberOfMolecules; ++i) {
        CUDAMolecule &m = molecules[i];
        mov(m.r, m.oldr);  // save r(t) before overwriting it
        CUDAVector step;
        mul(m.v, dt, step);
        add(m.r, step, m.r);
        mul(m.F, (4.0 / 6.0) * dt2 / mass, step);
        add(m.r, step, m.r);
        mul(m.oldF, -(1.0 / 6.0) * dt2 / mass, step);
        add(m.r, step, m.r);
    }
}
// Beeman integrator, velocity half, followed by reflecting walls:
//   v += (2/6)*(newF/m)*dt + (5/6)*(F/m)*dt - (1/6)*(oldF/m)*dt
// After the update, any molecule at or beyond a wall of the
// [0, extent] box along an axis has that velocity component pointed
// back into the box.
__device__ void d_recalculateSpeeds_Beeman(CUDAUnderspace *cus, myfloat dt, int width, int height, int depth)
{
    auto molecules = GET_POINTER(CUDAMolecule, cus, cus->moleculesShift);
    const double extents[3] = { width * Angstrom, height * Angstrom, depth * Angstrom };
    for (size_t i = 0; i < cus->numberOfMolecules; ++i) {
        CUDAMolecule &m = molecules[i];
        CUDAVector step;
        mul(m.newF, (2.0 / 6.0) * dt / mass, step);
        add(m.v, step, m.v);
        mul(m.F, (5.0 / 6.0) * dt / mass, step);
        add(m.v, step, m.v);
        mul(m.oldF, -(1.0 / 6.0) * dt / mass, step);
        add(m.v, step, m.v);
        for (int axis = 0; axis < 3; ++axis) {
            if (m.r.v[axis] <= 0)
                m.v.v[axis] = abs(m.v.v[axis]);      // bounce off the low wall
            if (extents[axis] <= m.r.v[axis])
                m.v.v[axis] = -abs(m.v.v[axis]);     // bounce off the high wall
        }
    }
}
// Accumulate forces on every molecule of underspace (nx, ny, nz) from its
// own molecules and from the neighbouring underspaces within one cell in
// every direction (a 3x3x3 neighbourhood, clipped at the grid boundary).
__device__ void d_calculateNewForcesForUnderspace(CUDASpace *cs, int nx, int ny, int nz)
{
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    CUDAUnderspace *centralSpace = &underspaces[LINEAR(cs, nx, ny, nz)];
    const int closest = 1;
    for (int dx = -closest; dx <= closest; ++dx) {
        for (int dy = -closest; dy <= closest; ++dy) {
            for (int dz = -closest; dz <= closest; ++dz) {
                const int x = nx + dx;
                const int y = ny + dy;
                const int z = nz + dz;
                // Skip neighbour coordinates that fall outside the grid.
                if (x < 0 || x >= cs->Nx) continue;
                if (y < 0 || y >= cs->Ny) continue;
                if (z < 0 || z >= cs->Nz) continue;
                d_calculateNewForces(centralSpace, &underspaces[LINEAR(cs, x, y, z)], cs);
            }
        }
    }
}
// Add the Lennard-Jones force exerted by every molecule of cus2 onto every
// molecule of cus1, accumulating into cus1's newF vectors.
// NOTE(review): the sigma/epsilon tables are (re)filled with the same
// constants here, on every call and concurrently from every thread. All
// writers store identical values so the data is consistent, but this
// initialization belongs on the host, done once.
__device__ void d_calculateNewForces(CUDAUnderspace *cus1, CUDAUnderspace *cus2, CUDASpace *cs)
{
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 4; ++j) {
            cs->sigma[i][j] = csigma;
            cs->epsilon[i][j] = cepsilon;
        }
    }
    auto molecules1 = GET_POINTER(CUDAMolecule, cus1, cus1->moleculesShift);
    auto molecules2 = GET_POINTER(CUDAMolecule, cus2, cus2->moleculesShift);
    for (size_t i = 0; i < cus1->numberOfMolecules; ++i) {
        for (size_t j = 0; j < cus2->numberOfMolecules; ++j) {
            //if (i == j && cus1 == cus2) continue;
            //Vector r = (*j).r - (*i).r;
            CUDAVector tmp;
            sub(molecules2[j].r, molecules1[i].r, tmp);
            myfloat sq = square(tmp);
            // Zero separation: the molecule interacting with itself (or an
            // exact overlap) — contributes no force.
            if (sq == 0) continue;
            //if (maxDistSquare < sq) continue;
            //(*i).newF += d_Force_LennardJones(r, sq);
            int type1 = molecules1[i].type;
            int type2 = molecules2[j].type;
            myfloat sigma = cs->sigma[type1][type2];
            myfloat epsilon = cs->epsilon[type1][type2];
            //if (3*sigma < sq) continue;
            //int *p = nullptr;
            //if (epsilon == 0) *p = 1;
            d_Force_LennardJones(tmp, sq, tmp, sigma, epsilon);
            add(molecules1[i].newF, tmp, molecules1[i].newF);
        }
    }
}
//==========================================================================
// Operations with CUDASpace
//==========================================================================
// Each kernel below maps one thread to one underspace cell of the
// Nx x Ny x Nz grid; threads outside the grid return immediately.

// Beeman position update for every molecule of the thread's cell.
__global__ void cuda_recalculatePositions(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    d_recalculatePositions_Beeman(&underspaces[LINEAR(cs, nx, ny, nz)], cs->dt);
}

// Beeman velocity update plus wall reflection for the thread's cell.
__global__ void cuda_recalculateSpeeds(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    d_recalculateSpeeds_Beeman(&underspaces[LINEAR(cs, nx, ny, nz)], cs->dt, cs->width, cs->height, cs->depth);
}

// Lennard-Jones force accumulation from the cell's 3x3x3 neighbourhood.
__global__ void cuda_recalculateForces(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    d_calculateNewForcesForUnderspace(cs, nx, ny, nz);
}

// Rotate the per-molecule force history: oldF <- F, F <- newF.
__global__ void cuda_validate(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    CUDAUnderspace& cus = underspaces[LINEAR(cs, nx, ny, nz)];
    auto molecules = GET_POINTER(CUDAMolecule, &cus, cus.moleculesShift);
    for (size_t i = 0; i < cus.numberOfMolecules; ++i) {
        mov(molecules[i].F, molecules[i].oldF);
        mov(molecules[i].newF, molecules[i].F);
    }
}

// Clear the newF accumulators ahead of the force pass.
__global__ void cuda_dropNewF(CUDASpace *cs)
{
    const size_t nx = blockDim.x * blockIdx.x + threadIdx.x;
    const size_t ny = blockDim.y * blockIdx.y + threadIdx.y;
    const size_t nz = blockDim.z * blockIdx.z + threadIdx.z;
    if (nx >= cs->Nx || ny >= cs->Ny || nz >= cs->Nz) return;
    auto underspaces = GET_POINTER(CUDAUnderspace, cs, cs->underspacesShift);
    CUDAUnderspace& cus = underspaces[LINEAR(cs, nx, ny, nz)];
    auto molecules = GET_POINTER(CUDAMolecule, &cus, cus.moleculesShift);
    for (size_t i = 0; i < cus.numberOfMolecules; ++i) {
        for (int axis = 0; axis < 3; ++axis)
            molecules[i].newF.v[axis] = 0;
    }
}
//__global__ void cuda_recalculateForcesForMolecules(CUDASpace *cs)
//{
// auto allMolecules = GET_POINTER(CUDAMolecule, cs, cs->underspacesShift + cs->Nx*cs->Ny*cs->Nz*sizeof(CUDAUnderspace));
// size_t molecule = blockDim.x * blockIdx.x + threadIdx.x;
//
//
//}
// Run one full simulation step on the device:
//   positions -> clear newF -> accumulate forces -> speeds -> rotate history.
// Each phase is a separate launch followed by a device sync so the next
// phase only ever sees completed results.
void cuda_oneStep(CUDASpace *d_cs, int Nx, int Ny, int Nz)
{
    const int numberOfCores = 512;
    // Distribute a block's threads evenly over the three axes.
    const int coresPerDim = int(pow(numberOfCores, 1.0 / 3.0));
    dim3 blocks(coresPerDim, coresPerDim, coresPerDim);
    dim3 grid(Nx / coresPerDim + 1, Ny / coresPerDim + 1, Nz / coresPerDim + 1);
    cuda_recalculatePositions <<<grid, blocks>>> (d_cs);
    cudaDeviceSynchronize();
    cuda_dropNewF <<<grid, blocks>>> (d_cs);
    cudaDeviceSynchronize();
    cuda_recalculateForces <<<grid, blocks>>> (d_cs);
    cudaDeviceSynchronize();
    cuda_recalculateSpeeds <<<grid, blocks>>> (d_cs);
    cudaDeviceSynchronize();
    cuda_validate <<<grid, blocks>>> (d_cs);
    cudaDeviceSynchronize();
}
// Uploads the fully packed, pointer-free space (it uses offsets instead of
// pointers, so one flat copy suffices) to the device.
// Returns the device pointer, or nullptr when allocation or copy fails
// (the original ignored both error codes and could return a garbage
// pointer on failure).
extern CUDASpace* moveFromHost(CUDASpace *h_cs, size_t wholeSize/* = 0*/)
{
	if (wholeSize == 0)
		wholeSize = WHOLE_SIZE_OF_SPACE(h_cs);
	CUDASpace *d_cs = nullptr;
	if (cudaMalloc(&d_cs, wholeSize) != cudaSuccess) //allocate device memory for all data
		return nullptr;
	if (cudaMemcpy(d_cs, h_cs, wholeSize, cudaMemcpyHostToDevice) != cudaSuccess) { //copy all data from h_cs (host) to d_cs (device)
		cudaFree(d_cs);
		return nullptr;
	}
	//delete[] reinterpret_cast<byte*>(h_cs); //delete data from host memory;
	return d_cs;
}
// Downloads the packed space from the device into host memory and frees
// the device copy. The caller may supply a pre-allocated buffer in h_p;
// otherwise one of wholeSize bytes is allocated here and owned by the
// caller afterwards.
CUDASpace* moveFromDevice(CUDASpace *d_cs, size_t wholeSize/* = 0*/, byte *h_p/* = nullptr*/)
{
	// The total size cannot yet be derived from the device side alone.
	if (wholeSize == 0)
		throw 0; //TODO copy CUDASpace only from device and calculate wholeSize
	// Reuse the caller's buffer when provided, otherwise allocate one.
	byte *dst = (h_p != nullptr) ? h_p : new byte[wholeSize];
	cudaMemcpy(dst, d_cs, wholeSize, cudaMemcpyDeviceToHost); //copy all data
	cudaFree(d_cs); //device copy is no longer needed
	return reinterpret_cast<CUDASpace*>(dst); //reinterpret the flat buffer
}
/*
void freeDeviceMem(CUDASpace *d_cs)
{
for (size_t i = 0; i < d_cs->Nx; ++i) {
for (size_t j = 0; j < d_cs->Ny; ++j) {
for (size_t k = 0; k < d_cs->Nz; ++k) {
cudaFree(d_cs->underspaces[i][j][k].molecules);
}
cudaFree(d_cs->underspaces[i][j]);
}
cudaFree(d_cs->underspaces[i]);
}
cudaFree(d_cs->underspaces);
cudaFree(d_cs);
}
void freeHostMem(CUDASpace *h_cs)
{
for (size_t i = 0; i < h_cs->Nx; ++i) {
for (size_t j = 0; j < h_cs->Ny; ++j) {
for (size_t k = 0; k < h_cs->Nz; ++k) {
delete h_cs->underspaces[i][j][k].molecules;
}
delete h_cs->underspaces[i][j];
}
delete h_cs->underspaces[i];
}
delete h_cs->underspaces;
delete h_cs;
}
*/
|
03574c8a80955f0a3a49afad4a3f3c3f42855520.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include "cupoch_pybind/device_vector_wrapper.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace wrapper {
// Out-of-line definitions for device_vector_wrapper<Type>: a thin owner of
// a utility::device_vector exposed to Python, supporting host<->device
// transfer and elementwise arithmetic through thrust.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(){};
// Copy-construct from another wrapper (device-to-device copy of data_).
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const device_vector_wrapper<Type>& other)
: data_(other.data_) {}
// Construct by uploading a pinned host vector to the device.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const utility::pinned_host_vector<Type>& other)
: data_(other) {}
// Construct by copying `size` elements from a raw host pointer.
// NOTE(review): h_pointer must reference at least size*sizeof(Type) bytes
// of host memory — not verifiable here.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const void* h_pointer, int size) : data_(size) {
cudaSafeCall(hipMemcpy(thrust::raw_pointer_cast(data_.data()), h_pointer,
size * sizeof(Type), hipMemcpyHostToDevice));
}
// Construct by copying an existing device vector.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const utility::device_vector<Type>& other)
: data_(other) {}
// Move-construct: steals the device buffer, no copy.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
utility::device_vector<Type>&& other) noexcept
: data_(std::move(other)) {}
template <typename Type>
device_vector_wrapper<Type>::~device_vector_wrapper(){};
// Copy-assign from another wrapper (device-to-device copy).
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator=(
const device_vector_wrapper<Type>& other) {
data_ = other.data_;
return *this;
}
// Elementwise += with a device vector.
// NOTE(review): assumes other has at least data_.size() elements; no size
// check is performed before the transform.
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator+=(
const utility::device_vector<Type>& other) {
thrust::transform(data_.begin(), data_.end(), other.begin(), data_.begin(),
thrust::plus<Type>());
return *this;
}
// Elementwise += with a host vector: copied to a temporary device vector
// first, then added in place.
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator+=(
const thrust::host_vector<Type>& other) {
utility::device_vector<Type> dvo = other;
thrust::transform(data_.begin(), data_.end(), dvo.begin(), data_.begin(),
thrust::plus<Type>());
return *this;
}
// Elementwise -= with a device vector (same size assumption as +=).
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator-=(
const utility::device_vector<Type>& other) {
thrust::transform(data_.begin(), data_.end(), other.begin(), data_.begin(),
thrust::minus<Type>());
return *this;
}
// Elementwise -= with a host vector (uploaded to a temporary first).
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator-=(
const thrust::host_vector<Type>& other) {
utility::device_vector<Type> dvo = other;
thrust::transform(data_.begin(), data_.end(), dvo.begin(), data_.begin(),
thrust::minus<Type>());
return *this;
}
// Number of elements held on the device.
template <typename Type>
size_t device_vector_wrapper<Type>::size() const {
return data_.size();
}
// True when the device vector holds no elements.
template <typename Type>
bool device_vector_wrapper<Type>::empty() const {
return data_.empty();
}
// Append a single element to the device vector.
template <typename Type>
void device_vector_wrapper<Type>::push_back(const Type& x) {
data_.push_back(x);
}
// Download the device buffer into a pinned host vector.
template <typename Type>
utility::pinned_host_vector<Type> device_vector_wrapper<Type>::cpu() const {
utility::pinned_host_vector<Type> ans(data_.size());
cudaSafeCall(hipMemcpy(ans.data(), thrust::raw_pointer_cast(data_.data()),
sizeof(Type) * data_.size(), hipMemcpyDeviceToHost));
return ans;
}
// Explicit instantiations for every element type exposed to the bindings.
template class device_vector_wrapper<Eigen::Vector3f>;
template class device_vector_wrapper<Eigen::Vector2f>;
template class device_vector_wrapper<Eigen::Vector3i>;
template class device_vector_wrapper<Eigen::Vector2i>;
template class device_vector_wrapper<Eigen::Matrix<float, 33, 1>>;
template class device_vector_wrapper<float>;
template class device_vector_wrapper<int>;
template class device_vector_wrapper<size_t>;
template class device_vector_wrapper<geometry::OccupancyVoxel>;
template class device_vector_wrapper<collision::PrimitivePack>;
// Copies a wrapper's device buffer into a caller-provided device vector.
template <typename Type>
void FromWrapper(utility::device_vector<Type>& dv,
const device_vector_wrapper<Type>& vec) {
dv = vec.data_;
}
template void FromWrapper<Eigen::Vector3f>(
utility::device_vector<Eigen::Vector3f>& dv,
const device_vector_wrapper<Eigen::Vector3f>& vec);
template void FromWrapper<Eigen::Vector2f>(
utility::device_vector<Eigen::Vector2f>& dv,
const device_vector_wrapper<Eigen::Vector2f>& vec);
template void FromWrapper<Eigen::Vector3i>(
utility::device_vector<Eigen::Vector3i>& dv,
const device_vector_wrapper<Eigen::Vector3i>& vec);
template void FromWrapper<Eigen::Vector2i>(
utility::device_vector<Eigen::Vector2i>& dv,
const device_vector_wrapper<Eigen::Vector2i>& vec);
template void FromWrapper<Eigen::Matrix<float, 33, 1>>(
utility::device_vector<Eigen::Matrix<float, 33, 1>>& dv,
const device_vector_wrapper<Eigen::Matrix<float, 33, 1>>& vec);
template void FromWrapper<float>(utility::device_vector<float>& dv,
const device_vector_wrapper<float>& vec);
template void FromWrapper<int>(utility::device_vector<int>& dv,
const device_vector_wrapper<int>& vec);
template void FromWrapper<size_t>(utility::device_vector<size_t>& dv,
const device_vector_wrapper<size_t>& vec);
template void FromWrapper<geometry::OccupancyVoxel>(
utility::device_vector<geometry::OccupancyVoxel>& dv,
const device_vector_wrapper<geometry::OccupancyVoxel>& vec);
template void FromWrapper<collision::PrimitivePack>(
utility::device_vector<collision::PrimitivePack>& dv,
const device_vector_wrapper<collision::PrimitivePack>& vec);
// Extra instantiations for unsigned long, which on Windows (LLP64) is a
// distinct type from size_t and so is not covered by the ones above.
#if defined(_WIN32)
template class device_vector_wrapper<unsigned long>;
template void FromWrapper<unsigned long>(
utility::device_vector<unsigned long>& dv,
const device_vector_wrapper<unsigned long>& vec);
#endif
} // namespace wrapper
} // namespace cupoch | 03574c8a80955f0a3a49afad4a3f3c3f42855520.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include "cupoch_pybind/device_vector_wrapper.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace wrapper {
// Out-of-line definitions for device_vector_wrapper<Type>: a thin owner of
// a utility::device_vector exposed to Python, supporting host<->device
// transfer and elementwise arithmetic through thrust.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(){};
// Copy-construct from another wrapper (device-to-device copy of data_).
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const device_vector_wrapper<Type>& other)
: data_(other.data_) {}
// Construct by uploading a pinned host vector to the device.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const utility::pinned_host_vector<Type>& other)
: data_(other) {}
// Construct by copying `size` elements from a raw host pointer.
// NOTE(review): h_pointer must reference at least size*sizeof(Type) bytes
// of host memory — not verifiable here.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const void* h_pointer, int size) : data_(size) {
cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(data_.data()), h_pointer,
size * sizeof(Type), cudaMemcpyHostToDevice));
}
// Construct by copying an existing device vector.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
const utility::device_vector<Type>& other)
: data_(other) {}
// Move-construct: steals the device buffer, no copy.
template <typename Type>
device_vector_wrapper<Type>::device_vector_wrapper(
utility::device_vector<Type>&& other) noexcept
: data_(std::move(other)) {}
template <typename Type>
device_vector_wrapper<Type>::~device_vector_wrapper(){};
// Copy-assign from another wrapper (device-to-device copy).
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator=(
const device_vector_wrapper<Type>& other) {
data_ = other.data_;
return *this;
}
// Elementwise += with a device vector.
// NOTE(review): assumes other has at least data_.size() elements; no size
// check is performed before the transform.
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator+=(
const utility::device_vector<Type>& other) {
thrust::transform(data_.begin(), data_.end(), other.begin(), data_.begin(),
thrust::plus<Type>());
return *this;
}
// Elementwise += with a host vector: copied to a temporary device vector
// first, then added in place.
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator+=(
const thrust::host_vector<Type>& other) {
utility::device_vector<Type> dvo = other;
thrust::transform(data_.begin(), data_.end(), dvo.begin(), data_.begin(),
thrust::plus<Type>());
return *this;
}
// Elementwise -= with a device vector (same size assumption as +=).
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator-=(
const utility::device_vector<Type>& other) {
thrust::transform(data_.begin(), data_.end(), other.begin(), data_.begin(),
thrust::minus<Type>());
return *this;
}
// Elementwise -= with a host vector (uploaded to a temporary first).
template <typename Type>
device_vector_wrapper<Type>& device_vector_wrapper<Type>::operator-=(
const thrust::host_vector<Type>& other) {
utility::device_vector<Type> dvo = other;
thrust::transform(data_.begin(), data_.end(), dvo.begin(), data_.begin(),
thrust::minus<Type>());
return *this;
}
// Number of elements held on the device.
template <typename Type>
size_t device_vector_wrapper<Type>::size() const {
return data_.size();
}
// True when the device vector holds no elements.
template <typename Type>
bool device_vector_wrapper<Type>::empty() const {
return data_.empty();
}
// Append a single element to the device vector.
template <typename Type>
void device_vector_wrapper<Type>::push_back(const Type& x) {
data_.push_back(x);
}
// Download the device buffer into a pinned host vector.
template <typename Type>
utility::pinned_host_vector<Type> device_vector_wrapper<Type>::cpu() const {
utility::pinned_host_vector<Type> ans(data_.size());
cudaSafeCall(cudaMemcpy(ans.data(), thrust::raw_pointer_cast(data_.data()),
sizeof(Type) * data_.size(), cudaMemcpyDeviceToHost));
return ans;
}
// Explicit instantiations for every element type exposed to the bindings.
template class device_vector_wrapper<Eigen::Vector3f>;
template class device_vector_wrapper<Eigen::Vector2f>;
template class device_vector_wrapper<Eigen::Vector3i>;
template class device_vector_wrapper<Eigen::Vector2i>;
template class device_vector_wrapper<Eigen::Matrix<float, 33, 1>>;
template class device_vector_wrapper<float>;
template class device_vector_wrapper<int>;
template class device_vector_wrapper<size_t>;
template class device_vector_wrapper<geometry::OccupancyVoxel>;
template class device_vector_wrapper<collision::PrimitivePack>;
// Copies a wrapper's device buffer into a caller-provided device vector.
template <typename Type>
void FromWrapper(utility::device_vector<Type>& dv,
const device_vector_wrapper<Type>& vec) {
dv = vec.data_;
}
template void FromWrapper<Eigen::Vector3f>(
utility::device_vector<Eigen::Vector3f>& dv,
const device_vector_wrapper<Eigen::Vector3f>& vec);
template void FromWrapper<Eigen::Vector2f>(
utility::device_vector<Eigen::Vector2f>& dv,
const device_vector_wrapper<Eigen::Vector2f>& vec);
template void FromWrapper<Eigen::Vector3i>(
utility::device_vector<Eigen::Vector3i>& dv,
const device_vector_wrapper<Eigen::Vector3i>& vec);
template void FromWrapper<Eigen::Vector2i>(
utility::device_vector<Eigen::Vector2i>& dv,
const device_vector_wrapper<Eigen::Vector2i>& vec);
template void FromWrapper<Eigen::Matrix<float, 33, 1>>(
utility::device_vector<Eigen::Matrix<float, 33, 1>>& dv,
const device_vector_wrapper<Eigen::Matrix<float, 33, 1>>& vec);
template void FromWrapper<float>(utility::device_vector<float>& dv,
const device_vector_wrapper<float>& vec);
template void FromWrapper<int>(utility::device_vector<int>& dv,
const device_vector_wrapper<int>& vec);
template void FromWrapper<size_t>(utility::device_vector<size_t>& dv,
const device_vector_wrapper<size_t>& vec);
template void FromWrapper<geometry::OccupancyVoxel>(
utility::device_vector<geometry::OccupancyVoxel>& dv,
const device_vector_wrapper<geometry::OccupancyVoxel>& vec);
template void FromWrapper<collision::PrimitivePack>(
utility::device_vector<collision::PrimitivePack>& dv,
const device_vector_wrapper<collision::PrimitivePack>& vec);
// Extra instantiations for unsigned long, which on Windows (LLP64) is a
// distinct type from size_t and so is not covered by the ones above.
#if defined(_WIN32)
template class device_vector_wrapper<unsigned long>;
template void FromWrapper<unsigned long>(
utility::device_vector<unsigned long>& dv,
const device_vector_wrapper<unsigned long>& vec);
#endif
} // namespace wrapper
} // namespace cupoch |
816fbf7e1e997c93a1e1bfd1df2cbbfbb77ca827.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ====================================================================== //
// pvcore -- simple parallel computer vision library
// Copyright (C) 2012 Niklas Bergstrm
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// ====================================================================== //
///////////////////////////////////////////////
/////////////////// CONSTANTS /////////////////
///////////////////////////////////////////////
#include "pvcore/keypoint.h"
#include "pvcore/cuda_functions.h"
#include "pvcore/common.h"
#include <math_constants.h>
texture<unsigned char, hipTextureType2D, hipReadModeElementType> tex_8u;
texture<unsigned int, hipTextureType2D, hipReadModeElementType> tex_32u;
texture<float, hipTextureType2D, hipReadModeElementType> tex_32f;
texture<unsigned int, hipTextureType2D, hipReadModeElementType> tex_32u_0;
texture<unsigned int, hipTextureType2D, hipReadModeElementType> tex_32u_1;
texture<uchar4, hipTextureType2D, hipReadModeElementType> tex_8u4_0;
texture<uchar4, hipTextureType2D, hipReadModeElementType> tex_8u4_1;
// Saliency measure at radius 3 on the 8-bit image bound to tex_8u.
// For each pixel p, over the 16 pixels of the radius-3 Bresenham circle
// (the FAST ring):
//   contrast = max |p - ring pixel|            (local contrast)
//   saliency = min |a + b - 2p| over the 8 diametrically opposed
//              ring pairs (a,b)                (second difference)
// Writes saliency/contrast in [0,1] to _dest, or 0 where the normalized
// contrast falls below _threshold (suppresses flat regions and avoids
// dividing by a near-zero contrast). Expects a 2D launch covering the
// _width x _height image; _pitchd is the element pitch of _dest.
__global__ void saliency_3(float* _dest,
int _width,
int _height,
int _pitchd,
float _threshold) {
int y = blockDim.y*blockIdx.y + threadIdx.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
if ( y < _height && x < _width ) {
int ip = tex2D(tex_8u,x,y);
// Get local contrast: max abs difference against the 16 ring pixels,
// walked clockwise starting at (0,-3).
int contrast = abs(ip - tex2D(tex_8u,x,y-3));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+1,y-3)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y-2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+3,y-1)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+3,y)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+3,y+1)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y+2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+1,y+3)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x,y+3)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-1,y+3)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y+2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-3,y+1)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-3,y)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-3,y-1)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y-2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-1,y-3)));
// Normalize from [0,255] to [0,1].
float contrastf = contrast * (1.f/255.f);
// Minimum |second difference| across the 8 opposed ring pairs.
int saliency = abs(tex2D(tex_8u,x,y-3) + tex2D(tex_8u,x,y+3) - 2*ip);
saliency = min(saliency,abs(tex2D(tex_8u,x+1,y-3) + tex2D(tex_8u,x-1,y+3) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+2,y-2) + tex2D(tex_8u,x-2,y+2) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+3,y-1) + tex2D(tex_8u,x-3,y+1) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+3,y) + tex2D(tex_8u,x-3,y) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+3,y+1) + tex2D(tex_8u,x-3,y-1) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+2,y+2) + tex2D(tex_8u,x-2,y-2) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+1,y+3) + tex2D(tex_8u,x-1,y-3) - 2*ip));
float saliencyf = saliency * (1.f/255.f);
_dest[y*_pitchd+x] = contrastf < _threshold ? 0.0f : saliencyf/contrastf;
}
}
// Same saliency measure as saliency_3, but on a radius-6 ring (the radius-3
// offsets doubled). Instead of overwriting _dest, the result is combined
// with the value already stored there via fmax, so running saliency_3
// first and then this kernel keeps the maximum response over both scales.
__global__ void saliency_6(float* _dest,
int _width,
int _height,
int _pitchd,
float _threshold) {
int y = blockDim.y*blockIdx.y + threadIdx.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
if ( y < _height && x < _width ) {
int ip = tex2D(tex_8u,x,y);
// Get local contrast: max abs difference against the 16 pixels of the
// radius-6 ring.
int contrast = abs(ip - tex2D(tex_8u,x,y-6));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y-6)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+4,y-4)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+6,y-2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+6,y )));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+6,y+2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+4,y+4)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y+6)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x ,y+6)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y+6)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-4,y+4)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-6,y+2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-6,y )));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-6,y-2)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-4,y-4)));
contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y-6)));
// Normalize from [0,255] to [0,1].
float contrastf = contrast * (1.f/255.f);
// Minimum |second difference| across the 8 opposed ring pairs.
int saliency = abs(tex2D(tex_8u,x,y-6) + tex2D(tex_8u,x,y+6) - 2*ip);
saliency = min(saliency,abs(tex2D(tex_8u,x+2,y-6) + tex2D(tex_8u,x-2,y+6) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+4,y-4) + tex2D(tex_8u,x-4,y+4) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+6,y-2) + tex2D(tex_8u,x-6,y+2) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+6,y) + tex2D(tex_8u,x-6,y) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+6,y+2) + tex2D(tex_8u,x-6,y-2) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+4,y+4) + tex2D(tex_8u,x-4,y-4) - 2*ip));
saliency = min(saliency,abs(tex2D(tex_8u,x+2,y+6) + tex2D(tex_8u,x-2,y-6) - 2*ip));
float saliencyf = saliency * (1.f/255.f);
float res = contrastf < _threshold ? 0.0f : saliencyf/contrastf;
// Merge with the response of the previous scale already in _dest.
_dest[y*_pitchd+x] = fmax(_dest[y*_pitchd+x],res);
}
}
// Non-maximum suppression on the float saliency map bound to tex_32f.
// A pixel survives only if it is >= all 8 neighbours AND above 0.25;
// otherwise 0 is written.
__global__ void nonmax_sup(float* _dest,
int _width,
int _height,
int _pitchd) {
int y = blockDim.y*blockIdx.y + threadIdx.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
// BUG FIX: _width/_height were accepted but never used, so threads in the
// padded part of the launch grid wrote past the image. Guard like the
// other kernels in this file do.
if (x >= _width || y >= _height) return;
float pixelval = tex2D(tex_32f,x,y);
// Maximum over the 8-neighbourhood (out-of-range coordinates are handled
// by the texture's addressing mode, configured elsewhere).
float tmp = tex2D(tex_32f,x-1,y-1);
tmp = fmax(tmp,tex2D(tex_32f,x ,y-1));
tmp = fmax(tmp,tex2D(tex_32f,x+1,y-1));
tmp = fmax(tmp,tex2D(tex_32f,x-1,y));
tmp = fmax(tmp,tex2D(tex_32f,x+1,y));
tmp = fmax(tmp,tex2D(tex_32f,x-1,y+1));
tmp = fmax(tmp,tex2D(tex_32f,x ,y+1));
tmp = fmax(tmp,tex2D(tex_32f,x+1,y+1));
// Keep only local maxima above the fixed 0.25 response threshold.
_dest[y*_pitchd+x] = (pixelval >= tmp ? (pixelval > 0.25f ? pixelval : 0.0f) : 0.0f);
}
/**
* \brief Extracts features for all points in image tex_32u
*
* \param _dest Destination image of feature points (unsigned char [16])
* \param _width Width of image
* \param _height Height of image
* \param _pitch Elements per row of _dest
*/
__global__ void weak_features_half(unsigned int* _dest,
int _width,
int _height,
int _pitchd) {
int y = blockDim.y*blockIdx.y + threadIdx.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
// Allocate shared memory for 4x16x16 16-byte feature vectors, and add
// extra byte for padding of
__shared__ unsigned int shm[4*4*16*16+16*16];
// Read contributions for eight pixels for four consecutive pixels
unsigned int s0 = (tex2D(tex_32u, x , y-3) & 0xF0F0F0F0) >> 4;
unsigned int s1 = (tex2D(tex_32u, x+1, y-3) & 0xF0F0F0F0) >> 4;
unsigned int s2 = (tex2D(tex_32u, x+2, y-2) & 0xF0F0F0F0) >> 4;
unsigned int s3 = (tex2D(tex_32u, x+3, y-1) & 0xF0F0F0F0) >> 4;
s0 |= tex2D(tex_32u, x+3, y ) & 0xF0F0F0F0;
s1 |= tex2D(tex_32u, x+3, y+1) & 0xF0F0F0F0;
s2 |= tex2D(tex_32u, x+2, y+2) & 0xF0F0F0F0;
s3 |= tex2D(tex_32u, x+1, y+3) & 0xF0F0F0F0;
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+4] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+8] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+12] = s3;
// Read contributions for eight pixels for four consecutive pixels
s0 = (tex2D(tex_32u, x , y+3) & 0xF0F0F0F0) >> 4;
s1 = (tex2D(tex_32u, x-1, y+3) & 0xF0F0F0F0) >> 4;
s2 = (tex2D(tex_32u, x-2, y+2) & 0xF0F0F0F0) >> 4;
s3 = (tex2D(tex_32u, x-3, y+1) & 0xF0F0F0F0) >> 4;
s0 |= tex2D(tex_32u, x-3, y ) & 0xF0F0F0F0;
s1 |= tex2D(tex_32u, x-3, y-1) & 0xF0F0F0F0;
s2 |= tex2D(tex_32u, x-2, y-2) & 0xF0F0F0F0;
s3 |= tex2D(tex_32u, x-1, y-3) & 0xF0F0F0F0;
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+1] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+5] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+9] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+13] = s3;
// Read contributions for eight pixels for four consecutive pixels
s0 = (tex2D(tex_32u, x , y-6) & 0xF0F0F0F0) >> 4;
s1 = (tex2D(tex_32u, x+2, y-6) & 0xF0F0F0F0) >> 4;
s2 = (tex2D(tex_32u, x+4, y-4) & 0xF0F0F0F0) >> 4;
s3 = (tex2D(tex_32u, x+6, y-2) & 0xF0F0F0F0) >> 4;
s0 |= tex2D(tex_32u, x+6, y ) & 0xF0F0F0F0;
s1 |= tex2D(tex_32u, x+6, y+2) & 0xF0F0F0F0;
s2 |= tex2D(tex_32u, x+4, y+4) & 0xF0F0F0F0;
s3 |= tex2D(tex_32u, x+2, y+6) & 0xF0F0F0F0;
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+2] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+6] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+10] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+14] = s3;
// Read contributions for eight pixels for four consecutive pixels
s0 = (tex2D(tex_32u, x , y+6) & 0xF0F0F0F0) >> 4;
s1 = (tex2D(tex_32u, x-2, y+6) & 0xF0F0F0F0) >> 4;
s2 = (tex2D(tex_32u, x-4, y+4) & 0xF0F0F0F0) >> 4;
s3 = (tex2D(tex_32u, x-6, y+2) & 0xF0F0F0F0) >> 4;
s0 |= tex2D(tex_32u, x-6, y ) & 0xF0F0F0F0;
s1 |= tex2D(tex_32u, x-6, y-2) & 0xF0F0F0F0;
s2 |= tex2D(tex_32u, x-4, y-4) & 0xF0F0F0F0;
s3 |= tex2D(tex_32u, x-2, y-6) & 0xF0F0F0F0;
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+3] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+7] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+11] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+15] = s3;
__syncthreads();
// Store shared memory in _dest
#pragma unroll
for (int i=0; i<16; ++i) {
_dest[y*_pitchd+16*blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x] = shm[17*16*threadIdx.y + 17*i + threadIdx.x];
}
}
// Full-byte variant of weak_features_half: samples only 8 ring positions
// per radius (every second offset) but keeps the complete 8-bit value of
// each sample instead of the upper nibble. Staging, transpose() use and
// the 17-word-stride shared-memory layout are identical to the half
// variant.
// NOTE(review): blockDim is assumed to be 16x16 and _width/_height are
// unused (no bounds guard) — the launch must tile the image exactly.
__global__ void weak_features_full(unsigned int* _dest,
int _width,
int _height,
int _pitchd) {
int y = blockDim.y*blockIdx.y + threadIdx.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
// Allocate shared memory for 4x16x16 16-byte feature vectors, plus one
// extra word per 16 (row stride 17) as padding.
__shared__ unsigned int shm[4*4*16*16+16*16];
// Radius-3 ring, right/lower half.
// Read contributions for eight pixels for four consecutive pixels
unsigned int s0 = tex2D(tex_32u, x , y-3);
unsigned int s1 = tex2D(tex_32u, x+2, y-2);
unsigned int s2 = tex2D(tex_32u, x+3, y );
unsigned int s3 = tex2D(tex_32u, x+2, y+2);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+4] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+8] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+12] = s3;
// Radius-3 ring, left/upper half (mirrored offsets).
// Read contributions for eight pixels for four consecutive pixels
s0 = tex2D(tex_32u, x , y+3);
s1 = tex2D(tex_32u, x-2, y+2);
s2 = tex2D(tex_32u, x-3, y );
s3 = tex2D(tex_32u, x-2, y-2);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+1] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+5] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+9] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+13] = s3;
// Radius-6 ring, right/lower half.
// Read contributions for eight pixels for four consecutive pixels
s0 = tex2D(tex_32u, x , y-6);
s1 = tex2D(tex_32u, x+4, y-4);
s2 = tex2D(tex_32u, x+6, y );
s3 = tex2D(tex_32u, x+4, y+4);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+2] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+6] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+10] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+14] = s3;
// Radius-6 ring, left/upper half.
// Read contributions for eight pixels for four consecutive pixels
s0 = tex2D(tex_32u, x , y+6);
s1 = tex2D(tex_32u, x-4, y+4);
s2 = tex2D(tex_32u, x-6, y );
s3 = tex2D(tex_32u, x-4, y-4);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+3] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+7] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+11] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+15] = s3;
__syncthreads();
// Store shared memory in _dest: 16 coalesced words per thread.
#pragma unroll
for (int i=0; i<16; ++i) {
_dest[y*_pitchd+16*blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x] = shm[17*16*threadIdx.y + 17*i + threadIdx.x];
}
}
// Counts, per thread block, how many saliency values in _pts exceed
// _threshold and writes the count to _count[block]. The indexing implies a
// 16x8 thread block where each thread inspects two pixels 8 rows apart,
// i.e. one 16x16 image tile per block.
// NOTE(review): a fully saturated tile yields 256, which wraps to 0 in the
// unsigned char output — this limitation exists in the original as well.
__global__ void count_points(float* _pts,
unsigned char* _count,
float _threshold,
int _pitch) {
__shared__ unsigned char shm[16*8];
// Global indices
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = 2*blockIdx.y*blockDim.y + threadIdx.y;
int idx = y*_pitch + x;
int reductionoffset = 8*_pitch;
// Thread block indices
int shmidx = threadIdx.y*16+threadIdx.x;
// Each thread contributes 0..2 for its two pixels.
shm[shmidx] = (_pts[idx] > _threshold ? 1 : 0) + (_pts[idx+reductionoffset] > _threshold ? 1 : 0);
__syncthreads();
// Tree reduction over the 128 per-thread counts.
// BUG FIX: the original nested the __syncthreads() calls inside the
// `if (shmidx < ...)` branches; a barrier inside divergent control flow
// is undefined behavior because threads past the bound never reach it.
// The barriers now sit outside the conditionals so every thread of the
// block executes each one.
for (int stride = 64; stride > 1; stride >>= 1) {
if (shmidx < stride) {
shm[shmidx] += shm[shmidx + stride];
}
__syncthreads();
}
if (shmidx == 0) {
// Final pair is added in int arithmetic, exactly as the original did.
_count[blockIdx.y*gridDim.x+blockIdx.x] = (shm[0] + shm[1]);
}
}
// _edges holds the binary image
// _ptsperblock holds the block's index of the point list and the number of points
// _edges holds the binary image
// _ptsperblock holds the block's index of the point list and the number of points
//
// Compacts the thresholded feature responses of one 16x16 tile into the
// global keypoint list: the block's 256 pixel scores are sorted
// descending with a bitonic merge sort, and the coordinates of the top
// `npts` responses (npts taken from the precomputed prefix array
// _ptsperblock) are written to _pointlist starting at this block's slot.
__global__ void createPointList(float* _src,
int* _ptsperblock,
pvcore::keypt* _pointlist,
int _pitch) {
// Index of the block - will give how many points should be stored in _pointlist
int blockidx = blockIdx.y*gridDim.x + blockIdx.x;
int ptidx = _ptsperblock[blockidx];
// Number of points to save (difference of consecutive prefix entries).
int npts = _ptsperblock[blockidx+1] - ptidx;
// Thread block indices (0-127)
int shmidx = threadIdx.y*16 + threadIdx.x;
int direction = threadIdx.y*16 + threadIdx.x;
// No need to sort if no points in block. npts is uniform across the
// block, so every thread takes the same branch and skipping the later
// __syncthreads() calls is safe.
if( npts == 0 ) {
return;
}
int reductionoffset = 8*_pitch;
// 272 entries each; only 256 are used (the tail presumably padding).
__shared__ float shm_points[16*17];
__shared__ unsigned char ptlist[16*17];
// Indices to global point (each thread loads two pixels 8 rows apart).
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = 2*blockIdx.y * blockDim.y + threadIdx.y;
int idx = y*_pitch + x;
// Shared memory holding a copy of the feature point image
shm_points[shmidx] = _src[idx];
shm_points[shmidx+128] = _src[idx+reductionoffset];
// Shared memory holding the (local) indices of the feature points;
// each index encodes x in the low nibble and y in the high nibble.
ptlist[shmidx] = shmidx;
ptlist[shmidx+128] = shmidx+128;
__syncthreads();
// Sort 256 numbers, largest first !!POTENTIAL BANK CONFLICTS!!
// Implements bitonic merge sort
// Loop
for( int i=1; i < 256; i <<= 1 ) {
// Outer loop determines sorting direction
int sortdir = (direction & i) > 0 ? 0 : 1;
// Inner loop
for( int j=i; j > 0; j >>= 1 ) {
// New index. For power-of-two j, mask = 0x0FFFFFFF*j keeps the bits
// >= j in (shmidx & mask) and the bits < j in (shmidx & ~mask), so
// tidx = 2*shmidx - (shmidx mod j): the standard bitonic mapping.
int mask = 0x0FFFFFFF * j;
int tidx = ((shmidx&mask) << 1) + (shmidx & ~mask);
// atomicSort() (defined elsewhere) presumably compare-swaps
// shm_points[tidx] with shm_points[tidx+j], keeping ptlist in step.
atomicSort(shm_points,ptlist,tidx,j,j*sortdir);
__syncthreads();
}
}
// Save points in list: decode local x (low nibble) / y (high nibble)
// back to global coordinates for the npts strongest responses.
if( shmidx < npts ) {
_pointlist[ptidx + shmidx].x = blockIdx.x * blockDim.x + (ptlist[shmidx] & 0x0F);
_pointlist[ptidx + shmidx].y = 2*blockIdx.y * blockDim.y + ((ptlist[shmidx] & 0xF0) >> 4);
}
// _edges[idx] = shm_points[shmidx];
// _edges[idx+reductionoffset] = shm_points[shmidx+128];
}
// Hamming distance between two 128-bit binary descriptors stored as
// four 32-bit words each.
__device__ inline int bin_dist(unsigned int* _src,
unsigned int* _ref) {
    int dist = 0;
#pragma unroll
    for (int i = 0; i < 4; ++i) {
        // Count the bits where the descriptors disagree.
        dist += __popc(_src[i] ^ _ref[i]);
    }
    return dist;
}
// Squared Euclidean distance between two descriptors laid out as _npts
// uchar4 elements (four 8-bit channels per element).
__device__ inline unsigned long L2_dist(uchar4* _src,
uchar4* _ref,
int _npts) {
    unsigned long acc = 0;
    for (int i = 0; i < _npts; ++i) {
        // Channel differences promote to int, so no unsigned wraparound.
        const int dx = _src[i].x - _ref[i].x;
        const int dy = _src[i].y - _ref[i].y;
        const int dz = _src[i].z - _ref[i].z;
        const int dw = _src[i].w - _ref[i].w;
        acc += dx*dx;
        acc += dy*dy;
        acc += dz*dz;
        acc += dw*dw;
    }
    return acc;
}
// One thread per keypoint: exhaustively scans a (2*_area+1)^2 window around
// the keypoint position for the candidate descriptor (tex_8u4_1) with the
// smallest L2 distance to the reference descriptor (tex_8u4_0). Each
// descriptor is 16 bytes, fetched as four consecutive uchar4 texels.
template <int _area>
__global__ void best_L2_match(const pvcore::keypt* _srcpts,
                              pvcore::keypt* _destpts) {

    int gid = blockDim.x*blockIdx.x + threadIdx.x;
    pvcore::keypt pt = _srcpts[gid];

    // Reference descriptor at the keypoint itself
    uchar4 src[4], ref[4];
    ref[0] = tex2D(tex_8u4_0, 4*pt.x,   pt.y);
    ref[1] = tex2D(tex_8u4_0, 4*pt.x+1, pt.y);
    ref[2] = tex2D(tex_8u4_0, 4*pt.x+2, pt.y);
    ref[3] = tex2D(tex_8u4_0, 4*pt.x+3, pt.y);

    // Default to the original position if nothing better is found
    pvcore::keypt destpt;
    destpt.x = pt.x;
    destpt.y = pt.y;

    // One more than the largest possible distance (16 components, max 255)
    unsigned long dist = 255*255*16+1;

#pragma unroll
    for (int y=-_area; y<=_area; ++y) {
#pragma unroll
        for (int x=-_area; x<=_area; ++x) {
            src[0] = tex2D(tex_8u4_1, 4*(pt.x+x),   pt.y+y);
            src[1] = tex2D(tex_8u4_1, 4*(pt.x+x)+1, pt.y+y);
            src[2] = tex2D(tex_8u4_1, 4*(pt.x+x)+2, pt.y+y);
            src[3] = tex2D(tex_8u4_1, 4*(pt.x+x)+3, pt.y+y);

            unsigned long t = L2_dist(src,ref,4);
            if (t < dist) {
                dist = t;
                destpt.x = pt.x+x;
                destpt.y = pt.y+y;
            }
        }
    }

    _destpts[gid] = destpt;
}
// One thread per keypoint: scans a (2*_area+1)^2 window around the keypoint
// position for the binary descriptor (tex_32u_1) closest in Hamming distance
// to the reference descriptor (tex_32u_0). Descriptors are 128 bits, read as
// four 32-bit texels.
template <int _area>
__global__ void best_bin_match(const pvcore::keypt* _srcpts,
                               pvcore::keypt* _destpts) {

    int gid = blockDim.x*blockIdx.x + threadIdx.x;
    pvcore::keypt pt = _srcpts[gid];

    // Reference descriptor at the keypoint itself
    unsigned int src[4], ref[4];
    ref[0] = tex2D(tex_32u_0, 4*pt.x,   pt.y);
    ref[1] = tex2D(tex_32u_0, 4*pt.x+1, pt.y);
    ref[2] = tex2D(tex_32u_0, 4*pt.x+2, pt.y);
    ref[3] = tex2D(tex_32u_0, 4*pt.x+3, pt.y);

    // Default to the original position if nothing better is found
    pvcore::keypt destpt;
    destpt.x = pt.x;
    destpt.y = pt.y;

    // One more than the maximum Hamming distance of a 128-bit descriptor
    int dist = 129;

#pragma unroll
    for (int y=-_area; y<=_area; ++y) {
#pragma unroll
        for (int x=-_area; x<=_area; ++x) {
            src[0] = tex2D(tex_32u_1, 4*(pt.x+x),   pt.y+y);
            src[1] = tex2D(tex_32u_1, 4*(pt.x+x)+1, pt.y+y);
            src[2] = tex2D(tex_32u_1, 4*(pt.x+x)+2, pt.y+y);
            src[3] = tex2D(tex_32u_1, 4*(pt.x+x)+3, pt.y+y);

            // '<=' keeps the last of equally good candidates
            int t = bin_dist(src,ref);
            if (t <= dist) {
                dist = t;
                destpt.x = pt.x+x;
                destpt.y = pt.y+y;
            }
        }
    }

    _destpts[gid] = destpt;
}
// Fills a pitched 2D buffer with zeros, one element per thread.
// NOTE(review): no bounds guard -- assumes the launch grid exactly covers
// the buffer; confirm at the call sites.
template <typename T>
__global__ void zero(T* _dest, int _pitch) {
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    _dest[row*_pitch + col] = (T)0;
}
// Zeroes both velocity components of a pitched keypoint-velocity buffer,
// one entry per thread.
// NOTE(review): no bounds guard -- assumes the launch grid exactly covers
// the buffer; confirm at the call sites.
__global__ void zeroVel(pvcore::keypt_vel* _vel, int _pitch) {
    int idx = (blockDim.y*blockIdx.y + threadIdx.y)*_pitch
            +  blockDim.x*blockIdx.x + threadIdx.x;
    _vel[idx].x = 0.0f;
    _vel[idx].y = 0.0f;
}
// Scatters a keypoint list into an image: writes 1.0f at each keypoint's
// (x,y) position in the pitched float image _dest. One thread per keypoint.
__global__ void fill_features(pvcore::keypt* _features,
                              float* _dest,
                              int _nfeatures,
                              int _pitchd) {
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if( idx >= _nfeatures ) {
        return;
    }
    pvcore::keypt pt = _features[idx];
    _dest[pt.y*_pitchd + pt.x] = 1.0f;
}
// Predicts each keypoint's next position as position + velocity,
// one thread per keypoint.
__global__ void predict_points(pvcore::keypt* _srcpt, pvcore::keypt_vel* _srcvel, pvcore::keypt* _destpt, int _npts) {
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx >= _npts) {
        return;
    }
    _destpt[idx].x = _srcpt[idx].x + _srcvel[idx].x;
    _destpt[idx].y = _srcpt[idx].y + _srcvel[idx].y;
}
// For even-coordinate interior pixels: if the 3x3 neighborhood in the
// keypoint image bound to tex_32f sums to zero (no existing keypoint
// nearby), copy the candidate value from _src into _dest. Border pixels
// are left untouched. One thread per even (x,y) pair.
__global__ void add_even(const float* _src,
                         float* _dest,
                         int _width,
                         int _height,
                         int _pitchd) {
    int y = (blockDim.y*blockIdx.y + threadIdx.y)*2;
    int x = (blockDim.x*blockIdx.x + threadIdx.x)*2;
    // BUGFIX: removed an unused 18x18 __shared__ float array that only
    // wasted ~1.3 KB of shared memory per block.
    if (y>0 && y<_height-1 && x>0 && x<_width-1) {
        // Sum of the 3x3 neighborhood (same accumulation order as before)
        float sum = 0.0f;
        for (int dy=-1; dy<=1; ++dy) {
            for (int dx=-1; dx<=1; ++dx) {
                sum += tex2D( tex_32f, x+dx, y+dy);
            }
        }
        // No keypoint in the neighborhood -> promote this candidate
        if (sum == 0) {
            _dest[y*_pitchd+x] = _src[y*_pitchd+x];
        }
    }
}
// Odd-coordinate counterpart of add_even: if the 3x3 neighborhood in the
// keypoint image bound to tex_32f sums to zero, copy the candidate value
// from _src into _dest. One thread per odd (x,y) pair.
__global__ void add_odd(const float* _src,
                        float* _dest,
                        int _width,
                        int _height,
                        int _pitchd) {
    int y = (blockDim.y*blockIdx.y + threadIdx.y)*2+1;
    int x = (blockDim.x*blockIdx.x + threadIdx.x)*2+1;
    // BUGFIX: removed an unused 18x18 __shared__ float array that only
    // wasted ~1.3 KB of shared memory per block.
    if (y<_height-1 && x<_width-1) {
        // Sum of the 3x3 neighborhood (same accumulation order as before)
        float sum = 0.0f;
        for (int dy=-1; dy<=1; ++dy) {
            for (int dx=-1; dx<=1; ++dx) {
                sum += tex2D( tex_32f, x+dx, y+dy);
            }
        }
        // No keypoint in the neighborhood -> promote this candidate
        if (sum == 0) {
            _dest[y*_pitchd+x] = _src[y*_pitchd+x];
        }
    }
}
namespace pvcore {
// The results from this method will differ. Consider implementing own
// Resizes a pitched 2D image by _scale using NPP bilinear interpolation.
// The channel count is inferred from _pitchs/_width (1, 3 or 4) and the
// element type (8u vs 32f) from sizeof(T).
// NOTE(review): _pitchs/_width only equals the channel count when rows
// carry no padding -- confirm for padded pitches.
// NOTE(review): the destination ROI starts at (1,1), not (0,0) -- verify
// this one-pixel offset is intentional.
// NPP status codes are discarded; the function always reports hipSuccess.
template <typename T>
hipError_t __resizeGPU(const T* _src,
                       T* _dest,
                       unsigned int _width,
                       unsigned int _height,
                       unsigned int _pitchs,
                       unsigned int _pitchd,
                       double _scale) {
    NppiSize osz; osz.width = _width; osz.height = _height;
    NppiRect oroi; oroi.x = 0; oroi.y = 0; oroi.width = _width; oroi.height = _height;
    NppiRect droi; droi.x = 1; droi.y = 1; droi.width = _width*_scale; droi.height = _height*_scale;
    switch (sizeof(T)) {
        case 4:
            // 32-bit float pixels: NPP step arguments are in bytes (pitch*4)
            if (_pitchs/_width == 1) {
                nppiResizeSqrPixel_32f_C1R((const Npp32f*)_src, osz, _pitchs*4, oroi, (Npp32f*)_dest, _pitchd*4, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
            } else if (_pitchs/_width == 3) {
                nppiResizeSqrPixel_32f_C3R((const Npp32f*)_src, osz, _pitchs*4, oroi, (Npp32f*)_dest, _pitchd*4, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
            } else if (_pitchs/_width == 4) {
                nppiResizeSqrPixel_32f_C4R((const Npp32f*)_src, osz, _pitchs*4, oroi, (Npp32f*)_dest, _pitchd*4, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
            }
            break;
        case 1:
            // 8-bit pixels: pitches are already byte counts
            if (_pitchs/_width == 1) {
                nppiResizeSqrPixel_8u_C1R((const Npp8u*)_src, osz, _pitchs, oroi, (Npp8u*)_dest, _pitchd, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
            } else if (_pitchs/_width == 3) {
                nppiResizeSqrPixel_8u_C3R((const Npp8u*)_src, osz, _pitchs, oroi, (Npp8u*)_dest, _pitchd, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
            } else if (_pitchs/_width == 4) {
                nppiResizeSqrPixel_8u_C4R((const Npp8u*)_src, osz, _pitchs, oroi, (Npp8u*)_dest, _pitchd, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
            }
            break;
        default:
            // Unsupported element size: silently does nothing
            break;
    }
    return hipSuccess;
}
// The results from this method will differ. Consider implementing own
// Gaussian-filters a pitched 2D image with a 3x3 (default) or 5x5 kernel
// via NPP. Channel count is inferred from _pitchs/_width (1, 3 or 4) and
// the element type (8u vs 32f) from sizeof(T).
// NOTE(review): _pitchs/_width only equals the channel count when rows
// carry no padding -- confirm for padded pitches.
// NPP status codes are discarded; the function always reports hipSuccess.
template <typename T>
hipError_t __filterGaussGPU(const T* _src,
                            T* _dest,
                            unsigned int _width,
                            unsigned int _height,
                            unsigned int _pitchs,
                            unsigned int _pitchd,
                            int _mask) {
    NppiSize osz; osz.width = _width; osz.height = _height;
    // Any _mask other than 5 falls back to the 3x3 kernel
    NppiMaskSize mask_size = NPP_MASK_SIZE_3_X_3;
    if (_mask == 5) {
        mask_size = NPP_MASK_SIZE_5_X_5;
    }
    switch (sizeof(T)) {
        case 4:
            // 32-bit float pixels: NPP step arguments are in bytes (pitch*4)
            if (_pitchs/_width == 1) {
                nppiFilterGauss_32f_C1R((const Npp32f*)_src, _pitchs*4, (Npp32f*)_dest, _pitchd*4, osz, mask_size);
            } else if (_pitchs/_width == 3) {
                nppiFilterGauss_32f_C3R((const Npp32f*)_src, _pitchs*4, (Npp32f*)_dest, _pitchd*4, osz, mask_size);
            } else if (_pitchs/_width == 4) {
                nppiFilterGauss_32f_C4R((const Npp32f*)_src, _pitchs*4, (Npp32f*)_dest, _pitchd*4, osz, mask_size);
            }
            break;
        case 1:
            // 8-bit pixels: pitches are already byte counts
            if (_pitchs/_width == 1) {
                nppiFilterGauss_8u_C1R((const Npp8u*)_src, _pitchs, (Npp8u*)_dest, _pitchd, osz, mask_size);
            } else if (_pitchs/_width == 3) {
                nppiFilterGauss_8u_C3R((const Npp8u*)_src, _pitchs, (Npp8u*)_dest, _pitchd, osz, mask_size);
            } else if (_pitchs/_width == 4) {
                nppiFilterGauss_8u_C4R((const Npp8u*)_src, _pitchs, (Npp8u*)_dest, _pitchd, osz, mask_size);
            }
            break;
        default:
            // Unsupported element size: silently does nothing
            break;
    }
    return hipSuccess;
}
// Per-scale scratch buffers (device + host) for the semi-dense keypoint
// pipeline. One instance per pyramid level, held in the global gKeypoints
// array managed by __initKeypointBuffer/__freeKeypointBuffer.
struct semiDenseKeypointStruct {
    static int nscales;   // number of pyramid scales allocated
    static bool inited;   // whether the global buffers exist
    // Filtered source images
    unsigned char* scale3;   // source after one Gaussian pass
    unsigned char* scale6;   // scale3 after a second Gaussian pass
    // Saliency image
    float* saliency;
    // Number of feature points in cuda grid
    unsigned char* ptcount;     // per-block keypoint counts (device)
    unsigned char* ptcount_h;   // host copy of ptcount
    // Accumulated feature points in the grid
    int* idxvector;             // prefix sums of ptcount, idxvector[0]==0 (device)
    int* idxvector_h;           // host copy of idxvector
    // Keypoints
    float* keypoints_img;       // per-pixel keypoint response image
    keypt* keypoints;           // compacted keypoint list (device)
    int nkeypoints;             // valid entries in keypoints
    // Pitch for
    size_t fpitch, ipitch;      // byte pitches: float images / uchar images
    size_t fpitch_h, ipitch_h;  // NOTE(review): never assigned in this file
    pvcore::keypt* featurepts_h; // host staging buffer for keypoint lists
};
// Static member definitions for semiDenseKeypointStruct
int semiDenseKeypointStruct::nscales = 1;
bool semiDenseKeypointStruct::inited = false;
// Global per-scale buffer array, allocated lazily by __initKeypointBuffer()
semiDenseKeypointStruct* gKeypoints = NULL;
// Initializes device and host memory for semi-dense optical flow
// Allocates the global gKeypoints array with one entry per scale; each
// scale i is downsampled by 2^i. Idempotent: returns immediately when the
// buffers already exist.
// NOTE(review): HIP allocation return codes are not checked.
hipError_t __initKeypointBuffer(int _width, int _height, int _nscales, dim3 _blockDim) {
    printf("initing keypoint buffer with %d scales\n",_nscales);
    if (semiDenseKeypointStruct::inited) {
        printf("Already inited\n");
        return hipSuccess;
    }
    semiDenseKeypointStruct::nscales = _nscales;
    gKeypoints = new semiDenseKeypointStruct[_nscales];
    dim3 gridDim; gridDim.z = 1;
    for( int i=0; i<_nscales; ++i ) {
        int div = 1 << i;
        size_t ipitch, fpitch;
        // 8-bit image buffers (ipitch) and 32-bit float buffers (fpitch);
        // only the pitches of the last calls are kept -- all allocations of
        // the same kind are assumed to return the same pitch.
        hipMallocPitch((void**)&gKeypoints[i].scale3, &ipitch, _width/div, _height/div);
        hipMallocPitch((void**)&gKeypoints[i].scale6, &ipitch, _width/div, _height/div);
        hipMallocPitch((void**)&gKeypoints[i].saliency, &fpitch, 4*_width/div, _height/div);
        hipMallocPitch((void**)&gKeypoints[i].keypoints_img, &fpitch, 4*_width/div, _height/div);
        // NOTE(review): byte size assumes sizeof(keypt) == 4 -- confirm and
        // prefer sizeof(keypt)*_width*_height/(div*div).
        hipMalloc((void**)&gKeypoints[i].keypoints, 4*_width*_height/(div*div) );
        gKeypoints[i].nkeypoints = 0;
        // One ptcount/idxvector entry per launch block at this scale
        gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
        gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
        hipMalloc((void**)&gKeypoints[i].ptcount, gridDim.x*gridDim.y);
        gKeypoints[i].ptcount_h = new unsigned char[gridDim.x*gridDim.y];
        hipMalloc((void**)&gKeypoints[i].idxvector, (gridDim.x*gridDim.y+1)*sizeof(int));
        gKeypoints[i].idxvector_h = new int[gridDim.x*gridDim.y+1];
        gKeypoints[i].featurepts_h = new pvcore::keypt[_width/div*_height/div];
        gKeypoints[i].ipitch = ipitch;
        gKeypoints[i].fpitch = fpitch;
    }
    semiDenseKeypointStruct::inited = true;
    return hipSuccess;
}
// Releases device and host memory for semi-dense optical flow
// Counterpart of __initKeypointBuffer(); safe to call when nothing was
// allocated.
void __freeKeypointBuffer() {
    // Guard against double-free / free-before-init
    if (!semiDenseKeypointStruct::inited) {
        return;
    }
    for( int i=0; i<semiDenseKeypointStruct::nscales; ++i ) {
        hipFree(gKeypoints[i].scale3);
        hipFree(gKeypoints[i].scale6);
        hipFree(gKeypoints[i].saliency);
        // BUGFIX: these two buffers were previously leaked
        hipFree(gKeypoints[i].keypoints_img);
        hipFree(gKeypoints[i].keypoints);
        hipFree(gKeypoints[i].ptcount);
        delete [] gKeypoints[i].ptcount_h;
        hipFree(gKeypoints[i].idxvector);
        delete [] gKeypoints[i].idxvector_h;
        delete [] gKeypoints[i].featurepts_h;
    }
    // BUGFIX: the per-scale struct array itself was previously leaked
    delete [] gKeypoints;
    gKeypoints = NULL;
    semiDenseKeypointStruct::inited = false;
}
// Allocates per-scale device buffers for the optical-flow state and zeroes
// the keypoint velocities. Scale i is downsampled by 2^i.
// BUGFIX: previously every call overwrote 'err', silently discarding
// earlier allocation failures; now the first error aborts and is returned.
hipError_t __initOpticalFlowStructGPU(opticalFlowStruct* _src,
                                      int _width,
                                      int _height,
                                      int _nscales) {
    //*_src = new opticalFlowStruct[_nscales];
    hipError_t err = hipSuccess;
    for( int i=0; i<_nscales; ++i ) {
        int div = 1 << i;
        // Keypoint lists: worst case one keypoint per pixel at this scale
        err = hipMalloc((void**)&(_src[i].keypoints), sizeof(keypt)*_width/div*_height/div);
        if (err != hipSuccess) return err;
        err = hipMalloc((void**)&(_src[i].keypoints_vel), sizeof(keypt_vel)*_width/div*_height/div);
        if (err != hipSuccess) return err;
        _src[i].nkeypoints = 0;
        err = hipMallocPitch((void**)&(_src[i].keypoints_img),
                             &(_src[i].keypoints_img_pitch),
                             _width*sizeof(float)/div, _height/div);
        if (err != hipSuccess) return err;
        // Convert the byte pitch returned by hipMallocPitch to elements
        _src[i].keypoints_img_pitch /= sizeof(float);
        err = hipMallocPitch((void**)&(_src[i].features), &(_src[i].features_pitch),
                             _width*sizeof(featurept)/div, _height/div);
        if (err != hipSuccess) return err;
        _src[i].features_pitch /= sizeof(featurept);
        // Start with zero velocity for every potential keypoint
        dim3 blockDim; blockDim.x = blockDim.y = 16; blockDim.z = 1;
        dim3 gridDim; gridDim.z = 1;
        gridDim.x = GLOBAL_SIZE(_width/div,blockDim.x);
        gridDim.y = GLOBAL_SIZE(_height/div,blockDim.y);
        hipLaunchKernelGGL(( zeroVel), dim3(gridDim),dim3(blockDim), 0, 0, _src[i].keypoints_vel,_width/div);
    }
    return err;
}
// Allocates per-scale device buffers for predicted keypoints and features.
// BUGFIX: allocation return codes were previously ignored and the function
// unconditionally reported hipSuccess; the first error is now returned.
hipError_t __initPredictionStructGPU(predictionStruct* _src,
                                     int _width,
                                     int _height,
                                     int _nscales) {
    //*_src = new predictionStruct[_nscales];
    for( int i=0; i<_nscales; ++i ) {
        int div = 1 << i;
        hipError_t err = hipMalloc((void**)&_src[i].keypoints,_width/div*_height/div*sizeof(keypt));
        if (err != hipSuccess) return err;
        _src[i].nkeypoints = 0;
        err = hipMallocPitch((void**)&_src[i].features, &_src[i].features_pitch,
                             _width*sizeof(featurept)/div, _height/div);
        if (err != hipSuccess) return err;
        // Convert the byte pitch returned by hipMallocPitch to elements
        _src[i].features_pitch /= sizeof(featurept);
    }
    return hipSuccess;
}
// Matches each predicted keypoint against the new frame by exhaustive L2
// descriptor search in a small window (best_L2_match<3>), then swaps the
// feature buffers so the matched frame becomes the new reference.
hipError_t __matchGPU(opticalFlowStruct* _opticalFlow,
                      predictionStruct* _prediction,
                      int _width, int _height, dim3 _blockDim) {
    // One thread per keypoint
    _blockDim.y = 1;
    _blockDim.x = 128;

    int nscales = semiDenseKeypointStruct::nscales;

    dim3 gridDim; gridDim.y = gridDim.z = 1;

    // Descriptors are read as four uchar4 texels per keypoint:
    // tex_8u4_0 holds the reference descriptors, tex_8u4_1 the candidates.
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8,8,8,8,hipChannelFormatKindUnsigned);
    tex_8u4_0.addressMode[0] = hipAddressModeClamp;
    tex_8u4_0.addressMode[1] = hipAddressModeClamp;
    tex_8u4_0.filterMode = hipFilterModePoint;
    tex_8u4_0.normalized = false;
    tex_8u4_1.addressMode[0] = hipAddressModeClamp;
    tex_8u4_1.addressMode[1] = hipAddressModeClamp;
    tex_8u4_1.filterMode = hipFilterModePoint;
    tex_8u4_1.normalized = false;

    //    hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindUnsigned);
    //    tex_32u_0.addressMode[0] = hipAddressModeClamp;
    //    tex_32u_0.addressMode[1] = hipAddressModeClamp;
    //    tex_32u_0.filterMode = hipFilterModePoint;
    //    tex_32u_0.normalized = false;
    //    tex_32u_1.addressMode[0] = hipAddressModeClamp;
    //    tex_32u_1.addressMode[1] = hipAddressModeClamp;
    //    tex_32u_1.filterMode = hipFilterModePoint;
    //    tex_32u_1.normalized = false;

    for (int i=0; i<nscales; ++i) {
        int div = 1 << i;
        if (_opticalFlow[i].nkeypoints == 0) {
            continue;
        }
        gridDim.x = GLOBAL_SIZE(_opticalFlow[i].nkeypoints, _blockDim.x);

        hipMemcpy( gKeypoints[i].featurepts_h, _prediction[i].keypoints,
                   sizeof(pvcore::keypt)*_opticalFlow[i].nkeypoints,hipMemcpyDeviceToHost);
        printf("#npoints: %d\n",_opticalFlow[i].nkeypoints);
        // BUGFIX: clamp the debug dump so it never reads past the copied data
        for (int j=0; j<10 && j<_opticalFlow[i].nkeypoints; ++j) {
            printf("kp %d before: %d %d\n",j,gKeypoints[i].featurepts_h[j].x,gKeypoints[i].featurepts_h[j].y);
        }

        // BUGFIX: the reference texture was previously bound to
        // _prediction[i].features as well, making the kernel match the new
        // frame against itself (trivially best at zero offset). Bind the
        // previous-frame features instead, mirroring the commented-out
        // binary-descriptor path below. Both buffers are allocated
        // identically, so the same row pitch is used for both bindings.
        hipBindTexture2D( 0, &tex_8u4_0, _opticalFlow[i].features, &channelDesc,
                          _width/(4*div), _height/div,
                          _prediction[i].featuresBytesPerRow()/4);
        hipBindTexture2D( 0, &tex_8u4_1, _prediction[i].features, &channelDesc,
                          _width/(4*div), _height/div,
                          _prediction[i].featuresBytesPerRow()/4);
        hipLaunchKernelGGL(( best_L2_match<3>), dim3(gridDim),dim3(_blockDim), 0, 0, _prediction[i].keypoints, _opticalFlow[i].keypoints);

        //        hipBindTexture2D( 0, &tex_32u_0, _opticalFlow[i].features, &channelDesc,
        //                         _width/(4*div), _height/div,
        //                         _prediction[i].featuresBytesPerRow()/4);
        //        hipBindTexture2D( 0, &tex_32u_1, _prediction[i].features, &channelDesc,
        //                         _width/(4*div), _height/div,
        //                         _prediction[i].featuresBytesPerRow()/4);
        //        best_bin_match<3><<<gridDim,_blockDim>>>(_prediction[i].keypoints, _opticalFlow[i].keypoints);

        hipMemcpy( gKeypoints[i].featurepts_h, _opticalFlow[i].keypoints,
                   sizeof(pvcore::keypt)*_opticalFlow[i].nkeypoints,hipMemcpyDeviceToHost);
        for (int j=0; j<10 && j<_opticalFlow[i].nkeypoints; ++j) {
            printf("kp %d after: %d %d\n",j,gKeypoints[i].featurepts_h[j].x,gKeypoints[i].featurepts_h[j].y);
        }

        // Now we have matched, so swap features in prediction and optical flow
        featurept* tmp = _opticalFlow[i].features;
        _opticalFlow[i].features = _prediction[i].features;
        _prediction[i].features = tmp;
    }
    return hipSuccess;
}
// Launches predict_points at every scale: each predicted position is the
// current keypoint position plus its velocity. Scales with no keypoints
// are skipped.
hipError_t __predictPointsGPU(opticalFlowStruct* _opticalFlow,
                              predictionStruct* _prediction,
                              int _width, int _height, dim3 _blockDim) {
    // One thread per keypoint
    _blockDim.x = 128;
    _blockDim.y = 1;

    dim3 gridDim; gridDim.y = gridDim.z = 1;

    for (int i=0; i<semiDenseKeypointStruct::nscales; ++i) {
        int npts = _opticalFlow[i].nkeypoints;
        if (npts == 0) {
            continue;
        }
        gridDim.x = GLOBAL_SIZE(npts, _blockDim.x);
        hipLaunchKernelGGL(( predict_points), dim3(gridDim),dim3(_blockDim), 0, 0, _opticalFlow[i].keypoints, _opticalFlow[i].keypoints_vel,
                           _prediction[i].keypoints, npts);
        _prediction[i].nkeypoints = npts;
    }
    return hipSuccess;
}
// Builds the per-scale image buffers in gKeypoints: scale3 holds the
// Gauss-filtered image at each scale, scale6 a second filtering pass.
// Lazily allocates the global keypoint buffers on first call.
hipError_t __generateImagePyramid(const unsigned char* _src,
                                  unsigned int _width,
                                  unsigned int _height,
                                  unsigned int _pitchs,
                                  unsigned int _nscales,
                                  dim3 _blockDim) {
    if (!semiDenseKeypointStruct::inited) {
        __initKeypointBuffer(_width,_height,_nscales,_blockDim);
    }

    int nscales = semiDenseKeypointStruct::nscales;

    // Resize and gauss filter to different levels
    // NOTE(review): every __resizeGPU call passes the full-resolution
    // _width/_height although timg is already downscaled for i>1 --
    // confirm the intended per-level source size.
    const unsigned char* timg = _src;
    for (int i=1; i<nscales; ++i) {
        __resizeGPU(timg, gKeypoints[i].scale3, _width,_height,
                    gKeypoints[i-1].ipitch, gKeypoints[i].ipitch,0.5);
        timg = gKeypoints[i].scale3;
    }

    // Scale 0: filter the original image once into scale3, again into scale6
    __filterGaussGPU(_src,gKeypoints[0].scale3, _width, _height, gKeypoints[0].ipitch,gKeypoints[0].ipitch,3);
    __filterGaussGPU(gKeypoints[0].scale3, gKeypoints[0].scale6, _width, _height,
                     gKeypoints[0].ipitch, gKeypoints[0].ipitch,3);
    for (int i=1; i<nscales; ++i) {
        int div = 1 << i;
        // NOTE(review): the first call filters scale3 in place (src == dest);
        // verify that nppiFilterGauss supports in-place operation.
        __filterGaussGPU(gKeypoints[i].scale3, gKeypoints[i].scale3, _width/div,_height/div,
                         gKeypoints[i].ipitch, gKeypoints[i].ipitch,3);
        __filterGaussGPU(gKeypoints[i].scale3, gKeypoints[i].scale6, _width/div,_height/div,
                         gKeypoints[i].ipitch, gKeypoints[i].ipitch,3);
    }

    return hipSuccess;
}
// Computes weak (brightness-ring) descriptors for every pixel at every
// scale, writing them into _prediction[i].features. The 8-bit filtered
// image (scale3) is read through tex_32u as packed 32-bit words, so each
// thread covers four horizontally adjacent pixels.
hipError_t __extractFeaturesGPU(predictionStruct* _prediction,
                                int _width,
                                int _height,
                                dim3 _blockDim) {
    int nscales = semiDenseKeypointStruct::nscales;
    // Features
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindUnsigned);
    tex_32u.addressMode[0] = hipAddressModeClamp;
    tex_32u.addressMode[1] = hipAddressModeClamp;
    tex_32u.filterMode = hipFilterModePoint;
    tex_32u.normalized = false;
    dim3 gridDim; gridDim.z = 1;
    for (int i=0; i<nscales; ++i) {
        int div = 1 << i;
        // One thread per 4-pixel word of the row
        gridDim.x = GLOBAL_SIZE( _width/(div*4), _blockDim.x );
        gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
        hipBindTexture2D( 0, &tex_32u, gKeypoints[i].scale3, &channelDesc,
                          _width/(4*div), _height/div, gKeypoints[i].ipitch );
        hipLaunchKernelGGL(( weak_features_full), dim3(gridDim),dim3(_blockDim), 0, 0, (unsigned int*)_prediction[i].features,
                           _width/(div*4), _height/div, _prediction[i].featuresBytesPerRow()/4);
    }
    return hipSuccess;
}
// Extracts keypoints semi-dense optical flow
// Pipeline per scale: (1) build the saliency image from the two filtered
// pyramid images, (2) 3x3 non-maximum suppression into keypoints_img,
// (3) count keypoints per launch block, prefix-sum the counts on the host,
// and compact the surviving pixels into the keypoint list.
hipError_t __extractKeypointsGPU(opticalFlowStruct* _opticalFlow,
                                 unsigned int _width,
                                 unsigned int _height,
                                 unsigned int _keypointType,
                                 dim3 _blockDim) {
    int nscales = semiDenseKeypointStruct::nscales;

    // Compute saliency images
    // Contrast threshold as a fraction: _keypointType is given in percent
    float threshold = (float)_keypointType*0.01f;
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8,0,0,0,hipChannelFormatKindUnsigned);
    tex_8u.addressMode[0] = hipAddressModeClamp;
    tex_8u.addressMode[1] = hipAddressModeClamp;
    tex_8u.filterMode = hipFilterModePoint;
    tex_8u.normalized = false;
    dim3 gridDim; gridDim.z = 1;
    for (int i=0; i<nscales; ++i) {
        int div = 1 << i;
        gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
        gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
        // Radius-3 ring saliency on the once-filtered image...
        hipBindTexture2D( 0, &tex_8u, gKeypoints[i].scale3, &channelDesc,
                          _width/div, _height/div, gKeypoints[i].ipitch );
        hipLaunchKernelGGL(( saliency_3), dim3(gridDim),dim3(_blockDim), 0, 0, gKeypoints[i].saliency,
                           _width/div, _height/div,
                           gKeypoints[i].fpitch/4, threshold);
        // ...combined (max) with radius-6 ring saliency on the twice-filtered image
        hipBindTexture2D( 0, &tex_8u, gKeypoints[i].scale6, &channelDesc,
                          _width/div, _height/div, gKeypoints[i].ipitch );
        hipLaunchKernelGGL(( saliency_6), dim3(gridDim),dim3(_blockDim), 0, 0, gKeypoints[i].saliency,
                           _width/div, _height/div,
                           gKeypoints[i].fpitch/4, threshold);
    }

    // Non max suppression
    channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
    tex_32f.addressMode[0] = hipAddressModeClamp;
    tex_32f.addressMode[1] = hipAddressModeClamp;
    tex_32f.filterMode = hipFilterModePoint;
    tex_32f.normalized = false;
    for (int i=0; i<nscales; ++i) {
        int div = 1 << i;
        gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
        gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
        hipBindTexture2D( 0, &tex_32f, gKeypoints[i].saliency, &channelDesc,
                          _width/div, _height/div, gKeypoints[i].fpitch );
        // NOTE(review): the saliency kernels above receive fpitch/4
        // (elements) while nonmax_sup receives fpitch (bytes) as its pitch
        // argument -- confirm which unit nonmax_sup expects.
        hipLaunchKernelGGL(( nonmax_sup), dim3(gridDim),dim3(_blockDim), 0, 0, gKeypoints[i].keypoints_img, _width/div, _height/div,
                           gKeypoints[i].fpitch);
        // Add new keypoints to optical flow
        gridDim.x = GLOBAL_SIZE( _width/(2*div), _blockDim.x );
        gridDim.y = GLOBAL_SIZE( _height/(2*div), _blockDim.y );
        //        add_even<<< gridDim, _blockDim>>>();
        //        add_odd<<< gridDim, _blockDim>>>();
    }

    for (int i=0; i<nscales; ++i) {
        int div = 1 << i;
        // Grid sized for 16-row blocks, but count_points/createPointList run
        // with blockDim.y = 8 -- presumably each thread covers two rows;
        // TODO confirm against the kernel implementations.
        _blockDim.y = 16;
        gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
        gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
        _blockDim.y = 8;
        // Count points
        hipLaunchKernelGGL(( count_points), dim3(gridDim),dim3(_blockDim), 0, 0, _opticalFlow[i].keypoints_img, gKeypoints[i].ptcount,
                           0.3f, _opticalFlow[i].keypoints_img_pitch);
        // Accumulate points
        hipMemcpy(gKeypoints[i].ptcount_h, gKeypoints[i].ptcount,
                  gridDim.x*gridDim.y,hipMemcpyDeviceToHost);
        // Host-side prefix sum over the per-block counts:
        // idxvector_h[j] is the output offset of block j's first keypoint.
        _opticalFlow[i].nkeypoints = 0;
        gKeypoints[i].idxvector_h[0] = 0;
        for( int j=0; j<gridDim.x*gridDim.y; ++j ) {
            _opticalFlow[i].nkeypoints += gKeypoints[i].ptcount_h[j];
            gKeypoints[i].idxvector_h[j+1] = _opticalFlow[i].nkeypoints;
        }
        hipMemcpy(gKeypoints[i].idxvector, gKeypoints[i].idxvector_h,
                  (gridDim.x*gridDim.y+1)*sizeof(int), hipMemcpyHostToDevice );
        // Create points
        hipLaunchKernelGGL(( createPointList), dim3(gridDim),dim3(_blockDim), 0, 0, _opticalFlow[i].keypoints_img, gKeypoints[i].idxvector,
                           _opticalFlow[i].keypoints, _opticalFlow[i].keypoints_img_pitch);
        hipMemcpy( gKeypoints[i].featurepts_h, _opticalFlow[i].keypoints,
                   sizeof(pvcore::keypt)*_opticalFlow[i].nkeypoints,hipMemcpyDeviceToHost);
    }

    return hipSuccess;
}
} // namespace pvcore
| 816fbf7e1e997c93a1e1bfd1df2cbbfbb77ca827.cu | // ====================================================================== //
// pvcore -- simple parallel computer vision library
// Copyright (C) 2012 Niklas Bergström
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// ====================================================================== //
///////////////////////////////////////////////
/////////////////// CONSTANTS /////////////////
///////////////////////////////////////////////
#include "pvcore/keypoint.h"
#include "pvcore/cuda_functions.h"
#include "pvcore/common.h"
#include <math_constants.h>
texture<unsigned char, cudaTextureType2D, cudaReadModeElementType> tex_8u;
texture<unsigned int, cudaTextureType2D, cudaReadModeElementType> tex_32u;
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_32f;
texture<unsigned int, cudaTextureType2D, cudaReadModeElementType> tex_32u_0;
texture<unsigned int, cudaTextureType2D, cudaReadModeElementType> tex_32u_1;
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> tex_8u4_0;
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> tex_8u4_1;
// Ring-based saliency at radius 3 (FAST-style circle of 16 pixels).
// For each pixel: contrast = max |center - ring point| over the 16 ring
// pixels; saliency = min |p1 + p2 - 2*center| over the 8 diametrically
// opposed ring pairs. Writes saliency/contrast, or 0 where contrast falls
// below _threshold. Reads the 8-bit image via tex_8u (clamped addressing).
__global__ void saliency_3(float* _dest,
                           int _width,
                           int _height,
                           int _pitchd,
                           float _threshold) {
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int x = blockDim.x*blockIdx.x + threadIdx.x;

    if ( y < _height && x < _width ) {
        int ip = tex2D(tex_8u,x,y);

        // Get local contrast
        int contrast = abs(ip - tex2D(tex_8u,x,y-3));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+1,y-3)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y-2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+3,y-1)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+3,y)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+3,y+1)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y+2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+1,y+3)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x,y+3)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-1,y+3)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y+2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-3,y+1)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-3,y)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-3,y-1)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y-2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-1,y-3)));

        // Normalize from [0,255] to [0,1]
        float contrastf = contrast * (1.f/255.f);

        // Second-derivative-like response along the 8 diameters of the ring
        int saliency = abs(tex2D(tex_8u,x,y-3) + tex2D(tex_8u,x,y+3) - 2*ip);
        saliency = min(saliency,abs(tex2D(tex_8u,x+1,y-3) + tex2D(tex_8u,x-1,y+3) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+2,y-2) + tex2D(tex_8u,x-2,y+2) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+3,y-1) + tex2D(tex_8u,x-3,y+1) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+3,y) + tex2D(tex_8u,x-3,y) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+3,y+1) + tex2D(tex_8u,x-3,y-1) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+2,y+2) + tex2D(tex_8u,x-2,y-2) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+1,y+3) + tex2D(tex_8u,x-1,y-3) - 2*ip));

        float saliencyf = saliency * (1.f/255.f);

        // Contrast-normalized response; low-contrast pixels are rejected
        _dest[y*_pitchd+x] = contrastf < _threshold ? 0.0f : saliencyf/contrastf;
    }
}
// Ring-based saliency at radius 6 (same scheme as saliency_3, wider ring).
// Unlike saliency_3, the result is merged into _dest with fmax, so this
// kernel must run after saliency_3 has written the buffer.
__global__ void saliency_6(float* _dest,
                           int _width,
                           int _height,
                           int _pitchd,
                           float _threshold) {
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int x = blockDim.x*blockIdx.x + threadIdx.x;

    if ( y < _height && x < _width ) {
        int ip = tex2D(tex_8u,x,y);

        // Get local contrast
        int contrast = abs(ip - tex2D(tex_8u,x,y-6));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y-6)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+4,y-4)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+6,y-2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+6,y )));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+6,y+2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+4,y+4)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x+2,y+6)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x ,y+6)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y+6)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-4,y+4)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-6,y+2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-6,y )));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-6,y-2)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-4,y-4)));
        contrast = max(contrast,abs(ip - tex2D(tex_8u,x-2,y-6)));

        // Normalize from [0,255] to [0,1]
        float contrastf = contrast * (1.f/255.f);

        // Second-derivative-like response along the 8 diameters of the ring
        int saliency = abs(tex2D(tex_8u,x,y-6) + tex2D(tex_8u,x,y+6) - 2*ip);
        saliency = min(saliency,abs(tex2D(tex_8u,x+2,y-6) + tex2D(tex_8u,x-2,y+6) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+4,y-4) + tex2D(tex_8u,x-4,y+4) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+6,y-2) + tex2D(tex_8u,x-6,y+2) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+6,y) + tex2D(tex_8u,x-6,y) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+6,y+2) + tex2D(tex_8u,x-6,y-2) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+4,y+4) + tex2D(tex_8u,x-4,y-4) - 2*ip));
        saliency = min(saliency,abs(tex2D(tex_8u,x+2,y+6) + tex2D(tex_8u,x-2,y-6) - 2*ip));

        float saliencyf = saliency * (1.f/255.f);

        // Keep the stronger response of the two ring radii
        float res = contrastf < _threshold ? 0.0f : saliencyf/contrastf;
        _dest[y*_pitchd+x] = fmax(_dest[y*_pitchd+x],res);
    }
}
// 3x3 non-maximum suppression on the saliency image bound to tex_32f.
// A pixel survives only when it is >= all eight neighbours AND above 0.25;
// everything else is written as 0.
__global__ void nonmax_sup(float* _dest,
                           int _width,
                           int _height,
                           int _pitchd) {
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int x = blockDim.x*blockIdx.x + threadIdx.x;

    // BUGFIX: _width/_height were accepted but never used, so threads of a
    // rounded-up grid wrote past the image bounds. Guard against that.
    if (x >= _width || y >= _height) {
        return;
    }

    float pixelval = tex2D(tex_32f,x,y);

    // Maximum of the 8-neighbourhood (texture clamping handles borders)
    float tmp = tex2D(tex_32f,x-1,y-1);
    tmp = fmax(tmp,tex2D(tex_32f,x  ,y-1));
    tmp = fmax(tmp,tex2D(tex_32f,x+1,y-1));
    tmp = fmax(tmp,tex2D(tex_32f,x-1,y));
    tmp = fmax(tmp,tex2D(tex_32f,x+1,y));
    tmp = fmax(tmp,tex2D(tex_32f,x-1,y+1));
    tmp = fmax(tmp,tex2D(tex_32f,x  ,y+1));
    tmp = fmax(tmp,tex2D(tex_32f,x+1,y+1));

    _dest[y*_pitchd+x] = (pixelval >= tmp ? (pixelval > 0.25 ? pixelval : 0.0f) : 0.0f);
}
/**
 * \brief Extracts features for all points in image tex_32u
 *
 * Each thread handles four horizontally adjacent pixels (one packed 32-bit
 * texel) and builds a 16-byte descriptor per pixel from two concentric
 * sampling rings (radius 3 and radius 6). "Half" variant: only the upper
 * 4 bits of each sampled byte are kept, so two samples are packed per
 * output byte.
 *
 * \param _dest Destination image of feature points (unsigned char [16])
 * \param _width Width of image
 * \param _height Height of image
 * \param _pitch Elements per row of _dest
 */
__global__ void weak_features_half(unsigned int* _dest,
                                   int _width,
                                   int _height,
                                   int _pitchd) {
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int x = blockDim.x*blockIdx.x + threadIdx.x;

    // Allocate shared memory for 4x16x16 16-byte feature vectors, and add
    // extra byte for padding of
    // (17-word stride avoids shared-memory bank conflicts)
    __shared__ unsigned int shm[4*4*16*16+16*16];

    // Read contributions for eight pixels for four consecutive pixels
    // -- radius-3 ring, right/lower half; keep high nibbles
    unsigned int s0 = (tex2D(tex_32u, x  , y-3) & 0xF0F0F0F0) >> 4;
    unsigned int s1 = (tex2D(tex_32u, x+1, y-3) & 0xF0F0F0F0) >> 4;
    unsigned int s2 = (tex2D(tex_32u, x+2, y-2) & 0xF0F0F0F0) >> 4;
    unsigned int s3 = (tex2D(tex_32u, x+3, y-1) & 0xF0F0F0F0) >> 4;
    s0 |= tex2D(tex_32u, x+3, y  ) & 0xF0F0F0F0;
    s1 |= tex2D(tex_32u, x+3, y+1) & 0xF0F0F0F0;
    s2 |= tex2D(tex_32u, x+2, y+2) & 0xF0F0F0F0;
    s3 |= tex2D(tex_32u, x+1, y+3) & 0xF0F0F0F0;

    // After reading the contributions are stored in columns of bytes
    // in the unsigned integers. We need to transpose
    transpose(&s0,&s1,&s2,&s3);

    // Store 4 feature vectors in every row of shm
    shm[16*threadIdx.y*17+17*threadIdx.x] = s0;
    shm[16*threadIdx.y*17+17*threadIdx.x+4] = s1;
    shm[16*threadIdx.y*17+17*threadIdx.x+8] = s2;
    shm[16*threadIdx.y*17+17*threadIdx.x+12] = s3;

    // Read contributions for eight pixels for four consecutive pixels
    // -- radius-3 ring, left/upper half
    s0 = (tex2D(tex_32u, x  , y+3) & 0xF0F0F0F0) >> 4;
    s1 = (tex2D(tex_32u, x-1, y+3) & 0xF0F0F0F0) >> 4;
    s2 = (tex2D(tex_32u, x-2, y+2) & 0xF0F0F0F0) >> 4;
    s3 = (tex2D(tex_32u, x-3, y+1) & 0xF0F0F0F0) >> 4;
    s0 |= tex2D(tex_32u, x-3, y  ) & 0xF0F0F0F0;
    s1 |= tex2D(tex_32u, x-3, y-1) & 0xF0F0F0F0;
    s2 |= tex2D(tex_32u, x-2, y-2) & 0xF0F0F0F0;
    s3 |= tex2D(tex_32u, x-1, y-3) & 0xF0F0F0F0;

    // After reading the contributions are stored in columns of bytes
    // in the unsigned integers. We need to transpose
    transpose(&s0,&s1,&s2,&s3);

    // Store 4 feature vectors in every row of shm
    shm[16*threadIdx.y*17+17*threadIdx.x+1] = s0;
    shm[16*threadIdx.y*17+17*threadIdx.x+5] = s1;
    shm[16*threadIdx.y*17+17*threadIdx.x+9] = s2;
    shm[16*threadIdx.y*17+17*threadIdx.x+13] = s3;

    // Read contributions for eight pixels for four consecutive pixels
    // -- radius-6 ring, right/lower half
    s0 = (tex2D(tex_32u, x  , y-6) & 0xF0F0F0F0) >> 4;
    s1 = (tex2D(tex_32u, x+2, y-6) & 0xF0F0F0F0) >> 4;
    s2 = (tex2D(tex_32u, x+4, y-4) & 0xF0F0F0F0) >> 4;
    s3 = (tex2D(tex_32u, x+6, y-2) & 0xF0F0F0F0) >> 4;
    s0 |= tex2D(tex_32u, x+6, y  ) & 0xF0F0F0F0;
    s1 |= tex2D(tex_32u, x+6, y+2) & 0xF0F0F0F0;
    s2 |= tex2D(tex_32u, x+4, y+4) & 0xF0F0F0F0;
    s3 |= tex2D(tex_32u, x+2, y+6) & 0xF0F0F0F0;

    // After reading the contributions are stored in columns of bytes
    // in the unsigned integers. We need to transpose
    transpose(&s0,&s1,&s2,&s3);

    // Store 4 feature vectors in every row of shm
    shm[16*threadIdx.y*17+17*threadIdx.x+2] = s0;
    shm[16*threadIdx.y*17+17*threadIdx.x+6] = s1;
    shm[16*threadIdx.y*17+17*threadIdx.x+10] = s2;
    shm[16*threadIdx.y*17+17*threadIdx.x+14] = s3;

    // Read contributions for eight pixels for four consecutive pixels
    // -- radius-6 ring, left/upper half
    s0 = (tex2D(tex_32u, x  , y+6) & 0xF0F0F0F0) >> 4;
    s1 = (tex2D(tex_32u, x-2, y+6) & 0xF0F0F0F0) >> 4;
    s2 = (tex2D(tex_32u, x-4, y+4) & 0xF0F0F0F0) >> 4;
    s3 = (tex2D(tex_32u, x-6, y+2) & 0xF0F0F0F0) >> 4;
    s0 |= tex2D(tex_32u, x-6, y  ) & 0xF0F0F0F0;
    s1 |= tex2D(tex_32u, x-6, y-2) & 0xF0F0F0F0;
    s2 |= tex2D(tex_32u, x-4, y-4) & 0xF0F0F0F0;
    s3 |= tex2D(tex_32u, x-2, y-6) & 0xF0F0F0F0;

    // After reading the contributions are stored in columns of bytes
    // in the unsigned integers. We need to transpose
    transpose(&s0,&s1,&s2,&s3);

    // Store 4 feature vectors in every row of shm
    shm[16*threadIdx.y*17+17*threadIdx.x+3] = s0;
    shm[16*threadIdx.y*17+17*threadIdx.x+7] = s1;
    shm[16*threadIdx.y*17+17*threadIdx.x+11] = s2;
    shm[16*threadIdx.y*17+17*threadIdx.x+15] = s3;

    // All descriptors must be staged before the coalesced write-out below
    __syncthreads();

    // Store shared memory in _dest
    // NOTE(review): no bounds guard -- assumes the grid exactly covers the
    // image; confirm at the call sites.
#pragma unroll
    for (int i=0; i<16; ++i) {
        _dest[y*_pitchd+16*blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x] = shm[17*16*threadIdx.y + 17*i + threadIdx.x];
    }
}
// Builds a dense 16-byte "weak feature" descriptor per pixel from tex_32u
// (full-resolution variant: no nibble masking/shifting, unlike the half
// variant above). Each thread handles 4 horizontally adjacent pixels (one
// 32-bit texel); descriptors are staged in shared memory, then written out
// row-contiguously to _dest.
// Launch assumption: blockDim = (16,16) -- the shm indexing below hard-codes
// 16 threads per row and a stride of 17 words.
__global__ void weak_features_full(unsigned int* _dest,
int _width,
int _height,
int _pitchd) {
int y = blockDim.y*blockIdx.y + threadIdx.y;
int x = blockDim.x*blockIdx.x + threadIdx.x;
// Allocate shared memory for 4x16x16 16-byte feature vectors, and add
// extra word per row of 16 (stride 17) -- presumably padding to avoid
// shared-memory bank conflicts; TODO confirm.
__shared__ unsigned int shm[4*4*16*16+16*16];
// Read contributions for eight pixels for four consecutive pixels
// (sample ring, first half: offsets up/right of the center pixel).
unsigned int s0 = tex2D(tex_32u, x , y-3);
unsigned int s1 = tex2D(tex_32u, x+2, y-2);
unsigned int s2 = tex2D(tex_32u, x+3, y );
unsigned int s3 = tex2D(tex_32u, x+2, y+2);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
// (transpose() is a device helper defined elsewhere in this file).
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+4] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+8] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+12] = s3;
// Read contributions for eight pixels for four consecutive pixels
// (sample ring, second half: mirrored offsets down/left).
s0 = tex2D(tex_32u, x , y+3);
s1 = tex2D(tex_32u, x-2, y+2);
s2 = tex2D(tex_32u, x-3, y );
s3 = tex2D(tex_32u, x-2, y-2);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+1] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+5] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+9] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+13] = s3;
// Read contributions for eight pixels for four consecutive pixels
// (outer ring at double the radius, first half).
s0 = tex2D(tex_32u, x , y-6);
s1 = tex2D(tex_32u, x+4, y-4);
s2 = tex2D(tex_32u, x+6, y );
s3 = tex2D(tex_32u, x+4, y+4);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+2] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+6] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+10] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+14] = s3;
// Read contributions for eight pixels for four consecutive pixels
// (outer ring, second half).
s0 = tex2D(tex_32u, x , y+6);
s1 = tex2D(tex_32u, x-4, y+4);
s2 = tex2D(tex_32u, x-6, y );
s3 = tex2D(tex_32u, x-4, y-4);
// After reading the contributions are stored in columns of bytes
// in the unsigned integers. We need to transpose
transpose(&s0,&s1,&s2,&s3);
// Store 4 feature vectors in every row of shm
shm[16*threadIdx.y*17+17*threadIdx.x+3] = s0;
shm[16*threadIdx.y*17+17*threadIdx.x+7] = s1;
shm[16*threadIdx.y*17+17*threadIdx.x+11] = s2;
shm[16*threadIdx.y*17+17*threadIdx.x+15] = s3;
__syncthreads();
// Store shared memory in _dest
// (coalesced: consecutive threads write consecutive words).
#pragma unroll
for (int i=0; i<16; ++i) {
_dest[y*_pitchd+16*blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x] = shm[17*16*threadIdx.y + 17*i + threadIdx.x];
}
}
// Counts, per thread block, how many values of _pts exceed _threshold.
// Launch assumption: blockDim = (16,8); each thread examines two pixels
// (rows y and y+8 via reductionoffset). The per-block count is written to
// _count[blockIdx.y*gridDim.x + blockIdx.x].
// NOTE(review): 128 threads x 2 pixels means the true count can reach 256,
// which would wrap the unsigned char result -- presumably thresholding keeps
// counts low; confirm upstream.
__global__ void count_points(float* _pts,
unsigned char* _count,
float _threshold,
int _pitch) {
__shared__ unsigned char shm[16*8];
// Global indices
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = 2*blockIdx.y*blockDim.y + threadIdx.y;
int idx = y*_pitch + x;
int reductionoffset = 8*_pitch;
// Thread block indices (0-127; assumes blockDim.x == 16)
int shmidx = threadIdx.y*16+threadIdx.x;
// Each thread contributes 0, 1 or 2 depending on its two pixels
shm[shmidx] = (_pts[idx] > _threshold ? 1 : 0) + (_pts[idx+reductionoffset] > _threshold ? 1 : 0);
__syncthreads();
// Tree reduction of the 128 partial counts into shm[0].
// The barrier is deliberately outside the divergent branch: the previous
// version called __syncthreads() inside `if (shmidx < 64)` (undefined
// behaviour) and ran the tail without any synchronization, relying on
// implicit warp synchrony that no longer holds on Volta and later.
for (int s = 64; s > 0; s >>= 1) {
if (shmidx < s) {
shm[shmidx] += shm[shmidx+s];
}
__syncthreads();
}
if (shmidx == 0) {
_count[blockIdx.y*gridDim.x+blockIdx.x] = shm[0];
}
}
// Gathers the feature points of one 16x16-pixel block (two 16x8 tiles) into
// a compact global list. _src holds the keypoint image (non-zero = point);
// _ptsperblock holds the exclusive prefix sum of per-block point counts, so
// _ptsperblock[b] is block b's start index in _pointlist and
// _ptsperblock[b+1]-_ptsperblock[b] its count.
// Launch assumption: blockDim = (16,8), matching count_points above.
__global__ void createPointList(float* _src,
int* _ptsperblock,
pvcore::keypt* _pointlist,
int _pitch) {
// Index of the block - will give how many points should be stored in _pointlist
int blockidx = blockIdx.y*gridDim.x + blockIdx.x;
int ptidx = _ptsperblock[blockidx];
// Number of points to save
int npts = _ptsperblock[blockidx+1] - ptidx;
// Thread block indices (0-127)
int shmidx = threadIdx.y*16 + threadIdx.x;
int direction = threadIdx.y*16 + threadIdx.x;
// No need to sort if no points in block.
// npts is uniform across the block, so this early return (before any
// __syncthreads) is taken by either all threads or none -- safe.
if( npts == 0 ) {
return;
}
int reductionoffset = 8*_pitch;
// 16*17 = 272 slots; only 256 are used (17-stride padding pattern).
__shared__ float shm_points[16*17];
__shared__ unsigned char ptlist[16*17];
// Indices to global point
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = 2*blockIdx.y * blockDim.y + threadIdx.y;
int idx = y*_pitch + x;
// Shared memory holding a copy of the feature point image
shm_points[shmidx] = _src[idx];
shm_points[shmidx+128] = _src[idx+reductionoffset];
// Shared memory holding the (local) indices of the feature points
// (low nibble = x within tile, high nibble = y within tile).
ptlist[shmidx] = shmidx;
ptlist[shmidx+128] = shmidx+128;
__syncthreads();
// Sort 256 numbers, largest first !!POTENTIAL BANK CONFLICTS!!
// Implements bitonic merge sort
// (atomicSort is a device helper defined elsewhere in this file;
// each thread handles one compare-exchange pair per pass).
for( int i=1; i < 256; i <<= 1 ) {
// Outer loop determines sorting direction
int sortdir = (direction & i) > 0 ? 0 : 1;
// Inner loop
for( int j=i; j > 0; j >>= 1 ) {
// New index: for power-of-two j, 0x0FFFFFFF*j has zeros in its low
// log2(j) bits, so this splits shmidx around bit log2(j) -- the
// standard bitonic pair-index trick tidx = 2*shmidx - (shmidx mod j).
int mask = 0x0FFFFFFF * j;
int tidx = ((shmidx&mask) << 1) + (shmidx & ~mask);
atomicSort(shm_points,ptlist,tidx,j,j*sortdir);
__syncthreads();
}
}
// Save points in list: after sorting, the first npts entries of ptlist
// are the local coordinates of the block's feature points.
if( shmidx < npts ) {
_pointlist[ptidx + shmidx].x = blockIdx.x * blockDim.x + (ptlist[shmidx] & 0x0F);
_pointlist[ptidx + shmidx].y = 2*blockIdx.y * blockDim.y + ((ptlist[shmidx] & 0xF0) >> 4);
}
// _edges[idx] = shm_points[shmidx];
// _edges[idx+reductionoffset] = shm_points[shmidx+128];
}
// Hamming distance between two 128-bit binary descriptors, each stored as
// four 32-bit words: the number of differing bits.
__device__ inline int bin_dist(unsigned int* _src,
unsigned int* _ref) {
int dist = 0;
#pragma unroll
for (int w = 0; w < 4; ++w) {
dist += __popc( _src[w] ^ _ref[w] );
}
return dist;
}
// Squared L2 distance between two descriptors of _npts uchar4 elements,
// accumulated channel by channel. Differences are computed in int (uchar
// operands promote), so each squared term is at most 255^2 and the
// four-term partial sum cannot overflow before widening to unsigned long.
__device__ inline unsigned long L2_dist(uchar4* _src,
uchar4* _ref,
int _npts) {
unsigned long acc = 0;
for (int i = 0; i < _npts; ++i) {
int dx = _src[i].x - _ref[i].x;
int dy = _src[i].y - _ref[i].y;
int dz = _src[i].z - _ref[i].z;
int dw = _src[i].w - _ref[i].w;
acc += dx*dx;
acc += dy*dy;
acc += dz*dz;
acc += dw*dw;
}
return acc;
}
// For each source keypoint (one thread per keypoint), searches a
// (2*_area+1)^2 window in tex_8u4_1 for the position whose 16-channel
// descriptor minimizes the squared L2 distance to the reference descriptor
// read from tex_8u4_0 at the keypoint itself. Ties keep the earlier
// candidate (strict < comparison -- note the binary variant below uses <=).
// NOTE(review): no bound check on the keypoint index, so the caller must
// launch exactly ceil(npts/blockDim.x) blocks with _srcpts padded, or
// trailing threads read/write past the arrays -- confirm.
template <int _area>
__global__ void best_L2_match(const pvcore::keypt* _srcpts,
pvcore::keypt* _destpts) {
pvcore::keypt pt = _srcpts[blockDim.x*blockIdx.x + threadIdx.x];
uchar4 src[4], ref[4];
// Reference descriptor: four uchar4 texels per pixel (16 bytes).
ref[0] = tex2D(tex_8u4_0, 4*pt.x, pt.y);
ref[1] = tex2D(tex_8u4_0, 4*pt.x+1, pt.y);
ref[2] = tex2D(tex_8u4_0, 4*pt.x+2, pt.y);
ref[3] = tex2D(tex_8u4_0, 4*pt.x+3, pt.y);
pvcore::keypt destpt;
destpt.x = pt.x;
destpt.y = pt.y;
// One more than the maximum possible distance (16 channels * 255^2).
unsigned long dist = 255*255*16+1;
#pragma unroll
for (int y=-_area; y<=_area; ++y) {
unsigned long t;
#pragma unroll
for (int x=-_area; x<=_area; ++x) {
src[0] = tex2D(tex_8u4_1, 4*(pt.x+x), pt.y+y);
src[1] = tex2D(tex_8u4_1, 4*(pt.x+x)+1, pt.y+y);
src[2] = tex2D(tex_8u4_1, 4*(pt.x+x)+2, pt.y+y);
src[3] = tex2D(tex_8u4_1, 4*(pt.x+x)+3, pt.y+y);
t = L2_dist(src,ref,4);
if (t < dist) {
dist = t;
destpt.x = pt.x+x;
destpt.y = pt.y+y;
}
}
}
_destpts[blockDim.x*blockIdx.x + threadIdx.x] = destpt;
}
// For each source keypoint (one thread per keypoint), scans a
// (2*_area+1)^2 window in tex_32u_1 and keeps the position whose 128-bit
// binary descriptor has the smallest Hamming distance to the reference
// descriptor read from tex_32u_0 at the keypoint itself. Ties favour the
// last candidate visited (<= comparison).
template <int _area>
__global__ void best_bin_match(const pvcore::keypt* _srcpts,
pvcore::keypt* _destpts) {
int ptindex = blockDim.x*blockIdx.x + threadIdx.x;
pvcore::keypt center = _srcpts[ptindex];
// Reference descriptor: four 32-bit words per pixel.
unsigned int refdesc[4];
refdesc[0] = tex2D(tex_32u_0, 4*center.x, center.y);
refdesc[1] = tex2D(tex_32u_0, 4*center.x+1, center.y);
refdesc[2] = tex2D(tex_32u_0, 4*center.x+2, center.y);
refdesc[3] = tex2D(tex_32u_0, 4*center.x+3, center.y);
pvcore::keypt best;
best.x = center.x;
best.y = center.y;
// One more than the maximum possible popcount over 128 bits.
int bestdist = 129;
#pragma unroll
for (int dy=-_area; dy<=_area; ++dy) {
#pragma unroll
for (int dx=-_area; dx<=_area; ++dx) {
unsigned int cand[4];
cand[0] = tex2D(tex_32u_1, 4*(center.x+dx), center.y+dy);
cand[1] = tex2D(tex_32u_1, 4*(center.x+dx)+1, center.y+dy);
cand[2] = tex2D(tex_32u_1, 4*(center.x+dx)+2, center.y+dy);
cand[3] = tex2D(tex_32u_1, 4*(center.x+dx)+3, center.y+dy);
int d = bin_dist(cand,refdesc);
if (d <= bestdist) {
bestdist = d;
best.x = center.x+dx;
best.y = center.y+dy;
}
}
}
_destpts[ptindex] = best;
}
// Writes zero to one element per thread of a 2D array with row stride
// _pitch (in elements). One thread per element; no bounds check, so the
// grid must cover the array exactly.
template <typename T>
__global__ void zero(T* _dest, int _pitch) {
int col = blockDim.x*blockIdx.x + threadIdx.x;
int row = blockDim.y*blockIdx.y + threadIdx.y;
_dest[row*_pitch + col] = (T)0;
}
// Resets one keypoint velocity per thread to (0, 0). _pitch is the row
// stride in elements; no bounds check, grid must match the array.
__global__ void zeroVel(pvcore::keypt_vel* _vel, int _pitch) {
int idx = (blockDim.y*blockIdx.y + threadIdx.y)*_pitch + blockDim.x*blockIdx.x + threadIdx.x;
_vel[idx].x = 0.0f;
_vel[idx].y = 0.0f;
}
// Rasterizes a keypoint list into a dense float image: the pixel at every
// listed keypoint position is set to 1.0f; all other pixels are left as-is.
// One thread per keypoint.
__global__ void fill_features(pvcore::keypt* _features,
float* _dest,
int _nfeatures,
int _pitchd) {
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if( idx >= _nfeatures ) {
return;
}
pvcore::keypt pt = _features[idx];
_dest[pt.y*_pitchd + pt.x] = 1.0f;
}
// Predicts each keypoint's next position by advancing it along its
// current velocity. One thread per keypoint, bounds-checked against _npts.
__global__ void predict_points(pvcore::keypt* _srcpt, pvcore::keypt_vel* _srcvel, pvcore::keypt* _destpt, int _npts) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i >= _npts) {
return;
}
_destpt[i].x = _srcpt[i].x + _srcvel[i].x;
_destpt[i].y = _srcpt[i].y + _srcvel[i].y;
}
// For pixels at even coordinates: if the 3x3 neighbourhood in tex_32f sums
// to exactly zero (no keypoint anywhere nearby), copy the value from _src
// into _dest. Interior pixels only (borders skipped).
// The exact float comparison with 0 is intentional: the sum is zero only
// when every sample is exactly 0.
// (An unused `__shared__ float shm[18*18]` declaration was removed.)
__global__ void add_even(const float* _src,
float* _dest,
int _width,
int _height,
int _pitchd) {
int y = (blockDim.y*blockIdx.y + threadIdx.y)*2;
int x = (blockDim.x*blockIdx.x + threadIdx.x)*2;
if (y>0 && y<_height-1 && x>0 && x<_width-1) {
float sum = 0.0f;
sum += tex2D( tex_32f, x-1, y-1);
sum += tex2D( tex_32f, x , y-1);
sum += tex2D( tex_32f, x+1, y-1);
sum += tex2D( tex_32f, x-1, y );
sum += tex2D( tex_32f, x , y );
sum += tex2D( tex_32f, x+1, y );
sum += tex2D( tex_32f, x-1, y+1);
sum += tex2D( tex_32f, x , y+1);
sum += tex2D( tex_32f, x+1, y+1);
if (sum == 0) {
_dest[y*_pitchd+x] = _src[y*_pitchd+x];
}
}
}
// For pixels at odd coordinates: if the 3x3 neighbourhood in tex_32f sums
// to exactly zero, copy the value from _src into _dest. Odd coordinates
// start at 1, so only the upper bounds need checking.
// The exact float comparison with 0 is intentional: the sum is zero only
// when every sample is exactly 0.
// (An unused `__shared__ float shm[18*18]` declaration was removed.)
__global__ void add_odd(const float* _src,
float* _dest,
int _width,
int _height,
int _pitchd) {
int y = (blockDim.y*blockIdx.y + threadIdx.y)*2+1;
int x = (blockDim.x*blockIdx.x + threadIdx.x)*2+1;
if (y<_height-1 && x<_width-1) {
float sum = 0.0f;
sum += tex2D( tex_32f, x-1, y-1);
sum += tex2D( tex_32f, x , y-1);
sum += tex2D( tex_32f, x+1, y-1);
sum += tex2D( tex_32f, x-1, y );
sum += tex2D( tex_32f, x , y );
sum += tex2D( tex_32f, x+1, y );
sum += tex2D( tex_32f, x-1, y+1);
sum += tex2D( tex_32f, x , y+1);
sum += tex2D( tex_32f, x+1, y+1);
if (sum == 0) {
_dest[y*_pitchd+x] = _src[y*_pitchd+x];
}
}
}
namespace pvcore {
// Resizes _src into _dest by factor _scale using NPP's bilinear
// ResizeSqrPixel. The channel count (1, 3 or 4) is inferred from
// _pitchs/_width, and the element type (8u vs 32f) from sizeof(T).
// NPP status codes are ignored; always returns cudaSuccess.
// NOTE(review): the destination ROI starts at (1,1), not (0,0) -- this
// shifts the output by one pixel; confirm that offset is intentional.
// The results from this method will differ. Consider implementing own
template <typename T>
cudaError_t __resizeGPU(const T* _src,
T* _dest,
unsigned int _width,
unsigned int _height,
unsigned int _pitchs,
unsigned int _pitchd,
double _scale) {
NppiSize osz; osz.width = _width; osz.height = _height;
NppiRect oroi; oroi.x = 0; oroi.y = 0; oroi.width = _width; oroi.height = _height;
NppiRect droi; droi.x = 1; droi.y = 1; droi.width = _width*_scale; droi.height = _height*_scale;
switch (sizeof(T)) {
case 4:
// 32-bit float images; pitches are in elements, NPP wants bytes.
if (_pitchs/_width == 1) {
nppiResizeSqrPixel_32f_C1R((const Npp32f*)_src, osz, _pitchs*4, oroi, (Npp32f*)_dest, _pitchd*4, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
} else if (_pitchs/_width == 3) {
nppiResizeSqrPixel_32f_C3R((const Npp32f*)_src, osz, _pitchs*4, oroi, (Npp32f*)_dest, _pitchd*4, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
} else if (_pitchs/_width == 4) {
nppiResizeSqrPixel_32f_C4R((const Npp32f*)_src, osz, _pitchs*4, oroi, (Npp32f*)_dest, _pitchd*4, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
}
break;
case 1:
// 8-bit unsigned images; pitches are already in bytes.
if (_pitchs/_width == 1) {
nppiResizeSqrPixel_8u_C1R((const Npp8u*)_src, osz, _pitchs, oroi, (Npp8u*)_dest, _pitchd, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
} else if (_pitchs/_width == 3) {
nppiResizeSqrPixel_8u_C3R((const Npp8u*)_src, osz, _pitchs, oroi, (Npp8u*)_dest, _pitchd, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
} else if (_pitchs/_width == 4) {
nppiResizeSqrPixel_8u_C4R((const Npp8u*)_src, osz, _pitchs, oroi, (Npp8u*)_dest, _pitchd, droi, _scale, _scale, 0, 0, NPPI_INTER_LINEAR);
}
break;
default:
break;
}
return cudaSuccess;
}
// Applies NPP's Gauss filter to _src, writing to _dest. _mask selects the
// kernel size (5 -> 5x5, anything else -> 3x3). The channel count is
// inferred from _pitchs/_width and the element type from sizeof(T).
// NPP status codes are ignored; always returns cudaSuccess.
// The results from this method will differ. Consider implementing own
template <typename T>
cudaError_t __filterGaussGPU(const T* _src,
T* _dest,
unsigned int _width,
unsigned int _height,
unsigned int _pitchs,
unsigned int _pitchd,
int _mask) {
NppiSize osz; osz.width = _width; osz.height = _height;
NppiMaskSize mask_size = NPP_MASK_SIZE_3_X_3;
if (_mask == 5) {
mask_size = NPP_MASK_SIZE_5_X_5;
}
switch (sizeof(T)) {
case 4:
// 32-bit float images; pitches are in elements, NPP wants bytes.
if (_pitchs/_width == 1) {
nppiFilterGauss_32f_C1R((const Npp32f*)_src, _pitchs*4, (Npp32f*)_dest, _pitchd*4, osz, mask_size);
} else if (_pitchs/_width == 3) {
nppiFilterGauss_32f_C3R((const Npp32f*)_src, _pitchs*4, (Npp32f*)_dest, _pitchd*4, osz, mask_size);
} else if (_pitchs/_width == 4) {
nppiFilterGauss_32f_C4R((const Npp32f*)_src, _pitchs*4, (Npp32f*)_dest, _pitchd*4, osz, mask_size);
}
break;
case 1:
// 8-bit unsigned images; pitches are already in bytes.
if (_pitchs/_width == 1) {
nppiFilterGauss_8u_C1R((const Npp8u*)_src, _pitchs, (Npp8u*)_dest, _pitchd, osz, mask_size);
} else if (_pitchs/_width == 3) {
nppiFilterGauss_8u_C3R((const Npp8u*)_src, _pitchs, (Npp8u*)_dest, _pitchd, osz, mask_size);
} else if (_pitchs/_width == 4) {
nppiFilterGauss_8u_C4R((const Npp8u*)_src, _pitchs, (Npp8u*)_dest, _pitchd, osz, mask_size);
}
break;
default:
break;
}
return cudaSuccess;
}
// Per-scale working buffers for the semi-dense keypoint pipeline.
// One instance per pyramid level; the array is held in the module-level
// gKeypoints pointer and managed by __initKeypointBuffer /
// __freeKeypointBuffer.
struct semiDenseKeypointStruct {
static int nscales;   // number of pyramid levels allocated
static bool inited;   // true after __initKeypointBuffer has run
// Filtered source images (Gauss-smoothed, device memory)
unsigned char* scale3;
unsigned char* scale6;
// Saliency image
float* saliency;
// Number of feature points in cuda grid (device + host copies)
unsigned char* ptcount;
unsigned char* ptcount_h;
// Accumulated feature points in the grid (prefix sums, device + host)
int* idxvector;
int* idxvector_h;
// Keypoints
float* keypoints_img;
keypt* keypoints;
int nkeypoints;
// Pitch for the float images (fpitch) and byte images (ipitch),
// in bytes as returned by cudaMallocPitch
size_t fpitch, ipitch;
size_t fpitch_h, ipitch_h;
// Host-side staging buffer for copying keypoints off the device
pvcore::keypt* featurepts_h;
};
// Static member definitions and the module-wide buffer array.
int semiDenseKeypointStruct::nscales = 1;
bool semiDenseKeypointStruct::inited = false;
semiDenseKeypointStruct* gKeypoints = NULL;
// Initializes device and host memory for semi-dense optical flow.
// Allocates per-scale image, saliency, count and keypoint buffers for
// _nscales pyramid levels (level i is downscaled by 2^i). Idempotent:
// returns immediately if already initialized.
// NOTE(review): allocation results are not checked, and the keypoint
// buffer size uses a literal 4 where sizeof(keypt) is presumably meant --
// confirm sizeof(keypt) == 4.
cudaError_t __initKeypointBuffer(int _width, int _height, int _nscales, dim3 _blockDim) {
printf("initing keypoint buffer with %d scales\n",_nscales);
if (semiDenseKeypointStruct::inited) {
printf("Already inited\n");
return cudaSuccess;
}
semiDenseKeypointStruct::nscales = _nscales;
gKeypoints = new semiDenseKeypointStruct[_nscales];
dim3 gridDim; gridDim.z = 1;
for( int i=0; i<_nscales; ++i ) {
int div = 1 << i;
size_t ipitch, fpitch;
// Byte images (filtered copies) and float images (saliency, keypoints)
cudaMallocPitch((void**)&gKeypoints[i].scale3, &ipitch, _width/div, _height/div);
cudaMallocPitch((void**)&gKeypoints[i].scale6, &ipitch, _width/div, _height/div);
cudaMallocPitch((void**)&gKeypoints[i].saliency, &fpitch, 4*_width/div, _height/div);
cudaMallocPitch((void**)&gKeypoints[i].keypoints_img, &fpitch, 4*_width/div, _height/div);
cudaMalloc((void**)&gKeypoints[i].keypoints, 4*_width*_height/(div*div) );
gKeypoints[i].nkeypoints = 0;
// Per-grid-cell point counts and their prefix sums (device + host)
gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
cudaMalloc((void**)&gKeypoints[i].ptcount, gridDim.x*gridDim.y);
gKeypoints[i].ptcount_h = new unsigned char[gridDim.x*gridDim.y];
cudaMalloc((void**)&gKeypoints[i].idxvector, (gridDim.x*gridDim.y+1)*sizeof(int));
gKeypoints[i].idxvector_h = new int[gridDim.x*gridDim.y+1];
gKeypoints[i].featurepts_h = new pvcore::keypt[_width/div*_height/div];
// Record the pitches of the last two pitched allocations
gKeypoints[i].ipitch = ipitch;
gKeypoints[i].fpitch = fpitch;
}
semiDenseKeypointStruct::inited = true;
return cudaSuccess;
}
// Releases device and host memory for semi-dense optical flow.
// Safe to call when nothing was allocated. After this call,
// __initKeypointBuffer may be invoked again.
void __freeKeypointBuffer() {
if( gKeypoints == NULL ) {
return;
}
for( int i=0; i<semiDenseKeypointStruct::nscales; ++i ) {
cudaFree(gKeypoints[i].scale3);
cudaFree(gKeypoints[i].scale6);
cudaFree(gKeypoints[i].saliency);
// keypoints_img and keypoints are allocated in __initKeypointBuffer
// but were never released here -- that leaked device memory on every
// re-initialization.
cudaFree(gKeypoints[i].keypoints_img);
cudaFree(gKeypoints[i].keypoints);
cudaFree(gKeypoints[i].ptcount);
delete [] gKeypoints[i].ptcount_h;
cudaFree(gKeypoints[i].idxvector);
delete [] gKeypoints[i].idxvector_h;
delete [] gKeypoints[i].featurepts_h;
}
// The struct array itself was also leaked previously.
delete [] gKeypoints;
gKeypoints = NULL;
semiDenseKeypointStruct::inited = false;
}
// Allocates per-scale device buffers for the optical-flow state: keypoint
// and velocity lists sized for one entry per pixel, plus pitched keypoint
// and feature images, then zeroes the velocities.
// NOTE(review): err is overwritten by every call, so only the status of
// the last allocation of the last scale is returned -- confirm acceptable.
cudaError_t __initOpticalFlowStructGPU(opticalFlowStruct* _src,
int _width,
int _height,
int _nscales) {
//*_src = new opticalFlowStruct[_nscales];
cudaError_t err = cudaSuccess;
for( int i=0; i<_nscales; ++i ) {
int div = 1 << i;
// Init keypoint struct (Maximum number of keypoints is as many as there are pixels in the image)
err = cudaMalloc((void**)&(_src[i].keypoints), sizeof(keypt)*_width/div*_height/div);
err = cudaMalloc((void**)&(_src[i].keypoints_vel), sizeof(keypt_vel)*_width/div*_height/div);
_src[i].nkeypoints = 0;
err = cudaMallocPitch((void**)&(_src[i].keypoints_img),
&(_src[i].keypoints_img_pitch),
_width*sizeof(float)/div, _height/div);
// Convert pitch from bytes to elements for kernel indexing
_src[i].keypoints_img_pitch /= sizeof(float);
err = cudaMallocPitch((void**)&(_src[i].features), &(_src[i].features_pitch),
_width*sizeof(featurept)/div, _height/div);
_src[i].features_pitch /= sizeof(featurept);
// Zero the velocity field: one thread per keypoint slot
dim3 blockDim; blockDim.x = blockDim.y = 16; blockDim.z = 1;
dim3 gridDim; gridDim.z = 1;
gridDim.x = GLOBAL_SIZE(_width/div,blockDim.x);
gridDim.y = GLOBAL_SIZE(_height/div,blockDim.y);
zeroVel<<<gridDim,blockDim>>>(_src[i].keypoints_vel,_width/div);
}
return err;
}
// Allocates per-scale device buffers for the prediction stage: a keypoint
// list sized for one keypoint per pixel and a pitched feature image.
// Allocation results are not checked; always returns cudaSuccess.
cudaError_t __initPredictionStructGPU(predictionStruct* _src,
int _width,
int _height,
int _nscales) {
//*_src = new predictionStruct[_nscales];
for( int scale=0; scale<_nscales; ++scale ) {
int div = 1 << scale;
cudaMalloc((void**)&_src[scale].keypoints,_width/div*_height/div*sizeof(keypt));
_src[scale].nkeypoints = 0;
cudaMallocPitch((void**)&_src[scale].features, &_src[scale].features_pitch,
_width*sizeof(featurept)/div, _height/div);
// Convert pitch from bytes to elements for kernel indexing
_src[scale].features_pitch /= sizeof(featurept);
}
return cudaSuccess;
}
// Matches the predicted keypoints against the new frame's features on every
// scale: binds the feature images to textures, runs best_L2_match with a
// +/-3 pixel search window, then swaps the feature buffers so the current
// frame becomes the reference for the next iteration. Contains temporary
// debug printfs of the first 10 keypoints before/after matching.
// NOTE(review): BOTH textures (reference tex_8u4_0 and search tex_8u4_1)
// are bound to _prediction[i].features; the commented-out binary variant
// binds the reference to _opticalFlow[i].features instead -- this looks
// like a bug (matching a frame against itself); confirm.
// NOTE(review): the debug loops print 10 entries even when nkeypoints < 10,
// reading stale host-buffer contents.
cudaError_t __matchGPU(opticalFlowStruct* _opticalFlow,
predictionStruct* _prediction,
int _width, int _height, dim3 _blockDim) {
// One thread per keypoint, 128 threads per block
_blockDim.y = 1;
_blockDim.x = 128;
int nscales = semiDenseKeypointStruct::nscales;
dim3 gridDim; gridDim.y = gridDim.z = 1;
// Features
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8,8,8,8,cudaChannelFormatKindUnsigned);
tex_8u4_0.addressMode[0] = cudaAddressModeClamp;
tex_8u4_0.addressMode[1] = cudaAddressModeClamp;
tex_8u4_0.filterMode = cudaFilterModePoint;
tex_8u4_0.normalized = false;
tex_8u4_1.addressMode[0] = cudaAddressModeClamp;
tex_8u4_1.addressMode[1] = cudaAddressModeClamp;
tex_8u4_1.filterMode = cudaFilterModePoint;
tex_8u4_1.normalized = false;
// cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindUnsigned);
// tex_32u_0.addressMode[0] = cudaAddressModeClamp;
// tex_32u_0.addressMode[1] = cudaAddressModeClamp;
// tex_32u_0.filterMode = cudaFilterModePoint;
// tex_32u_0.normalized = false;
// tex_32u_1.addressMode[0] = cudaAddressModeClamp;
// tex_32u_1.addressMode[1] = cudaAddressModeClamp;
// tex_32u_1.filterMode = cudaFilterModePoint;
// tex_32u_1.normalized = false;
for (int i=0; i<nscales; ++i) {
int div = 1 << i;
if (_opticalFlow[i].nkeypoints == 0) {
continue;
}
gridDim.x = GLOBAL_SIZE(_opticalFlow[i].nkeypoints, _blockDim.x);
// Debug: dump the predicted keypoints before matching
cudaMemcpy( gKeypoints[i].featurepts_h, _prediction[i].keypoints,
sizeof(pvcore::keypt)*_opticalFlow[i].nkeypoints,cudaMemcpyDeviceToHost);
printf("#npoints: %d\n",_opticalFlow[i].nkeypoints);
for (int j=0; j<10; ++j) {
printf("kp %d before: %d %d\n",j,gKeypoints[i].featurepts_h[j].x,gKeypoints[i].featurepts_h[j].y);
}
cudaBindTexture2D( 0, &tex_8u4_0, _prediction[i].features, &channelDesc,
_width/(4*div), _height/div,
_prediction[i].featuresBytesPerRow()/4);
cudaBindTexture2D( 0, &tex_8u4_1, _prediction[i].features, &channelDesc,
_width/(4*div), _height/div,
_prediction[i].featuresBytesPerRow()/4);
best_L2_match<3><<<gridDim,_blockDim>>>(_prediction[i].keypoints, _opticalFlow[i].keypoints);
// cudaBindTexture2D( 0, &tex_32u_0, _opticalFlow[i].features, &channelDesc,
// _width/(4*div), _height/div,
// _prediction[i].featuresBytesPerRow()/4);
// cudaBindTexture2D( 0, &tex_32u_1, _prediction[i].features, &channelDesc,
// _width/(4*div), _height/div,
// _prediction[i].featuresBytesPerRow()/4);
// best_bin_match<3><<<gridDim,_blockDim>>>(_prediction[i].keypoints, _opticalFlow[i].keypoints);
// Debug: dump the matched keypoints
cudaMemcpy( gKeypoints[i].featurepts_h, _opticalFlow[i].keypoints,
sizeof(pvcore::keypt)*_opticalFlow[i].nkeypoints,cudaMemcpyDeviceToHost);
for (int j=0; j<10; ++j) {
printf("kp %d after: %d %d\n",j,gKeypoints[i].featurepts_h[j].x,gKeypoints[i].featurepts_h[j].y);
}
// Now we have matched, so swap features in prediction and optical flow
featurept* tmp = _opticalFlow[i].features;
_opticalFlow[i].features = _prediction[i].features;
_prediction[i].features = tmp;
}
return cudaSuccess;
}
// Launches predict_points for every scale that currently has keypoints,
// writing each point's velocity-advanced position into the prediction
// struct and propagating the keypoint count.
cudaError_t __predictPointsGPU(opticalFlowStruct* _opticalFlow,
predictionStruct* _prediction,
int _width, int _height, dim3 _blockDim) {
// One thread per keypoint, 128 threads per block
_blockDim.y = 1;
_blockDim.x = 128;
dim3 gridDim; gridDim.y = gridDim.z = 1;
for (int scale=0; scale<semiDenseKeypointStruct::nscales; ++scale) {
int npts = _opticalFlow[scale].nkeypoints;
if (npts == 0) {
continue;
}
gridDim.x = GLOBAL_SIZE(npts, _blockDim.x);
predict_points<<<gridDim,_blockDim>>>(_opticalFlow[scale].keypoints,
_opticalFlow[scale].keypoints_vel,
_prediction[scale].keypoints, npts);
_prediction[scale].nkeypoints = npts;
}
return cudaSuccess;
}
// Builds the per-scale image pyramid: halves the image once per level into
// scale3, then Gauss-filters each level twice (scale3 = once-filtered,
// scale6 = twice-filtered). Lazily allocates the keypoint buffers on first
// use.
// NOTE(review): every __resizeGPU call is passed the full-resolution
// _width/_height even though the source for level i>=2 is already
// downscaled -- confirm this is what NPP's source ROI expects here.
cudaError_t __generateImagePyramid(const unsigned char* _src,
unsigned int _width,
unsigned int _height,
unsigned int _pitchs,
unsigned int _nscales,
dim3 _blockDim) {
if (!semiDenseKeypointStruct::inited) {
__initKeypointBuffer(_width,_height,_nscales,_blockDim);
}
int nscales = semiDenseKeypointStruct::nscales;
// Resize and gauss filter to different levels
const unsigned char* timg = _src;
for (int i=1; i<nscales; ++i) {
__resizeGPU(timg, gKeypoints[i].scale3, _width,_height,
gKeypoints[i-1].ipitch, gKeypoints[i].ipitch,0.5);
timg = gKeypoints[i].scale3;
}
// Level 0 filters straight from the input image
__filterGaussGPU(_src,gKeypoints[0].scale3, _width, _height, gKeypoints[0].ipitch,gKeypoints[0].ipitch,3);
__filterGaussGPU(gKeypoints[0].scale3, gKeypoints[0].scale6, _width, _height,
gKeypoints[0].ipitch, gKeypoints[0].ipitch,3);
for (int i=1; i<nscales; ++i) {
int div = 1 << i;
// In-place first filtering, then a second pass into scale6
__filterGaussGPU(gKeypoints[i].scale3, gKeypoints[i].scale3, _width/div,_height/div,
gKeypoints[i].ipitch, gKeypoints[i].ipitch,3);
__filterGaussGPU(gKeypoints[i].scale3, gKeypoints[i].scale6, _width/div,_height/div,
gKeypoints[i].ipitch, gKeypoints[i].ipitch,3);
}
return cudaSuccess;
}
// Computes weak-feature descriptors for every pixel on every scale: binds
// the filtered image (scale3, read as 32-bit words = 4 pixels per texel) to
// tex_32u and runs weak_features_full into the prediction feature image.
cudaError_t __extractFeaturesGPU(predictionStruct* _prediction,
int _width,
int _height,
dim3 _blockDim) {
int nscales = semiDenseKeypointStruct::nscales;
// Features
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindUnsigned);
tex_32u.addressMode[0] = cudaAddressModeClamp;
tex_32u.addressMode[1] = cudaAddressModeClamp;
tex_32u.filterMode = cudaFilterModePoint;
tex_32u.normalized = false;
dim3 gridDim; gridDim.z = 1;
for (int i=0; i<nscales; ++i) {
int div = 1 << i;
// One thread per 4-pixel group horizontally
gridDim.x = GLOBAL_SIZE( _width/(div*4), _blockDim.x );
gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
cudaBindTexture2D( 0, &tex_32u, gKeypoints[i].scale3, &channelDesc,
_width/(4*div), _height/div, gKeypoints[i].ipitch );
weak_features_full<<<gridDim,_blockDim>>>((unsigned int*)_prediction[i].features,
_width/(div*4), _height/div, _prediction[i].featuresBytesPerRow()/4);
}
return cudaSuccess;
}
// Extracts keypoints semi-dense optical flow.
// Pipeline per scale: (1) compute saliency from the two filtered images,
// (2) non-maximum suppression into the keypoint image, (3) count points
// per 16x16 block, (4) build the per-block prefix sum on the host, and
// (5) compact the keypoints into _opticalFlow[i].keypoints via a per-block
// bitonic sort.
// NOTE(review): the saliency kernels receive fpitch/4 (element pitch) but
// nonmax_sup receives the raw fpitch, and count_points/createPointList
// operate on _opticalFlow[i].keypoints_img while nonmax_sup writes to
// gKeypoints[i].keypoints_img -- confirm both are intentional.
cudaError_t __extractKeypointsGPU(opticalFlowStruct* _opticalFlow,
unsigned int _width,
unsigned int _height,
unsigned int _keypointType,
dim3 _blockDim) {
int nscales = semiDenseKeypointStruct::nscales;
// Compute saliency images
float threshold = (float)_keypointType*0.01f;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8,0,0,0,cudaChannelFormatKindUnsigned);
tex_8u.addressMode[0] = cudaAddressModeClamp;
tex_8u.addressMode[1] = cudaAddressModeClamp;
tex_8u.filterMode = cudaFilterModePoint;
tex_8u.normalized = false;
dim3 gridDim; gridDim.z = 1;
for (int i=0; i<nscales; ++i) {
int div = 1 << i;
gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
// Saliency at radius 3 from the once-filtered image ...
cudaBindTexture2D( 0, &tex_8u, gKeypoints[i].scale3, &channelDesc,
_width/div, _height/div, gKeypoints[i].ipitch );
saliency_3<<< gridDim,_blockDim>>>(gKeypoints[i].saliency,
_width/div, _height/div,
gKeypoints[i].fpitch/4, threshold);
// ... accumulated with saliency at radius 6 from the twice-filtered one
cudaBindTexture2D( 0, &tex_8u, gKeypoints[i].scale6, &channelDesc,
_width/div, _height/div, gKeypoints[i].ipitch );
saliency_6<<< gridDim,_blockDim>>>(gKeypoints[i].saliency,
_width/div, _height/div,
gKeypoints[i].fpitch/4, threshold);
}
// Non max suppression
channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
tex_32f.addressMode[0] = cudaAddressModeClamp;
tex_32f.addressMode[1] = cudaAddressModeClamp;
tex_32f.filterMode = cudaFilterModePoint;
tex_32f.normalized = false;
for (int i=0; i<nscales; ++i) {
int div = 1 << i;
gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
cudaBindTexture2D( 0, &tex_32f, gKeypoints[i].saliency, &channelDesc,
_width/div, _height/div, gKeypoints[i].fpitch );
nonmax_sup<<< gridDim,_blockDim>>>(gKeypoints[i].keypoints_img, _width/div, _height/div,
gKeypoints[i].fpitch);
// Add new keypoints to optical flow (currently disabled)
gridDim.x = GLOBAL_SIZE( _width/(2*div), _blockDim.x );
gridDim.y = GLOBAL_SIZE( _height/(2*div), _blockDim.y );
// add_even<<< gridDim, _blockDim>>>();
// add_odd<<< gridDim, _blockDim>>>();
}
for (int i=0; i<nscales; ++i) {
int div = 1 << i;
// Grid covers the image in 16x16 tiles; each block then uses 16x8
// threads with two pixels per thread (see count_points/createPointList)
_blockDim.y = 16;
gridDim.x = GLOBAL_SIZE( _width/div, _blockDim.x );
gridDim.y = GLOBAL_SIZE( _height/div, _blockDim.y );
_blockDim.y = 8;
// Count points
count_points<<<gridDim,_blockDim>>>(_opticalFlow[i].keypoints_img, gKeypoints[i].ptcount,
0.3f, _opticalFlow[i].keypoints_img_pitch);
// Accumulate points: prefix-sum the per-block counts on the host
cudaMemcpy(gKeypoints[i].ptcount_h, gKeypoints[i].ptcount,
gridDim.x*gridDim.y,cudaMemcpyDeviceToHost);
_opticalFlow[i].nkeypoints = 0;
gKeypoints[i].idxvector_h[0] = 0;
for( int j=0; j<gridDim.x*gridDim.y; ++j ) {
_opticalFlow[i].nkeypoints += gKeypoints[i].ptcount_h[j];
gKeypoints[i].idxvector_h[j+1] = _opticalFlow[i].nkeypoints;
}
cudaMemcpy(gKeypoints[i].idxvector, gKeypoints[i].idxvector_h,
(gridDim.x*gridDim.y+1)*sizeof(int), cudaMemcpyHostToDevice );
// Create points: compact the keypoint image into the point list
createPointList<<<gridDim,_blockDim>>>(_opticalFlow[i].keypoints_img, gKeypoints[i].idxvector,
_opticalFlow[i].keypoints, _opticalFlow[i].keypoints_img_pitch);
// Copy the list back to the host staging buffer (debug/inspection)
cudaMemcpy( gKeypoints[i].featurepts_h, _opticalFlow[i].keypoints,
sizeof(pvcore::keypt)*_opticalFlow[i].nkeypoints,cudaMemcpyDeviceToHost);
}
return cudaSuccess;
}
} // namespace pvcore
|
80a9d9f492ca0835632ed8575b931d86356bd7dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Vector addition: C = 1/A + 1/B.
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O2 -m64 -o vecAdd vecAdd.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O2 -m64 -o vecAdd vecAdd.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
float* d_A; // device vectors
float* d_B;
float* d_C;
// Functions
void RandomInit(float*, int);
// Device code: element-wise C[i] = 1/A[i] + 1/B[i].
// The 1.0 literals keep the arithmetic in double on purpose, so the GPU
// result matches the double-precision CPU reference computed in main().
// The trailing __syncthreads() of the original version was removed: the
// kernel uses no shared memory and nothing follows the barrier, so it
// served no purpose.
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = 1.0/A[i] + 1.0/B[i];
}
// Host code.
// Reads the GPU id, the vector size N and the threads-per-block count from
// stdin, computes C = 1/A + 1/B on the GPU (timing the input, compute and
// output phases separately), repeats the computation on the CPU, and
// reports Gflops, speedup and the L2 norm of the GPU/CPU difference.
int main( )
{
int gid;
// Error code to check return values for HIP calls
hipError_t err = hipSuccess;
scanf("%d",&gid);
err = hipSetDevice(gid);
if (err != hipSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Set GPU with device ID = %d\n", gid);
hipSetDevice(gid);
printf("Vector Addition: C = 1/A + 1/B\n");
int mem = 1024*1024*1024; // Giga
int N;
printf("Enter the size of the vectors: ");
scanf("%d",&N);
printf("%d\n",N);
if( 3*N > mem ) { // each real number takes 4 bytes
printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
exit(2);
}
long size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(size);
// Initialize the input vectors with random numbers
RandomInit(h_A, N);
RandomInit(h_B, N);
// Set the sizes of threads and blocks
int threadsPerBlock;
loop:
printf("Enter the number of threads per block: ");
scanf("%d",&threadsPerBlock);
printf("%d\n",threadsPerBlock);
if( threadsPerBlock > 1024 ) {
printf("The number of threads per block must be less than 1024 ! \n");
goto loop;
}
int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock;
printf("The number of blocks is %d\n", blocksPerGrid);
if( blocksPerGrid > 2147483647 ) {
printf("The number of blocks must be less than 2147483647 ! \n");
goto loop;
}
// create the timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start the timer
hipEventRecord(start,0);
// Allocate vectors in device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float Intime;
hipEventElapsedTime( &Intime, start, stop);
printf("Input time for GPU: %f (ms) \n",Intime);
// start the timer
hipEventRecord(start,0);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float gputime;
// The original line here was corrupted ("daEventElapsedTime(...);cu")
// and did not compile; restored to the intended HIP call.
hipEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime));
// Copy result from device memory to host memory
// h_C contains the result in host memory
// start the timer
hipEventRecord(start,0);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float Outime;
hipEventElapsedTime( &Outime, start, stop);
printf("Output time for GPU: %f (ms) \n",Outime);
float gputime_tot;
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
// start the timer
hipEventRecord(start,0);
h_D = (float*)malloc(size); // to compute the reference solution
for (int i = 0; i < N; ++i)
h_D[i] = 1.0/h_A[i] + 1.0/h_B[i];
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float cputime;
hipEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
// destroy the timer
hipEventDestroy(start);
hipEventDestroy(stop);
// check result
printf("Check result:\n");
double sum=0;
double diff;
for (int i = 0; i < N; ++i) {
diff = abs(h_D[i] - h_C[i]);
sum += diff*diff;
// if(diff > 1.0e-15) {
// printf("i=%d, h_D=%15.10e, h_C=%15.10e \n", i, h_D[i], h_C[i]);
// }
}
sum = sqrt(sum);
printf("norm(h_C - h_D)=%20.15e\n\n",sum);
// Release host memory (previously leaked until process exit)
free(h_A);
free(h_B);
free(h_C);
free(h_D);
hipDeviceReset();
}
// Fills data[0..n-1] with pseudo-random floats in [0, 1] drawn from the
// C library PRNG (rand). The sequence depends on the current PRNG state;
// srand() is not called here.
void RandomInit(float* data, int n)
{
int i = 0;
while (i < n) {
data[i] = rand() / (float)RAND_MAX;
++i;
}
}
| 80a9d9f492ca0835632ed8575b931d86356bd7dd.cu | // Vector addition: C = 1/A + 1/B.
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O2 -m64 -o vecAdd vecAdd.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O2 -m64 -o vecAdd vecAdd.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
float* d_A; // device vectors
float* d_B;
float* d_C;
// Functions
void RandomInit(float*, int);
// Device code: element-wise C[i] = 1/A[i] + 1/B[i].
// The 1.0 literals keep the arithmetic in double on purpose, so the GPU
// result matches the double-precision CPU reference computed in main().
// The trailing __syncthreads() of the original version was removed: the
// kernel uses no shared memory and nothing follows the barrier, so it
// served no purpose.
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = 1.0/A[i] + 1.0/B[i];
}
// Host code
// Host driver: reads a GPU id, a vector length N, and a block size from
// stdin; computes C = 1/A + 1/B on the GPU; times the transfer/kernel/CPU
// phases with CUDA events; and reports the RMS difference between the GPU
// result and a CPU reference.
int main( )
{
    int gid;

    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    scanf("%d", &gid);
    err = cudaSetDevice(gid);
    if (err != cudaSuccess) {
        printf("!!! Cannot select GPU with device ID = %d\n", gid);
        exit(1);
    }
    printf("Set GPU with device ID = %d\n", gid);
    cudaSetDevice(gid);

    printf("Vector Addition: C = 1/A + 1/B\n");
    int mem = 1024*1024*1024;     // Giga
    int N;
    printf("Enter the size of the vectors: ");
    scanf("%d", &N);
    printf("%d\n", N);
    // Compare in 64-bit: 3*N as plain int overflows for large user input,
    // which would let oversized requests slip past this check.
    if ((long long)3 * N > (long long)mem) { // each real number takes 4 bytes
        printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
        exit(2);
    }
    long size = N * sizeof(float);

    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);

    // Initialize the input vectors with random numbers
    RandomInit(h_A, N);
    RandomInit(h_B, N);

    // Set the sizes of threads and blocks
    int threadsPerBlock;
loop:
    printf("Enter the number of threads per block: ");
    scanf("%d", &threadsPerBlock);
    printf("%d\n", threadsPerBlock);
    if (threadsPerBlock > 1024) {
        printf("The number of threads per block must be less than 1024 ! \n");
        goto loop;
    }
    int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock; // ceil(N/threads)
    printf("The number of blocks is %d\n", blocksPerGrid);
    if (blocksPerGrid > 2147483647) {
        printf("The number of blocks must be less than 2147483647 ! \n");
        goto loop;
    }

    // create the timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // start the timer: device allocation + host-to-device transfers
    cudaEventRecord(start,0);

    // Allocate vectors in device memory
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // stop the timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    float Intime;
    cudaEventElapsedTime(&Intime, start, stop);
    printf("Input time for GPU: %f (ms) \n", Intime);

    // start the timer: kernel execution only
    cudaEventRecord(start,0);

    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);

    // stop the timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    float gputime;
    // FIX: this call was garbled in the original ("daEventElapsedTime(...);cu",
    // the leading "cu" had been displaced to the end of the line) and did not compile.
    cudaEventElapsedTime(&gputime, start, stop);
    printf("Processing time for GPU: %f (ms) \n", gputime);
    printf("GPU Gflops: %f\n", 3*N/(1000000.0*gputime));

    // Copy result from device memory to host memory
    // h_C contains the result in host memory

    // start the timer: device-to-host transfer + device cleanup
    cudaEventRecord(start,0);

    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // stop the timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    float Outime;
    cudaEventElapsedTime(&Outime, start, stop);
    printf("Output time for GPU: %f (ms) \n", Outime);

    float gputime_tot;
    gputime_tot = Intime + gputime + Outime;
    printf("Total time for GPU: %f (ms) \n", gputime_tot);

    // start the timer: CPU reference computation
    cudaEventRecord(start,0);

    h_D = (float*)malloc(size); // to compute the reference solution
    for (int i = 0; i < N; ++i)
        h_D[i] = 1.0/h_A[i] + 1.0/h_B[i];

    // stop the timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    float cputime;
    cudaEventElapsedTime(&cputime, start, stop);
    printf("Processing time for CPU: %f (ms) \n", cputime);
    printf("CPU Gflops: %f\n", 3*N/(1000000.0*cputime));
    printf("Speed up of GPU = %f\n", cputime/(gputime_tot));

    // destroy the timer
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // check result: RMS norm of the GPU/CPU difference
    printf("Check result:\n");
    double sum = 0;
    double diff;
    for (int i = 0; i < N; ++i) {
        // fabs instead of abs: plain C abs() takes int and would truncate the
        // difference to zero if no double overload of abs() is in scope.
        diff = fabs(h_D[i] - h_C[i]);
        sum += diff*diff;
    }
    sum = sqrt(sum);
    printf("norm(h_C - h_D)=%20.15e\n\n", sum);

    // Release host buffers (the original leaked all four allocations).
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_D);

    cudaDeviceReset();
}
// Allocates an array with random float entries.
// Populates the first n entries of data with uniform random values in [0, 1].
// Entries are filled front-to-back, preserving the original rand() ordering.
void RandomInit(float* data, int n)
{
    int filled = 0;
    while (filled < n) {
        data[filled] = rand() / (float)RAND_MAX;
        ++filled;
    }
}
|
4a202010ae71c6bb354bd33d0771fc1c1b66bbe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "deconvolutional_layer.h"
#include "maxpool_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
// Runs one forward pass over every layer of the network on the GPU.
// For each layer: the gradient buffer (if any) is zeroed, the layer-type
// dispatch calls the matching forward_*_gpu function, and state.input is
// advanced so layer i+1 consumes layer i's output.
void forward_network_gpu(network net, network_state state)
{
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        if(l.delta_gpu){
            // Clear gradients left over from the previous iteration.
            fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
        }
        if(l.type == CONVOLUTIONAL){
            forward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            forward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            forward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            forward_local_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            forward_detection_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            forward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            forward_rnn_layer_gpu(l, state);
        } else if(l.type == CRNN){
            forward_crnn_layer_gpu(l, state);
        } else if(l.type == CROP){
            forward_crop_layer_gpu(l, state);
        } else if(l.type == COST){
            forward_cost_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            forward_softmax_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            forward_normalization_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            forward_maxpool_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            forward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            forward_dropout_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            // Route layers take the whole network (not the state): they
            // gather outputs from arbitrary earlier layers.
            forward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            forward_shortcut_layer_gpu(l, state);
        }
        // Chain: the next layer reads this layer's output as its input.
        state.input = l.output_gpu;
    }
}
// Runs the backward pass from the last layer down to the first on the GPU.
// For layer i > 0, state.input/state.delta are pointed at layer i-1's
// output/gradient buffers; layer 0 propagates into the caller-supplied
// original input/delta.
void backward_network_gpu(network net, network_state state)
{
    int i;
    float * original_input = state.input;
    float * original_delta = state.delta;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        layer l = net.layers[i];
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        }else{
            // Gradients flow into the previous layer's buffers.
            layer prev = net.layers[i-1];
            state.input = prev.output_gpu;
            state.delta = prev.delta_gpu;
        }
        if(l.type == CONVOLUTIONAL){
            backward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            backward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            backward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            backward_local_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            // First-layer pooling/softmax have no upstream delta to fill.
            if(i != 0) backward_maxpool_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            if(i != 0) backward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            backward_dropout_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            backward_detection_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            backward_normalization_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            if(i != 0) backward_softmax_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            backward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            backward_rnn_layer_gpu(l, state);
        } else if(l.type == CRNN){
            backward_crnn_layer_gpu(l, state);
        } else if(l.type == COST){
            backward_cost_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            // Route layers scatter gradients back to their source layers,
            // so they need the whole network rather than the state.
            backward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            backward_shortcut_layer_gpu(l, state);
        }
    }
}
// Applies one optimizer step to every trainable layer on the GPU.
// The effective batch is batch * subdivisions (gradient accumulation).
void update_network_gpu(network net)
{
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        if(l.type == CONVOLUTIONAL){
            update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == DECONVOLUTIONAL){
            // NOTE(review): unlike the other update functions this one is not
            // passed update_batch — presumably its signature differs; confirm
            // against deconvolutional_layer.h.
            update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
        } else if(l.type == CONNECTED){
            update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == RNN){
            update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == CRNN){
            update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == LOCAL){
            update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        }
    }
}
// Trains the network on one batch: uploads x (input) and y (truth), runs the
// forward and backward passes, and returns the batch cost. Weights are only
// updated once every net.subdivisions sub-batches (gradient accumulation).
float train_network_datum_gpu(network net, float *x, float *y)
{
    network_state state;
    state.index = 0;
    state.net = net;
    int x_size = get_network_input_size(net)*net.batch;
    int y_size = get_network_output_size(net)*net.batch;
    // Detection layers define their own truth layout/size.
    if(net.layers[net.n-1].type == DETECTION) y_size = net.layers[net.n-1].truths*net.batch;
    if(!*net.input_gpu){
        // First call: lazily allocate persistent device buffers.
        *net.input_gpu = cuda_make_array(x, x_size);
        *net.truth_gpu = cuda_make_array(y, y_size);
    }else{
        // Later calls reuse the buffers; just push the new batch up.
        cuda_push_array(*net.input_gpu, x, x_size);
        cuda_push_array(*net.truth_gpu, y, y_size);
    }
    state.input = *net.input_gpu;
    state.delta = 0;
    state.truth = *net.truth_gpu;
    state.train = 1;
    forward_network_gpu(net, state);
    backward_network_gpu(net, state);
    float error = get_network_cost(net);
    // Step the optimizer only at sub-division boundaries.
    if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
    return error;
}
// Copies layer i's GPU output into its host-side buffer and returns the
// host pointer.
float *get_network_output_layer_gpu(network net, int i)
{
    layer target = net.layers[i];
    int elems = target.outputs * target.batch;
    cuda_pull_array(target.output_gpu, target.output, elems);
    return target.output;
}
// Returns the host-side output of the last non-COST layer (falling back to
// layer 0 if every layer above it is a COST layer).
float *get_network_output_gpu(network net)
{
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) --i;
    return get_network_output_layer_gpu(net, i);
}
// Runs inference on one batch: uploads `input` to the GPU, forward-propagates,
// and returns a host-side pointer to the final layer's output. The temporary
// device copy of the input is freed before returning.
float *network_predict_gpu(network net, float *input)
{
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;
    state.net = net;
    state.input = cuda_make_array(input, size);  // device copy of the input batch
    state.truth = 0;   // no ground truth at inference time
    state.train = 0;
    state.delta = 0;   // no gradient buffer needed
    forward_network_gpu(net, state);
    float *out = get_network_output_gpu(net);
    cuda_free(state.input);
    return out;
}
| 4a202010ae71c6bb354bd33d0771fc1c1b66bbe5.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "deconvolutional_layer.h"
#include "maxpool_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
// Runs one forward pass over every layer of the network on the GPU.
// For each layer: the gradient buffer (if any) is zeroed, the layer-type
// dispatch calls the matching forward_*_gpu function, and state.input is
// advanced so layer i+1 consumes layer i's output.
void forward_network_gpu(network net, network_state state)
{
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        if(l.delta_gpu){
            // Clear gradients left over from the previous iteration.
            fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
        }
        if(l.type == CONVOLUTIONAL){
            forward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            forward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            forward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            forward_local_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            forward_detection_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            forward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            forward_rnn_layer_gpu(l, state);
        } else if(l.type == CRNN){
            forward_crnn_layer_gpu(l, state);
        } else if(l.type == CROP){
            forward_crop_layer_gpu(l, state);
        } else if(l.type == COST){
            forward_cost_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            forward_softmax_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            forward_normalization_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            forward_maxpool_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            forward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            forward_dropout_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            // Route layers take the whole network (not the state): they
            // gather outputs from arbitrary earlier layers.
            forward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            forward_shortcut_layer_gpu(l, state);
        }
        // Chain: the next layer reads this layer's output as its input.
        state.input = l.output_gpu;
    }
}
// Runs the backward pass from the last layer down to the first on the GPU.
// For layer i > 0, state.input/state.delta are pointed at layer i-1's
// output/gradient buffers; layer 0 propagates into the caller-supplied
// original input/delta.
void backward_network_gpu(network net, network_state state)
{
    int i;
    float * original_input = state.input;
    float * original_delta = state.delta;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        layer l = net.layers[i];
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        }else{
            // Gradients flow into the previous layer's buffers.
            layer prev = net.layers[i-1];
            state.input = prev.output_gpu;
            state.delta = prev.delta_gpu;
        }
        if(l.type == CONVOLUTIONAL){
            backward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            backward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            backward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            backward_local_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            // First-layer pooling/softmax have no upstream delta to fill.
            if(i != 0) backward_maxpool_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            if(i != 0) backward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            backward_dropout_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            backward_detection_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            backward_normalization_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            if(i != 0) backward_softmax_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            backward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            backward_rnn_layer_gpu(l, state);
        } else if(l.type == CRNN){
            backward_crnn_layer_gpu(l, state);
        } else if(l.type == COST){
            backward_cost_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            // Route layers scatter gradients back to their source layers,
            // so they need the whole network rather than the state.
            backward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            backward_shortcut_layer_gpu(l, state);
        }
    }
}
// Applies one optimizer step to every trainable layer on the GPU.
// The effective batch is batch * subdivisions (gradient accumulation).
void update_network_gpu(network net)
{
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        if(l.type == CONVOLUTIONAL){
            update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == DECONVOLUTIONAL){
            // NOTE(review): unlike the other update functions this one is not
            // passed update_batch — presumably its signature differs; confirm
            // against deconvolutional_layer.h.
            update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
        } else if(l.type == CONNECTED){
            update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == RNN){
            update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == CRNN){
            update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == LOCAL){
            update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        }
    }
}
// Trains the network on one batch: uploads x (input) and y (truth), runs the
// forward and backward passes, and returns the batch cost. Weights are only
// updated once every net.subdivisions sub-batches (gradient accumulation).
float train_network_datum_gpu(network net, float *x, float *y)
{
    network_state state;
    state.index = 0;
    state.net = net;
    int x_size = get_network_input_size(net)*net.batch;
    int y_size = get_network_output_size(net)*net.batch;
    // Detection layers define their own truth layout/size.
    if(net.layers[net.n-1].type == DETECTION) y_size = net.layers[net.n-1].truths*net.batch;
    if(!*net.input_gpu){
        // First call: lazily allocate persistent device buffers.
        *net.input_gpu = cuda_make_array(x, x_size);
        *net.truth_gpu = cuda_make_array(y, y_size);
    }else{
        // Later calls reuse the buffers; just push the new batch up.
        cuda_push_array(*net.input_gpu, x, x_size);
        cuda_push_array(*net.truth_gpu, y, y_size);
    }
    state.input = *net.input_gpu;
    state.delta = 0;
    state.truth = *net.truth_gpu;
    state.train = 1;
    forward_network_gpu(net, state);
    backward_network_gpu(net, state);
    float error = get_network_cost(net);
    // Step the optimizer only at sub-division boundaries.
    if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
    return error;
}
// Copies layer i's GPU output into its host-side buffer and returns the
// host pointer.
float *get_network_output_layer_gpu(network net, int i)
{
    layer target = net.layers[i];
    int elems = target.outputs * target.batch;
    cuda_pull_array(target.output_gpu, target.output, elems);
    return target.output;
}
// Returns the host-side output of the last non-COST layer (falling back to
// layer 0 if every layer above it is a COST layer).
float *get_network_output_gpu(network net)
{
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) --i;
    return get_network_output_layer_gpu(net, i);
}
// Runs inference on one batch: uploads `input` to the GPU, forward-propagates,
// and returns a host-side pointer to the final layer's output. The temporary
// device copy of the input is freed before returning.
float *network_predict_gpu(network net, float *input)
{
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;
    state.net = net;
    state.input = cuda_make_array(input, size);  // device copy of the input batch
    state.truth = 0;   // no ground truth at inference time
    state.train = 0;
    state.delta = 0;   // no gradient buffer needed
    forward_network_gpu(net, state);
    float *out = get_network_output_gpu(net);
    cuda_free(state.input);
    return out;
}
|
column_utilities.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_utilities.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/detail/copy.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <thrust/equal.h>
#include <gmock/gmock.h>
namespace cudf {
namespace test {
// Property equality
// Verifies that two columns agree on all metadata (not element values):
// type, size, null count, nullability (only checked for non-empty columns),
// presence of nulls, and number of child columns.
void expect_column_properties_equal(cudf::column_view const& lhs, cudf::column_view const& rhs) {
  EXPECT_EQ(lhs.type(), rhs.type());
  EXPECT_EQ(lhs.size(), rhs.size());
  EXPECT_EQ(lhs.null_count(), rhs.null_count());
  if (lhs.size() > 0) {
    // Empty columns may legitimately differ in nullability.
    EXPECT_EQ(lhs.nullable(), rhs.nullable());
  }
  EXPECT_EQ(lhs.has_nulls(), rhs.has_nulls());
  EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
// Device-side predicate: returns true when row `index` of the lhs table
// differs from row `index` of the rhs table. Used with copy_if to collect
// the indices of mismatching rows.
class corresponding_rows_unequal {
public:
  corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs): comp(d_lhs, d_rhs) {
  }

  // NOTE(review): the <true> template argument presumably enables null-aware
  // comparison — confirm against row_operators.cuh.
  cudf::experimental::row_equality_comparator<true> comp;

  __device__ bool operator()(size_type index) {
    return !comp(index, index);
  }
};
// Asserts element-wise equality of two columns (after checking metadata).
// Mismatching row indices are gathered on the device; on failure either all
// differences or only the first one are rendered into the assertion message,
// depending on print_all_differences.
void expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs,
                          bool print_all_differences) {
  expect_column_properties_equal(lhs, rhs);

  auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
  auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});

  thrust::device_vector<int> differences(lhs.size());

  // Collect the indices of rows that compare unequal.
  auto diff_iter = thrust::copy_if(thrust::device,
                                   thrust::make_counting_iterator(0),
                                   thrust::make_counting_iterator(lhs.size()),
                                   differences.begin(),
                                   corresponding_rows_unequal(*d_lhs, *d_rhs));

  // NOTE(review): CUDA_TRY wrapping hipDeviceSynchronize() mixes error-code
  // types (hipify artifact) — confirm CUDA_TRY accepts hipError_t.
  CUDA_TRY(hipDeviceSynchronize());

  differences.resize(thrust::distance(differences.begin(), diff_iter));

  if (diff_iter > differences.begin()) {
    if (print_all_differences) {
      //
      // If there are differences, display them all
      //
      std::ostringstream buffer;
      buffer << "differences:" << std::endl;

      cudf::table_view source_table ({lhs, rhs});

      fixed_width_column_wrapper<int32_t> diff_column(differences.begin(), differences.end());

      std::unique_ptr<cudf::experimental::table> diff_table = cudf::experimental::gather(source_table,
                                                                                         diff_column);

      //
      // Need to pull back the differences
      //
      std::vector<std::string> h_left_strings = to_strings(diff_table->get_column(0));
      std::vector<std::string> h_right_strings = to_strings(diff_table->get_column(1));

      for (size_t i = 0 ; i < differences.size() ; ++i) {
        buffer << "lhs[" << differences[i] << "] = " << h_left_strings[i]
               << ", rhs[" << differences[i] << "] = " << h_right_strings[i] << std::endl;
      }

      EXPECT_EQ(differences.size(), size_t{0}) << buffer.str();
    } else {
      //
      // If there are differences, just display the first one
      //
      int index = differences[0];  // implicit device-to-host read

      auto diff_lhs = cudf::experimental::detail::slice(lhs, index, index+1);
      auto diff_rhs = cudf::experimental::detail::slice(rhs, index, index+1);

      std::vector<std::string> h_left_strings = to_strings(diff_lhs);
      std::vector<std::string> h_right_strings = to_strings(diff_rhs);

      EXPECT_EQ(differences.size(), size_t{0}) << "first difference: "
                                               << "lhs[" << index << "] = "
                                               << to_string(diff_lhs, "")
                                               << ", rhs[" << index << "] = "
                                               << to_string(diff_rhs, "");
    }
  }
}
// Bitwise equality
// Byte-wise equality check of two device buffers via thrust::equal.
// Null pointers are only acceptable for zero-length comparisons.
void expect_equal_buffers(void const* lhs, void const* rhs,
                          std::size_t size_bytes) {
  if (size_bytes > 0) {
    EXPECT_NE(nullptr, lhs);
    EXPECT_NE(nullptr, rhs);
  }
  auto typed_lhs = static_cast<char const*>(lhs);
  auto typed_rhs = static_cast<char const*>(rhs);
  EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes,
                            typed_rhs));
}
// copy column bitmask to host (used by to_host())
// Copies a column's null bitmask from device to host.
// Returns an empty vector when the column is not nullable.
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) {
  if (!c.nullable()) {
    return std::vector<bitmask_type>{};
  }

  auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
  std::vector<bitmask_type> host_bitmask(num_bitmasks);
  CUDA_TRY(hipMemcpy(host_bitmask.data(), c.null_mask(),
                     num_bitmasks * sizeof(bitmask_type),
                     hipMemcpyDeviceToHost));
  return host_bitmask;
}
// Type-dispatched functor that renders a column's elements as strings,
// substituting "NULL" for null entries. One overload per type category.
struct column_view_printer {
  // Numeric columns: std::to_string each element.
  template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
    auto h_data = cudf::test::to_host<Element>(col);  // pair of (values, bitmask)

    out.resize(col.size());

    if (col.nullable()) {
      std::transform(thrust::make_counting_iterator(size_type{0}),
                     thrust::make_counting_iterator(col.size()),
                     out.begin(),
                     [&h_data](auto idx) {
                       return bit_is_set(h_data.second.data(), idx) ? std::to_string(h_data.first[idx]) : std::string("NULL");
                     });
    } else {
      std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
        return std::to_string(el);
      });
    }
  }

  template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
    //
    // For timestamps, convert timestamp column to column of strings, then
    // call string version
    //
    auto col_as_strings = cudf::strings::from_timestamps(col);

    this->template operator()<cudf::string_view>(*col_as_strings, out);
  }

  template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
  void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
    //
    // Implementation for strings, call special to_host variant
    //
    auto h_data = cudf::test::to_host<std::string>(col);

    out.resize(col.size());
    if (col.nullable()) {
      std::transform(thrust::make_counting_iterator(size_type{0}),
                     thrust::make_counting_iterator(col.size()),
                     out.begin(),
                     [&h_data](auto idx) {
                       return bit_is_set(h_data.second.data(), idx) ? h_data.first[idx] : std::string("NULL");
                     });
    } else {
      // No nulls: the host copy can be moved out wholesale.
      out = std::move(h_data.first);
    }
  }
};
// Renders each element of `col` as a string ("NULL" for null entries),
// dispatching on the column's data type.
std::vector<std::string> to_strings(cudf::column_view const& col) {
  std::vector<std::string> result{};
  cudf::experimental::type_dispatcher(col.type(), column_view_printer{}, col, result);
  return result;
}
// Joins the string form of every element of `col` with `delimiter`.
// Returns an empty string for an empty column; the original called
// h_data.end() - 1 and h_data.back() on an empty vector, which is
// undefined behavior.
std::string to_string(cudf::column_view const& col, std::string const& delimiter) {
  std::vector<std::string> h_data = to_strings(col);
  if (h_data.empty()) { return std::string{}; }

  std::ostringstream buffer;
  std::copy(h_data.begin(), h_data.end() - 1,
            std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
  buffer << h_data.back();  // last element carries no trailing delimiter

  return buffer.str();
}
// Streams the delimiter-joined string form of `col` to `os`.
void print(cudf::column_view const& col, std::ostream &os, std::string const& delimiter) {
  os << to_string(col, delimiter);
}
} // namespace test
} // namespace cudf
| column_utilities.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_utilities.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/detail/copy.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <thrust/equal.h>
#include <gmock/gmock.h>
namespace cudf {
namespace test {
// Property equality
// Verifies that two columns agree on all metadata (not element values):
// type, size, null count, nullability (only checked for non-empty columns),
// presence of nulls, and number of child columns.
void expect_column_properties_equal(cudf::column_view const& lhs, cudf::column_view const& rhs) {
  EXPECT_EQ(lhs.type(), rhs.type());
  EXPECT_EQ(lhs.size(), rhs.size());
  EXPECT_EQ(lhs.null_count(), rhs.null_count());
  if (lhs.size() > 0) {
    // Empty columns may legitimately differ in nullability.
    EXPECT_EQ(lhs.nullable(), rhs.nullable());
  }
  EXPECT_EQ(lhs.has_nulls(), rhs.has_nulls());
  EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
// Device-side predicate: returns true when row `index` of the lhs table
// differs from row `index` of the rhs table. Used with copy_if to collect
// the indices of mismatching rows.
class corresponding_rows_unequal {
public:
  corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs): comp(d_lhs, d_rhs) {
  }

  // NOTE(review): the <true> template argument presumably enables null-aware
  // comparison — confirm against row_operators.cuh.
  cudf::experimental::row_equality_comparator<true> comp;

  __device__ bool operator()(size_type index) {
    return !comp(index, index);
  }
};
// Asserts element-wise equality of two columns (after checking metadata).
// Mismatching row indices are gathered on the device; on failure either all
// differences or only the first one are rendered into the assertion message,
// depending on print_all_differences.
void expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs,
                          bool print_all_differences) {
  expect_column_properties_equal(lhs, rhs);

  auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
  auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});

  thrust::device_vector<int> differences(lhs.size());

  // Collect the indices of rows that compare unequal.
  auto diff_iter = thrust::copy_if(thrust::device,
                                   thrust::make_counting_iterator(0),
                                   thrust::make_counting_iterator(lhs.size()),
                                   differences.begin(),
                                   corresponding_rows_unequal(*d_lhs, *d_rhs));

  CUDA_TRY(cudaDeviceSynchronize());

  differences.resize(thrust::distance(differences.begin(), diff_iter));

  if (diff_iter > differences.begin()) {
    if (print_all_differences) {
      //
      // If there are differences, display them all
      //
      std::ostringstream buffer;
      buffer << "differences:" << std::endl;

      cudf::table_view source_table ({lhs, rhs});

      fixed_width_column_wrapper<int32_t> diff_column(differences.begin(), differences.end());

      std::unique_ptr<cudf::experimental::table> diff_table = cudf::experimental::gather(source_table,
                                                                                         diff_column);

      //
      // Need to pull back the differences
      //
      std::vector<std::string> h_left_strings = to_strings(diff_table->get_column(0));
      std::vector<std::string> h_right_strings = to_strings(diff_table->get_column(1));

      for (size_t i = 0 ; i < differences.size() ; ++i) {
        buffer << "lhs[" << differences[i] << "] = " << h_left_strings[i]
               << ", rhs[" << differences[i] << "] = " << h_right_strings[i] << std::endl;
      }

      EXPECT_EQ(differences.size(), size_t{0}) << buffer.str();
    } else {
      //
      // If there are differences, just display the first one
      //
      int index = differences[0];  // implicit device-to-host read

      auto diff_lhs = cudf::experimental::detail::slice(lhs, index, index+1);
      auto diff_rhs = cudf::experimental::detail::slice(rhs, index, index+1);

      std::vector<std::string> h_left_strings = to_strings(diff_lhs);
      std::vector<std::string> h_right_strings = to_strings(diff_rhs);

      EXPECT_EQ(differences.size(), size_t{0}) << "first difference: "
                                               << "lhs[" << index << "] = "
                                               << to_string(diff_lhs, "")
                                               << ", rhs[" << index << "] = "
                                               << to_string(diff_rhs, "");
    }
  }
}
// Bitwise equality
// Byte-wise equality check of two device buffers via thrust::equal.
// Null pointers are only acceptable for zero-length comparisons.
void expect_equal_buffers(void const* lhs, void const* rhs,
                          std::size_t size_bytes) {
  if (size_bytes > 0) {
    EXPECT_NE(nullptr, lhs);
    EXPECT_NE(nullptr, rhs);
  }
  auto typed_lhs = static_cast<char const*>(lhs);
  auto typed_rhs = static_cast<char const*>(rhs);
  EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes,
                            typed_rhs));
}
// copy column bitmask to host (used by to_host())
// Copies a column's null bitmask from device to host.
// Returns an empty vector when the column is not nullable.
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) {
  if (!c.nullable()) {
    return std::vector<bitmask_type>{};
  }

  auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
  std::vector<bitmask_type> host_bitmask(num_bitmasks);
  CUDA_TRY(cudaMemcpy(host_bitmask.data(), c.null_mask(),
                      num_bitmasks * sizeof(bitmask_type),
                      cudaMemcpyDeviceToHost));
  return host_bitmask;
}
// Type-dispatched functor that renders a column's elements as strings,
// substituting "NULL" for null entries. One overload per type category.
struct column_view_printer {
  // Numeric columns: std::to_string each element.
  template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
    auto h_data = cudf::test::to_host<Element>(col);  // pair of (values, bitmask)

    out.resize(col.size());

    if (col.nullable()) {
      std::transform(thrust::make_counting_iterator(size_type{0}),
                     thrust::make_counting_iterator(col.size()),
                     out.begin(),
                     [&h_data](auto idx) {
                       return bit_is_set(h_data.second.data(), idx) ? std::to_string(h_data.first[idx]) : std::string("NULL");
                     });
    } else {
      std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
        return std::to_string(el);
      });
    }
  }

  template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
  void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
    //
    // For timestamps, convert timestamp column to column of strings, then
    // call string version
    //
    auto col_as_strings = cudf::strings::from_timestamps(col);

    this->template operator()<cudf::string_view>(*col_as_strings, out);
  }

  template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
  void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
    //
    // Implementation for strings, call special to_host variant
    //
    auto h_data = cudf::test::to_host<std::string>(col);

    out.resize(col.size());
    if (col.nullable()) {
      std::transform(thrust::make_counting_iterator(size_type{0}),
                     thrust::make_counting_iterator(col.size()),
                     out.begin(),
                     [&h_data](auto idx) {
                       return bit_is_set(h_data.second.data(), idx) ? h_data.first[idx] : std::string("NULL");
                     });
    } else {
      // No nulls: the host copy can be moved out wholesale.
      out = std::move(h_data.first);
    }
  }
};
// Renders each element of `col` as a string ("NULL" for null entries),
// dispatching on the column's data type.
std::vector<std::string> to_strings(cudf::column_view const& col) {
  std::vector<std::string> result{};
  cudf::experimental::type_dispatcher(col.type(), column_view_printer{}, col, result);
  return result;
}
// Joins the string form of every element of `col` with `delimiter`.
// Returns an empty string for an empty column; the original called
// h_data.end() - 1 and h_data.back() on an empty vector, which is
// undefined behavior.
std::string to_string(cudf::column_view const& col, std::string const& delimiter) {
  std::vector<std::string> h_data = to_strings(col);
  if (h_data.empty()) { return std::string{}; }

  std::ostringstream buffer;
  std::copy(h_data.begin(), h_data.end() - 1,
            std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
  buffer << h_data.back();  // last element carries no trailing delimiter

  return buffer.str();
}
// Streams the delimiter-joined string form of `col` to `os`.
void print(cudf::column_view const& col, std::ostream &os, std::string const& delimiter) {
  os << to_string(col, delimiter);
}
} // namespace test
} // namespace cudf
|
6c6ae15f53c01ff8980d3f85f70fa5e548125358.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <E.Rozenberg@cwi.nl>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*/
// Includes
#include <stdio.h>
#include <assert.h>
#include <vector>
// CUDA runtime includes
#include <hip/hip_runtime_api.h>
#include <cuda/api_wrappers.hpp>
#include "../helper_string.h"
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
// Per-process IPC record: which device the process uses, its pid, and the
// CUDA IPC handles for the shared event and the shared device allocation.
typedef struct ipcCUDA_st
{
    int device;
    pid_t pid;
    hipIpcEventHandle_t eventHandle;
    hipIpcMemHandle_t memHandle;
} ipcCUDA_t;

// Result of the device probe: how many UVA/P2P-capable devices were found
// and their ordinals.
typedef struct ipcDevices_st
{
    int count;
    int ordinals[MAX_DEVICES];
} ipcDevices_t;

// Sense-reversing barrier state shared between processes.
typedef struct ipcBarrier_st
{
    int count;     // number of processes that have arrived at the barrier
    bool sense;    // current barrier phase
    bool allExit;  // set when one process wants everyone to abort
} ipcBarrier_t;

ipcBarrier_t *g_barrier = NULL;  // NOTE(review): must point to process-shared memory (e.g. mmap) — confirm allocation site
bool g_procSense;                // this process's local copy of the barrier sense
int g_processCount;              // total number of participating processes
// Sense-reversing barrier across processes. The last process to arrive
// resets the count and flips the shared sense flag, releasing everyone;
// all other processes busy-wait (yielding the CPU) until the flip, or
// exit if allExit has been raised.
void procBarrier()
{
    // Atomic arrival count (GCC builtin; returns the post-increment value).
    int newCount = __sync_add_and_fetch(&g_barrier->count, 1);

    if (newCount == g_processCount)
    {
        // Last arrival: reset for the next episode and release the waiters.
        g_barrier->count = 0;
        g_barrier->sense = !g_procSense;
    }
    else
    {
        while (g_barrier->sense == g_procSense)
        {
            if (!g_barrier->allExit)
            {
                sched_yield();
            }
            else
            {
                exit(EXIT_FAILURE);
            }
        }
    }

    // Flip the local sense for the next barrier episode.
    g_procSense = !g_procSense;
}
// CUDA Kernel
// Dummy kernel: dst[i] = src[i] / num, one element per thread.
// NOTE(review): there is no bounds check, so the launch configuration must
// cover exactly the buffer size (grid * block == element count) — confirm
// at the call site.
__global__ void simpleKernel(int *dst, int *src, int num)
{
    // Dummy kernel
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    dst[idx] = src[idx] / num;
}
// Probes the system's GPUs in a forked child process (CUDA must not be
// initialized in the parent before later fork()s) and records, in *devices,
// the ordinals of UVA-capable devices that also have two-way peer access
// with device 0.
// NOTE(review): the child writes into *devices and then exits, so *devices
// must live in memory shared with the child (e.g. mmap MAP_SHARED) —
// confirm at the call site.
void getDeviceCount(ipcDevices_t *devices)
{
    // We can't initialize CUDA before fork() so we need to spawn a new process
    pid_t pid = fork();

    if (0 == pid)
    {
        int i;
        int count, uvaCount = 0;
        int uvaOrdinals[MAX_DEVICES];
        printf("\nChecking for multiple GPUs...\n");
        count = cuda::device::count();
        printf("CUDA-capable device count: %i\n", count);

        printf("\nSearching for UVA capable devices...\n");

        for (i = 0; i < count; i++)
        {
            auto prop = cuda::device::get(i).properties();

            if (prop.unifiedAddressing)
            {
                uvaOrdinals[uvaCount] = i;
                printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
                uvaCount += 1;
            }

            // IPC requires the default compute mode.
            if (prop.computeMode != hipComputeModeDefault)
            {
                printf("> GPU device must be in Compute Mode Default to run\n");
                printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
                exit(EXIT_SUCCESS);
            }
        }

        devices->ordinals[0] = uvaOrdinals[0];

        if (uvaCount < 2)
        {
            // Fewer than two UVA devices: no peer-access pairs to test.
            devices->count = uvaCount;
            exit(EXIT_SUCCESS);
        }

        // Check possibility for peer accesses, relevant to our tests
        printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
        devices->count = 1;

        bool canAccessPeer_0i, canAccessPeer_i0;
        auto device_0 = cuda::device::get(0);

        for (i = 1; i < uvaCount; i++)
        {
            auto device_i = cuda::device::get(i);
            canAccessPeer_0i = cuda::device::peer_to_peer::can_access(device_0, device_i);
            canAccessPeer_i0 = cuda::device::peer_to_peer::can_access(device_i, device_0);

            // Only keep devices with two-way peer access to device 0.
            if (canAccessPeer_0i and canAccessPeer_i0)
            {
                devices->ordinals[devices->count] = uvaOrdinals[i];
                printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
                devices->count += 1;
            }
        }

        exit(EXIT_SUCCESS);
    }
    else
    {
        // Parent: wait for the probe child and require a clean exit.
        int status;
        waitpid(pid, &status, 0);
        assert(!status);
    }
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
auto device = cuda::device::get(s_mem[index].device).make_current();
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
std::vector<cuda::event_t> events;
events.reserve(MAX_DEVICES * PROCESSES_PER_DEVICE - 1);
int* d_ptr = reinterpret_cast<int*>(
device.memory().allocate(DATA_BUF_SIZE * g_processCount * sizeof(int))
);
s_mem[0].memHandle = cuda::memory::ipc::export_((void *) d_ptr);
cuda::memory::copy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
events.push_back(cuda::event::ipc::import(device, s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
device.synchronize(events[i-1]);
}
//-------------------------------------------
// b.3
procBarrier();
cuda::memory::copy(h_results, d_ptr + DATA_BUF_SIZE, DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int));
cuda::memory::device::free(d_ptr);
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
auto current_device = cuda::device::current::get();
auto event = cuda::event::create(
current_device,
cuda::event::sync_by_blocking,
cuda::event::dont_record_timings,
cuda::event::interprocess);
s_mem[index].eventHandle = cuda::event::ipc::export_(event);
// b.1: wait until proc 0 initializes device memory
procBarrier();
{
cuda::memory::ipc::imported_t<int> d_ptr(s_mem[0].memHandle);
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
cuda::launch(
simpleKernel,
{ blocks, threads },
d_ptr.get() + index *DATA_BUF_SIZE, d_ptr.get(), index + 1
);
event.record();
// b.2
procBarrier();
} // imported memory handle is closed
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
// the event is destroyed here
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
// shared memory for CUDA memory an event handlers
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
cuda::device::get(s_devices->ordinals[i]).synchronize();
}
printf("SUCCESS\n");
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
| 6c6ae15f53c01ff8980d3f85f70fa5e548125358.cu | /**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <E.Rozenberg@cwi.nl>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*/
// Includes
#include <stdio.h>
#include <assert.h>
#include <vector>
// CUDA runtime includes
#include <cuda_runtime_api.h>
#include <cuda/api_wrappers.hpp>
#include "../helper_string.h"
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
cudaIpcEventHandle_t eventHandle;
cudaIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
void procBarrier()
{
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
g_procSense = !g_procSense;
}
// CUDA Kernel
__global__ void simpleKernel(int *dst, int *src, int num)
{
// Dummy kernel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] / num;
}
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
count = cuda::device::count();
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
auto prop = cuda::device::get(i).properties();
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != cudaComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
bool canAccessPeer_0i, canAccessPeer_i0;
auto device_0 = cuda::device::get(0);
for (i = 1; i < uvaCount; i++)
{
auto device_i = cuda::device::get(i);
canAccessPeer_0i = cuda::device::peer_to_peer::can_access(device_0, device_i);
canAccessPeer_i0 = cuda::device::peer_to_peer::can_access(device_i, device_0);
if (canAccessPeer_0i and canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
auto device = cuda::device::get(s_mem[index].device).make_current();
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
std::vector<cuda::event_t> events;
events.reserve(MAX_DEVICES * PROCESSES_PER_DEVICE - 1);
int* d_ptr = reinterpret_cast<int*>(
device.memory().allocate(DATA_BUF_SIZE * g_processCount * sizeof(int))
);
s_mem[0].memHandle = cuda::memory::ipc::export_((void *) d_ptr);
cuda::memory::copy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
events.push_back(cuda::event::ipc::import(device, s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
device.synchronize(events[i-1]);
}
//-------------------------------------------
// b.3
procBarrier();
cuda::memory::copy(h_results, d_ptr + DATA_BUF_SIZE, DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int));
cuda::memory::device::free(d_ptr);
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
auto current_device = cuda::device::current::get();
auto event = cuda::event::create(
current_device,
cuda::event::sync_by_blocking,
cuda::event::dont_record_timings,
cuda::event::interprocess);
s_mem[index].eventHandle = cuda::event::ipc::export_(event);
// b.1: wait until proc 0 initializes device memory
procBarrier();
{
cuda::memory::ipc::imported_t<int> d_ptr(s_mem[0].memHandle);
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
cuda::launch(
simpleKernel,
{ blocks, threads },
d_ptr.get() + index *DATA_BUF_SIZE, d_ptr.get(), index + 1
);
event.record();
// b.2
procBarrier();
} // imported memory handle is closed
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
// the event is destroyed here
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
// shared memory for CUDA memory an event handlers
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
cuda::device::get(s_devices->ordinals[i]).synchronize();
}
printf("SUCCESS\n");
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
|
25ac09970f48f9d032fab1f3efa212019c2f9ac5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//2GPU
#include <iostream>
#include <stdio.h>
#include <cstring>
#include <cstdlib>
#include <utility> //C++11
#include <omp.h>
#define BLOCK 32
#define GPUNUM 2
#define X 32
#define Y 32
#define ELEM (size_t)(X*Y)
#define STEP 100
#define SLV (1*X)
using namespace std;
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess){ \
cerr << "Error:" << __FILE__ << endl; \
cerr << "code : "<< error << " reason : "<<hipGetErrorString(error) << endl; \
} \
}
void checkResult(float* hostRef,float* devRef,const int N){
float epsilon = 1e-3;
bool match = 1;
int i;
float Ref;
float Host;
float Dev;
for(i=0;i<N;i++){
Host = hostRef[i];
Dev = devRef[i];
Ref = Host-Dev;
//printf("host:%d,device:%d\n",hostRef[i],devRef[i]);
if((float)fabsf(Ref)>epsilon){
match = 0;
cout << "Arrays don't match.on count of "<<i<< " element." <<endl;
cout << "Elapsed : " << Ref << " Host : " << Host << " | GPU : " << Dev << endl;
printf("Elapsed : %f Host : %f GPU : %f\n",Ref,Host,Dev );
break;
}
}
if(match){
cout <<"Arrays match.";
}
cout << endl;
return;
}
void initializeData(float* A,int size){
//
time_t t;
int i;
srand((unsigned int)time(&t));
for(i=0;i<size;i++){
A[i] = (float)(rand()*0xFFF) / 10000000.0F;
}
return;
}
void print(float* Src){
for(int i=0;i<ELEM;i++){
cout << Src[i] << " ";
if((i+1)%X==0)
cout << endl;
}
}
void print(float* Def,float* Src,float* Rst,const int elem){
for(int i=0;i<elem;i++){
cout << "\t" <<i << " | " << Def[i] << " | " <<Src[i] << " | "<<Rst[i] << endl;
}
}
void Host2DStencil(float* Src,float* Dst){
for(int time_step=0;time_step<STEP;time_step++){
for(size_t all_loop=0;all_loop<ELEM;all_loop++){
int mat_x=all_loop%X;//X
int mat_y=all_loop/X;//Y
//cout << "Time: "<<time_step<< " | X:" << mat_x << " | Y:"<<mat_y ;
if(mat_x!=0 && mat_x!=X-1 && mat_y!=0 && mat_y!=Y-1){
//
Dst[all_loop] = 0.6*Src[all_loop] + 0.1*(Src[all_loop-1]+Src[all_loop+1]+Src[all_loop-X]+Src[all_loop+X]);
}
}
swap(Src,Dst);
}
}
//Single
/*
__global__ void StencilOneStep(float* Src,float* Dst){
size_t index = threadIdx.x + blockDim.x * blockIdx.x;
size_t mat_x = index % X; //X
size_t mat_y = index / X; //Y
if(mat_x != 0 && mat_x != X-1 && mat_y != 0 && mat_y != Y-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1] + Src[index+1] + Src[index+X] + Src[index-X]);
}
}
*/
//Multi
__global__ void StencilOneStep(float* Src,float* Dst,const int MainElem,const int Dev){
size_t index = threadIdx.x + blockDim.x * blockIdx.x;
size_t mat_x = index % X;
//
switch(Dev){
case 0:
if(index>SLV+X && index<MainElem+2*SLV-X && mat_x != 0 && mat_x != X-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1]+Src[index+1]+Src[index+X]+Src[index-X]);
}
break;
case GPUNUM-1:
if(index>X && index<MainElem+SLV-X && mat_x != 0 && mat_x != X-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1]+Src[index+1]+Src[index+X]+Src[index-X]);
}
break;
default:
if(index>X && index<MainElem+2*SLV-X&& mat_x != 0 && mat_x != X-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1]+Src[index+1]+Src[index+X]+Src[index-X]);
}
break;
}
}
int main(int argc,char** argv){
float* Src = new float[ELEM];
float* Dst = new float[ELEM];
float* Rst = new float[ELEM];
float* Def = new float[ELEM];
//Src
initializeData(Src,ELEM);
memcpy(Dst,Src,sizeof(float)*ELEM);
memcpy(Def,Src,sizeof(float)*ELEM);
//HostTemp SLV
float* Left = new float[SLV*GPUNUM];
float* Right = new float[SLV*GPUNUM];
omp_set_num_threads(GPUNUM);
//Device
#pragma omp parallel
{
size_t MainElem = ELEM/GPUNUM;
size_t CalcElem = MainElem + 2*SLV;
size_t MainSize = MainElem * sizeof(float);
size_t SleeveSize = SLV*sizeof(float);//Single
size_t DeviceMemorySize = CalcElem * sizeof(float);
//Device
int Dev = omp_get_thread_num();
CHECK(hipSetDevice(Dev));
// cout << Dev << " : MainElem -> " <<MainElem << " : CalcElem -> " <<CalcElem << " : SLV -> " << SLV << endl;
//
dim3 block(BLOCK);
dim3 grid((CalcElem+block.x-1)/block.x);
//()
size_t MainAddress = Dev*MainElem;
// cout << Dev << " : StartAddress -> " << MainAddress << endl;
float *d_Src,*d_Dst;
CHECK(hipMalloc(&d_Src,DeviceMemorySize));
CHECK(hipMalloc(&d_Dst,DeviceMemorySize));
//Init
CHECK(hipMemset(d_Src,0,DeviceMemorySize));
CHECK(hipMemset(d_Dst,0,DeviceMemorySize));
//Memcpy
if(Dev==0){
// cout << "Copy GPU : " << Dev << " : Src ["<<MainAddress<<"]" << " : "<< MainSize+SleeveSize <<"Byte" <<endl;
CHECK(hipMemcpy(&d_Src[SLV],&Src[MainAddress],MainSize+SleeveSize,hipMemcpyHostToDevice));
CHECK(hipMemcpy(&d_Dst[SLV],&Src[MainAddress],MainSize+SleeveSize,hipMemcpyHostToDevice));
}else if(Dev==GPUNUM-1){
//cout << "Copy GPU : " << Dev << " : Src ["<<MainAddress<<"]" << " : "<< MainSize+SleeveSize <<"Byte" <<endl;
CHECK(hipMemcpy(&d_Src[0],&Src[MainAddress-SLV],MainSize+SleeveSize,hipMemcpyHostToDevice));
CHECK(hipMemcpy(&d_Dst[0],&Src[MainAddress-SLV],MainSize+SleeveSize,hipMemcpyHostToDevice));
}else{
//cout << "Copy GPU : " << Dev << " : Src ["<<MainAddress<<"]" << " : "<< MainSize+2*SleeveSize <<"Byte" <<endl;
CHECK(hipMemcpy(&d_Src[0],&Src[MainAddress-SLV],MainSize+2*SleeveSize,hipMemcpyHostToDevice));
CHECK(hipMemcpy(&d_Dst[0],&Src[MainAddress-SLV],MainSize+2*SleeveSize,hipMemcpyHostToDevice));
}
// cout << "block : "<< block.x << " | grid : " << grid.x << endl;
for(int st=0;st<STEP;st++){
//Stencil Calc
//cout << Dev <<" : iter -> " << st << endl;
hipLaunchKernelGGL(( StencilOneStep), dim3(grid),dim3(block), 0, 0, d_Src,d_Dst,MainElem,Dev);
swap(d_Src,d_Dst);
if(Dev!=0){
CHECK(hipMemcpy(&Left[Dev*SLV],&d_Src[SLV],SleeveSize,hipMemcpyDeviceToHost));
}
if(Dev!=GPUNUM-1){
CHECK(hipMemcpy(&Right[Dev*SLV],&d_Src[MainElem],SleeveSize,hipMemcpyDeviceToHost));
}
#pragma omp barrier
if(Dev!=0){
CHECK(hipMemcpy(&d_Src[0],&Right[(Dev-1)*SLV],SleeveSize,hipMemcpyHostToDevice));
}
if(Dev!=GPUNUM-1){
CHECK(hipMemcpy(&d_Src[SLV+MainElem],&Left[(Dev+1)*SLV],SleeveSize,hipMemcpyHostToDevice));
}
}
CHECK(hipMemcpy(&Rst[Dev*MainElem],&d_Src[SLV],MainSize,hipMemcpyDeviceToHost));
CHECK(hipGetLastError());
CHECK(hipFree(d_Src));
CHECK(hipFree(d_Dst));
}
Host2DStencil(Src,Dst);
checkResult(Src,Rst,ELEM);
delete Right;
delete Left;
delete Src;
delete Dst;
delete Rst;
delete Def;
return 0;
} | 25ac09970f48f9d032fab1f3efa212019c2f9ac5.cu | //2次元配列における複数台GPUを用いたステンシル計算
#include <iostream>
#include <stdio.h>
#include <cstring>
#include <cstdlib>
#include <utility> //C++11
#include <omp.h>
#define BLOCK 32
#define GPUNUM 2
#define X 32
#define Y 32
#define ELEM (size_t)(X*Y)
#define STEP 100
#define SLV (1*X)
using namespace std;
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess){ \
cerr << "Error:" << __FILE__ << endl; \
cerr << "code : "<< error << " reason : "<<cudaGetErrorString(error) << endl; \
} \
}
void checkResult(float* hostRef,float* devRef,const int N){
float epsilon = 1e-3;
bool match = 1;
int i;
float Ref;
float Host;
float Dev;
for(i=0;i<N;i++){
Host = hostRef[i];
Dev = devRef[i];
Ref = Host-Dev;
//printf("host:%d,device:%d\n",hostRef[i],devRef[i]);
if((float)fabsf(Ref)>epsilon){
match = 0;
cout << "Arrays don't match.on count of "<<i<< " element." <<endl;
cout << "Elapsed : " << Ref << " Host : " << Host << " | GPU : " << Dev << endl;
printf("Elapsed : %f Host : %f GPU : %f\n",Ref,Host,Dev );
break;
}
}
if(match){
cout <<"Arrays match.";
}
cout << endl;
return;
}
void initializeData(float* A,int size){
//乱数で値を初期化します。
time_t t;
int i;
srand((unsigned int)time(&t));
for(i=0;i<size;i++){
A[i] = (float)(rand()*0xFFF) / 10000000.0F;
}
return;
}
void print(float* Src){
for(int i=0;i<ELEM;i++){
cout << Src[i] << " ";
if((i+1)%X==0)
cout << endl;
}
}
void print(float* Def,float* Src,float* Rst,const int elem){
for(int i=0;i<elem;i++){
cout << "\t" <<i << " | " << Def[i] << " | " <<Src[i] << " | "<<Rst[i] << endl;
}
}
void Host2DStencil(float* Src,float* Dst){
for(int time_step=0;time_step<STEP;time_step++){
for(size_t all_loop=0;all_loop<ELEM;all_loop++){
int mat_x=all_loop%X;//X成分
int mat_y=all_loop/X;//Y成分
//cout << "Time: "<<time_step<< " | X:" << mat_x << " | Y:"<<mat_y ;
if(mat_x!=0 && mat_x!=X-1 && mat_y!=0 && mat_y!=Y-1){
//端ならば計算しない
Dst[all_loop] = 0.6*Src[all_loop] + 0.1*(Src[all_loop-1]+Src[all_loop+1]+Src[all_loop-X]+Src[all_loop+X]);
}
}
swap(Src,Dst);
}
}
//Single版
/*
__global__ void StencilOneStep(float* Src,float* Dst){
size_t index = threadIdx.x + blockDim.x * blockIdx.x;
size_t mat_x = index % X; //X成分
size_t mat_y = index / X; //Y成分
if(mat_x != 0 && mat_x != X-1 && mat_y != 0 && mat_y != Y-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1] + Src[index+1] + Src[index+X] + Src[index-X]);
}
}
*/
//Multi版
__global__ void StencilOneStep(float* Src,float* Dst,const int MainElem,const int Dev){
size_t index = threadIdx.x + blockDim.x * blockIdx.x;
size_t mat_x = index % X;
//デバイス番号によって動作が変わる
switch(Dev){
case 0:
if(index>SLV+X && index<MainElem+2*SLV-X && mat_x != 0 && mat_x != X-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1]+Src[index+1]+Src[index+X]+Src[index-X]);
}
break;
case GPUNUM-1:
if(index>X && index<MainElem+SLV-X && mat_x != 0 && mat_x != X-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1]+Src[index+1]+Src[index+X]+Src[index-X]);
}
break;
default:
if(index>X && index<MainElem+2*SLV-X&& mat_x != 0 && mat_x != X-1){
Dst[index] = 0.6*Src[index] + 0.1*(Src[index-1]+Src[index+1]+Src[index+X]+Src[index-X]);
}
break;
}
}
int main(int argc,char** argv){
float* Src = new float[ELEM];
float* Dst = new float[ELEM];
float* Rst = new float[ELEM];
float* Def = new float[ELEM];
//Srcを乱数で初期化
initializeData(Src,ELEM);
memcpy(Dst,Src,sizeof(float)*ELEM);
memcpy(Def,Src,sizeof(float)*ELEM);
//HostTemp SLV
float* Left = new float[SLV*GPUNUM];
float* Right = new float[SLV*GPUNUM];
omp_set_num_threads(GPUNUM);
//Deviceメモリの確保
#pragma omp parallel
{
size_t MainElem = ELEM/GPUNUM;
size_t CalcElem = MainElem + 2*SLV;
size_t MainSize = MainElem * sizeof(float);
size_t SleeveSize = SLV*sizeof(float);//Single
size_t DeviceMemorySize = CalcElem * sizeof(float);
//Device番号の取得
int Dev = omp_get_thread_num();
CHECK(cudaSetDevice(Dev));
// cout << Dev << " : MainElem -> " <<MainElem << " : CalcElem -> " <<CalcElem << " : SLV -> " << SLV << endl;
//実行定義
dim3 block(BLOCK);
dim3 grid((CalcElem+block.x-1)/block.x);
//開始のアドレス(要素番号)
size_t MainAddress = Dev*MainElem;
// cout << Dev << " : StartAddress -> " << MainAddress << endl;
float *d_Src,*d_Dst;
CHECK(cudaMalloc(&d_Src,DeviceMemorySize));
CHECK(cudaMalloc(&d_Dst,DeviceMemorySize));
//Init
CHECK(cudaMemset(d_Src,0,DeviceMemorySize));
CHECK(cudaMemset(d_Dst,0,DeviceMemorySize));
//Memcpy
if(Dev==0){
// cout << "Copy GPU : " << Dev << " : Src ["<<MainAddress<<"]" << " : "<< MainSize+SleeveSize <<"Byte" <<endl;
CHECK(cudaMemcpy(&d_Src[SLV],&Src[MainAddress],MainSize+SleeveSize,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&d_Dst[SLV],&Src[MainAddress],MainSize+SleeveSize,cudaMemcpyHostToDevice));
}else if(Dev==GPUNUM-1){
//cout << "Copy GPU : " << Dev << " : Src ["<<MainAddress<<"]" << " : "<< MainSize+SleeveSize <<"Byte" <<endl;
CHECK(cudaMemcpy(&d_Src[0],&Src[MainAddress-SLV],MainSize+SleeveSize,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&d_Dst[0],&Src[MainAddress-SLV],MainSize+SleeveSize,cudaMemcpyHostToDevice));
}else{
//cout << "Copy GPU : " << Dev << " : Src ["<<MainAddress<<"]" << " : "<< MainSize+2*SleeveSize <<"Byte" <<endl;
CHECK(cudaMemcpy(&d_Src[0],&Src[MainAddress-SLV],MainSize+2*SleeveSize,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&d_Dst[0],&Src[MainAddress-SLV],MainSize+2*SleeveSize,cudaMemcpyHostToDevice));
}
// cout << "block : "<< block.x << " | grid : " << grid.x << endl;
for(int st=0;st<STEP;st++){
//Stencil Calc
//cout << Dev <<" : iter -> " << st << endl;
StencilOneStep<<<grid,block>>>(d_Src,d_Dst,MainElem,Dev);
swap(d_Src,d_Dst);
if(Dev!=0){
CHECK(cudaMemcpy(&Left[Dev*SLV],&d_Src[SLV],SleeveSize,cudaMemcpyDeviceToHost));
}
if(Dev!=GPUNUM-1){
CHECK(cudaMemcpy(&Right[Dev*SLV],&d_Src[MainElem],SleeveSize,cudaMemcpyDeviceToHost));
}
#pragma omp barrier
if(Dev!=0){
CHECK(cudaMemcpy(&d_Src[0],&Right[(Dev-1)*SLV],SleeveSize,cudaMemcpyHostToDevice));
}
if(Dev!=GPUNUM-1){
CHECK(cudaMemcpy(&d_Src[SLV+MainElem],&Left[(Dev+1)*SLV],SleeveSize,cudaMemcpyHostToDevice));
}
}
CHECK(cudaMemcpy(&Rst[Dev*MainElem],&d_Src[SLV],MainSize,cudaMemcpyDeviceToHost));
CHECK(cudaGetLastError());
CHECK(cudaFree(d_Src));
CHECK(cudaFree(d_Dst));
}
Host2DStencil(Src,Dst);
checkResult(Src,Rst,ELEM);
delete Right;
delete Left;
delete Src;
delete Dst;
delete Rst;
delete Def;
return 0;
} |
f79097cc20e91791faf8af94bc9b700a4e9e05a3.hip | // !!! This is a file automatically generated by hipify!!!
//#include "stdafx.h" //
#include "devMatchCuda.cuh" //
bool initCUDA(void) // CUDA
{
int count = 0;
printf("Start to detecte devices.........\n"); //
hipGetDeviceCount(&count); // 1.0
if (count == 0){
fprintf(stderr, "There is no device.\n");
return false;
}
printf("%d device/s detected.\n", count); //
int i;
for (i = 0; i < count; i++){ // CUDA
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) { //
if (prop.major >= 1) // 1
{
printf("Device %d: %s supports CUDA %d.%d.\n", i + 1, prop.name, prop.major, prop.minor);//CUDA
break;
}
}
}
if (i == count) { // CUDA1.x
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i); //
return true;
}
| f79097cc20e91791faf8af94bc9b700a4e9e05a3.cu | //#include "stdafx.h" // 引入预编译头文件
#include "devMatchCuda.cuh" // 引入导出函数声明头文件
bool initCUDA(void) // CUDA初始化函数
{
int count = 0;
printf("Start to detecte devices.........\n"); // 显示检测到的设备数
cudaGetDeviceCount(&count); // 检测计算能力大于等于1.0的设备数
if (count == 0){
fprintf(stderr, "There is no device.\n");
return false;
}
printf("%d device/s detected.\n", count); // 显示检测到的设备数
int i;
for (i = 0; i < count; i++){ // 依次验证检测到的设备是否支持CUDA
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { // 获得设备属性并验证是否正确
if (prop.major >= 1) // 验证主计算能力,即计算能力的第一位数是否大于1
{
printf("Device %d: %s supports CUDA %d.%d.\n", i + 1, prop.name, prop.major, prop.minor);//显示检测到的设备支持的CUDA版本
break;
}
}
}
if (i == count) { // 没有支持CUDA1.x的设备
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i); // 设置设备为主叫线程的当前设备
return true;
}
|
a3c6d7b589299f8ff4aced17f7e6b96b5767576b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mlfe/operators/impl/cuda/kernel/basic_arithmetic.h"
#include "mlfe/device_context/cuda_context.h"
#include <third_party/cub/hipcub/hipcub.hpp>
namespace mlfe{
namespace cuda_kernel{
template <class T> __global__
void negative_kernel(const size_t N, const T *x_ptr, T *y_ptr){
CUDA_1D_KERNEL_LOOP(i, N){
y_ptr[i] = -x_ptr[i];
}
}
template <>
void negative<float>(const size_t N, const float *x_ptr, float *y_ptr){
hipLaunchKernelGGL(( negative_kernel), dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, x_ptr, y_ptr);
}
template <class T> __global__
void scalar_add_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] + scalar;
}
}
template <>
void scalar_add_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
hipLaunchKernelGGL(( scalar_add_fwd_kernel), dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, a, b, c);
}
template <class T> __global__
void scalar_sub_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] - scalar;
}
}
template <>
void scalar_sub_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
hipLaunchKernelGGL(( scalar_sub_fwd_kernel), dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, a, b, c);
}
template <class T> __global__
void scalar_mul_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] * scalar;
}
}
template <>
void scalar_mul_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
hipLaunchKernelGGL(( scalar_mul_fwd_kernel), dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, a, b, c);
}
template <class T> __global__
void scalar_div_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] / scalar;
}
}
template <>
void scalar_div_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
hipLaunchKernelGGL(( scalar_div_fwd_kernel), dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, a, b, c);
}
template <typename T>
__global__ void scaled_accum(
const size_t N,
const T scale,
const T *from,
T *to)
{
CUDA_1D_KERNEL_LOOP(i, N){
to[i] += scale * from[i];
}
}
template <typename T>
__global__ void scaled_accum(
const size_t N,
const T *scale,
const T *from,
T *to)
{
T val = scale[0];
CUDA_1D_KERNEL_LOOP(i, N){
to[i] += val * from[i];
}
}
template <>
void eltwise_add_left_bwd(const size_t N, const float *dy, float *da)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, 1, dy, da);
}
template <>
void eltwise_add_right_bwd(const size_t N, const float *dy, float *db)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, 1, dy, db);
}
template <>
void eltwise_sub_left_bwd(const size_t N, const float *dy, float *da)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, 1, dy, da);
}
template <>
void eltwise_sub_right_bwd(const size_t N, const float *dy, float *db)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, -1, dy, db);
}
template <typename T>
__global__ void mul_accum(
const size_t N,
const T *a,
const T *b,
T *c)
{
CUDA_1D_KERNEL_LOOP(i, N){
c[i] += a[i] * b[i];
}
}
template <>
void eltwise_mul_left_bwd(const size_t N, const float *b, const float *dy, float *da)
{
hipLaunchKernelGGL(( mul_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, b, dy, da);
}
template <>
void eltwise_mul_right_bwd(const size_t N, const float *a, const float *dy, float *db)
{
hipLaunchKernelGGL(( mul_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, a, dy, db);
}
template <typename T>
__global__ void eltwise_div_left_bwd_kernel(
const size_t N,
const T *b,
const T *dy,
T *da)
{
CUDA_1D_KERNEL_LOOP(i, N){
da[i] += dy[i] / b[i];
}
}
template <>
void eltwise_div_left_bwd(
const size_t N,
const float *b,
const float *dy,
float *da)
{
hipLaunchKernelGGL(( eltwise_div_left_bwd_kernel<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, b, dy, da);
}
template <typename T>
__global__ void eltwise_div_right_bwd_kernel(
const size_t N,
const T *b,
const T *y,
const T *dy,
T *db)
{
CUDA_1D_KERNEL_LOOP(i, N){
db[i] += -dy[i] * y[i] / b[i];
}
}
template <>
void eltwise_div_right_bwd(
const size_t N,
const float *b,
const float *y,
const float *dy,
float *db)
{
hipLaunchKernelGGL(( eltwise_div_right_bwd_kernel<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, b, y, dy, db);
}
template <int BLOCK_THREADS, typename T>
__global__ void scaled_sum_kernel(
const size_t N,
const T scale,
const T *dy,
T *db)
{
typedef hipcub::BlockReduce<T, BLOCK_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage smem_storage;
size_t gid = blockIdx.x * BLOCK_THREADS + threadIdx.x;
T data = 0;
if(gid < N){
data = scale * dy[gid];
}
T aggregate = BlockReduce(smem_storage).Sum(data);
if(threadIdx.x == 0){
atomicAdd(db, aggregate);
}
}
template <>
void scalar_add_left_bwd(const size_t N, const float *dy, float *da)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, 1, dy, da);
}
template <>
void scalar_add_right_bwd(const size_t N, const float *dy, float *db)
{
hipLaunchKernelGGL(( scaled_sum_kernel<CUDA_CONTEXT_NUM_THREADS, float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, 1, dy, db);
}
template <>
void scalar_sub_left_bwd(const size_t N, const float *dy, float *da)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, 1, dy, da);
}
template <>
void scalar_sub_right_bwd(
const size_t N,
const float *dy,
float *db)
{
hipLaunchKernelGGL(( scaled_sum_kernel<CUDA_CONTEXT_NUM_THREADS, float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, -1, dy, db);
}
template <>
void scalar_mul_left_bwd(const size_t N, const float *b, const float *dy, float *da)
{
hipLaunchKernelGGL(( scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, b, dy, da);
}
template <int BLOCK_THREADS, typename T>
__global__ void scalar_mul_right_bwd_kernel(
const size_t N,
const T *a,
const T *dy,
T *db)
{
typedef hipcub::BlockReduce<T, BLOCK_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage smem_storage;
size_t gid = blockIdx.x * BLOCK_THREADS + threadIdx.x;
T data = 0;
if(gid < N){
data = a[gid] * dy[gid];
}
T aggregate = BlockReduce(smem_storage).Sum(data);
if(threadIdx.x == 0){
atomicAdd(db, aggregate);
}
}
template<>
void scalar_mul_right_bwd<float>(
const size_t N,
const float *a,
const float *dy,
float *db)
{
hipLaunchKernelGGL(( scalar_mul_right_bwd_kernel<CUDA_CONTEXT_NUM_THREADS, float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, a, dy, db);
}
template <typename T>
__global__ void inv_scaled_accum(
const size_t N,
const T *scale,
const T *vec,
T *out)
{
T val = T(1) / scale[0];
CUDA_1D_KERNEL_LOOP(i, N){
out[i] += val * vec[i];
}
}
template <>
void scalar_div_left_bwd(
const size_t N,
const float *b,
const float *dy,
float *da)
{
hipLaunchKernelGGL(( inv_scaled_accum<float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, b, dy, da);
}
template <int BLOCK_THREADS, typename T>
__global__ void scalar_div_right_bwd_kernel(
const size_t N,
const T *b,
const T *y,
const T *dy,
T *db)
{
typedef hipcub::BlockReduce<T, BLOCK_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage smem_storage;
size_t gid = blockIdx.x * BLOCK_THREADS + threadIdx.x;
T inv_b = T(1) / b[0];
T data = 0;
if(gid < N){
data = -dy[gid] * y[gid] * inv_b;
}
T aggregate = BlockReduce(smem_storage).Sum(data);
if(threadIdx.x == 0){
atomicAdd(db, aggregate);
}
}
template <>
void scalar_div_right_bwd<float>(
const size_t N,
const float *b,
const float *y,
const float *dy,
float*db)
{
hipLaunchKernelGGL(( scalar_div_right_bwd_kernel<CUDA_CONTEXT_NUM_THREADS, float>),
dim3(CUDA_CONTEXT_GET_BLOCKS(N)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, N, b, y, dy, db);
}
} // namespace cuda_kernel
} // namespace mlfe
| a3c6d7b589299f8ff4aced17f7e6b96b5767576b.cu | #include "mlfe/operators/impl/cuda/kernel/basic_arithmetic.h"
#include "mlfe/device_context/cuda_context.h"
#include <third_party/cub/cub/block/block_reduce.cuh>
namespace mlfe{
namespace cuda_kernel{
template <class T> __global__
void negative_kernel(const size_t N, const T *x_ptr, T *y_ptr){
CUDA_1D_KERNEL_LOOP(i, N){
y_ptr[i] = -x_ptr[i];
}
}
template <>
void negative<float>(const size_t N, const float *x_ptr, float *y_ptr){
negative_kernel<<<CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, x_ptr, y_ptr);
}
template <class T> __global__
void scalar_add_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] + scalar;
}
}
template <>
void scalar_add_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
scalar_add_fwd_kernel<<<CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, a, b, c);
}
template <class T> __global__
void scalar_sub_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] - scalar;
}
}
template <>
void scalar_sub_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
scalar_sub_fwd_kernel<<<CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, a, b, c);
}
template <class T> __global__
void scalar_mul_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] * scalar;
}
}
template <>
void scalar_mul_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
scalar_mul_fwd_kernel<<<CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, a, b, c);
}
template <class T> __global__
void scalar_div_fwd_kernel(const size_t N, const T *a, const T *b, T *c){
T scalar = b[0];
CUDA_1D_KERNEL_LOOP(i, N){
c[i] = a[i] / scalar;
}
}
template <>
void scalar_div_fwd<float>(
const size_t N,
const float *a,
const float *b,
float *c)
{
scalar_div_fwd_kernel<<<CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, a, b, c);
}
template <typename T>
__global__ void scaled_accum(
const size_t N,
const T scale,
const T *from,
T *to)
{
CUDA_1D_KERNEL_LOOP(i, N){
to[i] += scale * from[i];
}
}
template <typename T>
__global__ void scaled_accum(
const size_t N,
const T *scale,
const T *from,
T *to)
{
T val = scale[0];
CUDA_1D_KERNEL_LOOP(i, N){
to[i] += val * from[i];
}
}
template <>
void eltwise_add_left_bwd(const size_t N, const float *dy, float *da)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, 1, dy, da);
}
template <>
void eltwise_add_right_bwd(const size_t N, const float *dy, float *db)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, 1, dy, db);
}
template <>
void eltwise_sub_left_bwd(const size_t N, const float *dy, float *da)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, 1, dy, da);
}
template <>
void eltwise_sub_right_bwd(const size_t N, const float *dy, float *db)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, -1, dy, db);
}
template <typename T>
__global__ void mul_accum(
const size_t N,
const T *a,
const T *b,
T *c)
{
CUDA_1D_KERNEL_LOOP(i, N){
c[i] += a[i] * b[i];
}
}
template <>
void eltwise_mul_left_bwd(const size_t N, const float *b, const float *dy, float *da)
{
mul_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, b, dy, da);
}
template <>
void eltwise_mul_right_bwd(const size_t N, const float *a, const float *dy, float *db)
{
mul_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, a, dy, db);
}
template <typename T>
__global__ void eltwise_div_left_bwd_kernel(
const size_t N,
const T *b,
const T *dy,
T *da)
{
CUDA_1D_KERNEL_LOOP(i, N){
da[i] += dy[i] / b[i];
}
}
template <>
void eltwise_div_left_bwd(
const size_t N,
const float *b,
const float *dy,
float *da)
{
eltwise_div_left_bwd_kernel<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, b, dy, da);
}
template <typename T>
__global__ void eltwise_div_right_bwd_kernel(
const size_t N,
const T *b,
const T *y,
const T *dy,
T *db)
{
CUDA_1D_KERNEL_LOOP(i, N){
db[i] += -dy[i] * y[i] / b[i];
}
}
template <>
void eltwise_div_right_bwd(
const size_t N,
const float *b,
const float *y,
const float *dy,
float *db)
{
eltwise_div_right_bwd_kernel<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, b, y, dy, db);
}
template <int BLOCK_THREADS, typename T>
__global__ void scaled_sum_kernel(
const size_t N,
const T scale,
const T *dy,
T *db)
{
typedef cub::BlockReduce<T, BLOCK_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage smem_storage;
size_t gid = blockIdx.x * BLOCK_THREADS + threadIdx.x;
T data = 0;
if(gid < N){
data = scale * dy[gid];
}
T aggregate = BlockReduce(smem_storage).Sum(data);
if(threadIdx.x == 0){
atomicAdd(db, aggregate);
}
}
template <>
void scalar_add_left_bwd(const size_t N, const float *dy, float *da)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, 1, dy, da);
}
template <>
void scalar_add_right_bwd(const size_t N, const float *dy, float *db)
{
scaled_sum_kernel<CUDA_CONTEXT_NUM_THREADS, float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, 1, dy, db);
}
template <>
void scalar_sub_left_bwd(const size_t N, const float *dy, float *da)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, 1, dy, da);
}
template <>
void scalar_sub_right_bwd(
const size_t N,
const float *dy,
float *db)
{
scaled_sum_kernel<CUDA_CONTEXT_NUM_THREADS, float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, -1, dy, db);
}
template <>
void scalar_mul_left_bwd(const size_t N, const float *b, const float *dy, float *da)
{
scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, b, dy, da);
}
template <int BLOCK_THREADS, typename T>
__global__ void scalar_mul_right_bwd_kernel(
const size_t N,
const T *a,
const T *dy,
T *db)
{
typedef cub::BlockReduce<T, BLOCK_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage smem_storage;
size_t gid = blockIdx.x * BLOCK_THREADS + threadIdx.x;
T data = 0;
if(gid < N){
data = a[gid] * dy[gid];
}
T aggregate = BlockReduce(smem_storage).Sum(data);
if(threadIdx.x == 0){
atomicAdd(db, aggregate);
}
}
template<>
void scalar_mul_right_bwd<float>(
const size_t N,
const float *a,
const float *dy,
float *db)
{
scalar_mul_right_bwd_kernel<CUDA_CONTEXT_NUM_THREADS, float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, a, dy, db);
}
template <typename T>
__global__ void inv_scaled_accum(
const size_t N,
const T *scale,
const T *vec,
T *out)
{
T val = T(1) / scale[0];
CUDA_1D_KERNEL_LOOP(i, N){
out[i] += val * vec[i];
}
}
template <>
void scalar_div_left_bwd(
const size_t N,
const float *b,
const float *dy,
float *da)
{
inv_scaled_accum<float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, b, dy, da);
}
template <int BLOCK_THREADS, typename T>
__global__ void scalar_div_right_bwd_kernel(
const size_t N,
const T *b,
const T *y,
const T *dy,
T *db)
{
typedef cub::BlockReduce<T, BLOCK_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage smem_storage;
size_t gid = blockIdx.x * BLOCK_THREADS + threadIdx.x;
T inv_b = T(1) / b[0];
T data = 0;
if(gid < N){
data = -dy[gid] * y[gid] * inv_b;
}
T aggregate = BlockReduce(smem_storage).Sum(data);
if(threadIdx.x == 0){
atomicAdd(db, aggregate);
}
}
template <>
void scalar_div_right_bwd<float>(
const size_t N,
const float *b,
const float *y,
const float *dy,
float*db)
{
scalar_div_right_bwd_kernel<CUDA_CONTEXT_NUM_THREADS, float><<<
CUDA_CONTEXT_GET_BLOCKS(N),
CUDA_CONTEXT_NUM_THREADS>>>(N, b, y, dy, db);
}
} // namespace cuda_kernel
} // namespace mlfe
|
e65ab73f555c5cdaa95102ce6739151f84a9f027.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include <cutil.h>
#include "mr3.h"
__device__ __constant__ VG_MATRIX d_matrix[ATYPE2];
__device__ __inline__
void coulombforce_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2)
{
int k;
float dn2,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
dphir = qj * inr * inr * inr;
if(dn2==0.0f) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir * dr[k];
}
__global__
void coulombforce_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) coulombforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) coulombforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void coulombpot_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2)
{
int k;
float dn2,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
dphir = qj * inr;
if(dn2==0.0f) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir;
}
__global__
void coulombpot_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) coulombpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) coulombpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void realforce_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2,
float r2min, float r2max)
{
int k;
float dn2,r,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
r = inr * dn2;
dphir = qj * ((float)(M_2_SQRTPI)*expf(-dn2) + erfcf(r)*inr)*inr*inr;
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir * dr[k];
}
__global__
void realforce_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) realforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) realforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void realpot_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2,
float r2min, float r2max)
{
int k;
float dn2,r,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
r = inr * dn2;
dphir = qj * erfcf(r) * inr;
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir;
}
__global__
void realpot_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) realpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) realpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void vdwforce_inter(int xj[3], int xi[3], float fi[3], int t, float al2,
float r2min, float r2max)
{
int k;
float dn2,inr2,dn6,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= d_matrix[t].rscale;
inr2 = 1.0f/dn2;
dn6 = inr2*inr2*inr2;
dphir = d_matrix[t].gscale * dn6 * inr2 * (2.0f * dn6 - 1.0f);
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir * dr[k];
}
__global__
void vdwforce_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
int nat, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js,atypei;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
atypei = xivec[i].qatype.atype * nat;
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) vdwforce_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) vdwforce_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void vdwpot_inter(int xj[3], int xi[3], float fi[3], int t, float al2,
float r2min, float r2max)
{
int k;
float dn2,inr2,dn6,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= d_matrix[t].rscale;
inr2 = 1.0f/dn2;
dn6 = inr2*inr2*inr2;
dphir = d_matrix[t].gscale * dn6 * (dn6 - 1.0f);
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir;
}
__global__
void vdwpot_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
int nat, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js,atypei;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
atypei = xivec[i].qatype.atype * nat;
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) vdwpot_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) vdwpot_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void ewald_dft_inter(int xj[3], float qj, float ki[3], float factor1,
float bsbci[3], float al2)
{
int k;
float th,dr[3],s,c;
th = 0.0f;
for(k=0; k<3; k++){
dr[k] = xj[k] * al2;
th += dr[k] * ki[k];
}
th *= (float)(2.0 * M_PI);
s = qj * sinf(th);
c = qj * cosf(th);
bsbci[0] += s * factor1;
bsbci[1] += c * factor1;
// bsbci[2] += (s * s + c * c) * 0.5f * factor1;
}
__global__
void ewald_dft_kernel(VG_XVEC *x, int n, VG_KVEC *kvec, int knum, float *bsbc)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float bsbci[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
float ki[3],factor1;
al2=scalbnf(1.0f,-32);
for(k=0; k<3; k++) bsbci[k] = 0.0f;
for(k=0; k<3; k++) ki[k] = kvec[i].k[k];
factor1 = kvec[i].factor1;
for (j = 0; j < n - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = x[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) ewald_dft_inter(s_xj[js].r,s_xj[js].qatype.q,ki,factor1,bsbci,al2);
}
__syncthreads();
if(tid < n - j) s_xj[tid] = x[j + tid];
__syncthreads();
for (js = 0; js < n - j; js++) ewald_dft_inter(s_xj[js].r,s_xj[js].qatype.q,ki,factor1,bsbci,al2);
if(i<knum) for(k=0; k<3; k++) bsbc[i*3+k] = bsbci[k];
}
__device__ __inline__
void ewaldforce_idft_inter(float kj[3], float bsbc[3], int xi[3], float fi[3], float al2)
{
int k;
float th,dr[3],s,c;
th = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] * al2;
th += dr[k] * kj[k];
}
th *= (float)(2.0 * M_PI);
s = sinf(th);
c = cosf(th);
for(k=0; k<3; k++){
fi[k] += (bsbc[1] * s - bsbc[0] * c) * kj[k];
}
}
__global__
void ewaldforce_idft_kernel(VG_XVEC *x, int n, VG_KVEC *kvec, int knum,
float *bsbc, float *force)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float al2;
int js,xi[3];
__shared__ VG_KVEC s_kj[NLOAD];
__shared__ float s_bsbcj[NLOAD][3];
float fi[3];
al2=scalbnf(1.0f,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = x[i].r[k];
for (j = 0; j < knum - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_kj[tid] = kvec[j + tid];
if(tid < NLOAD) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) ewaldforce_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
}
__syncthreads();
if(tid < knum - j) s_kj[tid] = kvec[j + tid];
if(tid < knum - j) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
for (js = 0; js < knum - j; js++) ewaldforce_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
if(i<n) for(k=0; k<3; k++) force[i*3+k] = fi[k];
}
__device__ __inline__
void ewaldpot_idft_inter(float kj[3], float bsbc[3], int xi[3], float fi[3], float al2)
{
int k;
float th,dr[3],s,c;
th = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] * al2;
th += dr[k] * kj[k];
}
th *= (float)(2.0 * M_PI);
s = sinf(th);
c = cosf(th);
fi[0] += bsbc[1] * c + bsbc[0] * s;
}
__global__
void ewaldpot_idft_kernel(VG_XVEC *x, int n, VG_KVEC *kvec, int knum,
float *bsbc, float *force)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float al2;
int js,xi[3];
__shared__ VG_KVEC s_kj[NLOAD];
__shared__ float s_bsbcj[NLOAD][3];
float fi[3];
al2=scalbnf(1.0f,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = x[i].r[k];
for (j = 0; j < knum - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_kj[tid] = kvec[j + tid];
if(tid < NLOAD) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) ewaldpot_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
}
__syncthreads();
if(tid < knum - j) s_kj[tid] = kvec[j + tid];
if(tid < knum - j) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
for (js = 0; js < knum - j; js++) ewaldpot_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
if(i<n) for(k=0; k<3; k++) force[i*3+k] = fi[k];
}
static void malloc_x(VG_XVEC **d_x, VG_XVEC **xf, int nalloc, int nthre)
{
static VG_XVEC *d_x_static=NULL,*xf_static=NULL;
static int nalloc_bak=0;
if(nalloc>nalloc_bak){
if(nalloc<NMAX) nalloc=NMAX;
CUDA_SAFE_CALL(hipFree(d_x_static));
CUDA_SAFE_CALL(hipMalloc((void**)&d_x_static,sizeof(VG_XVEC)*(nalloc+nthre)));
free(xf_static);
if((xf_static=(VG_XVEC *)malloc(sizeof(VG_XVEC)*(nalloc+nthre)))==NULL){
fprintf(stderr,"** error : can't malloc xf_static **\n");
exit(1);
}
nalloc_bak=nalloc;
}
*d_x=d_x_static;
*xf=xf_static;
}
static void malloc_x2(VG_XVEC **d_x, VG_XVEC **xf, int nalloc, int nthre)
{
static VG_XVEC *d_x_static=NULL,*xf_static=NULL;
static int nalloc_bak=0;
if(nalloc>nalloc_bak){
if(nalloc<NMAX) nalloc=NMAX;
CUDA_SAFE_CALL(hipFree(d_x_static));
CUDA_SAFE_CALL(hipMalloc((void**)&d_x_static,sizeof(VG_XVEC)*(nalloc+nthre)));
free(xf_static);
if((xf_static=(VG_XVEC *)malloc(sizeof(VG_XVEC)*(nalloc+nthre)))==NULL){
fprintf(stderr,"** error : can't malloc xf_static **\n");
exit(1);
}
nalloc_bak=nalloc;
}
*d_x=d_x_static;
*xf=xf_static;
}
static void malloc_f(float **d_force, float **forcef, int nalloc)
{
static float *d_force_static=NULL,*forcef_static=NULL;
static int nalloc_bak=0;
if(nalloc>nalloc_bak){
if(nalloc<NMAX) nalloc=NMAX;
CUDA_SAFE_CALL(hipFree(d_force_static));
CUDA_SAFE_CALL(hipMalloc((void**)&d_force_static,sizeof(float)*nalloc*3));
free(forcef_static);
if((forcef_static=(float *)malloc(sizeof(float)*nalloc*3))==NULL){
fprintf(stderr,"** error : can't malloc forcef_static **\n");
exit(1);
}
bzero(forcef_static,sizeof(float)*nalloc*3);
nalloc_bak=nalloc;
}
*d_force=d_force_static;
*forcef=forcef_static;
}
static void malloc_k(VG_KVEC **d_k, VG_KVEC **kf, int kalloc, int nthre)
{
static VG_KVEC *d_k_static=NULL,*kf_static=NULL;
static int kalloc_bak=0;
if(kalloc>kalloc_bak){
if(kalloc<KMAX) kalloc=KMAX;
CUDA_SAFE_CALL(hipFree(d_k_static));
CUDA_SAFE_CALL(hipMalloc((void**)&d_k_static,sizeof(VG_KVEC)*(kalloc+nthre)));
free(kf_static);
if((kf_static=(VG_KVEC *)malloc(sizeof(VG_KVEC)*(kalloc+nthre)))==NULL){
fprintf(stderr,"** error : can't malloc kf_static **\n");
exit(1);
}
kalloc_bak=kalloc;
}
*d_k=d_k_static;
*kf=kf_static;
}
static void malloc_and_make_index(int n, int nat, int atype[], int **index_ret)
{
int i,at,na[ATYPE],offset[ATYPE],*index;
if((index=(int *)malloc(sizeof(int)*n))==NULL){
fprintf(stderr,"** error : can't malloc index **\n");
exit(1);
}
for(at=0;at<nat;at++) na[at]=0;
for(i=0;i<n;i++) na[atype[i]]++;
offset[0]=0;
for(at=1;at<nat;at++) offset[at]=offset[at-1]+na[at-1];
for(at=0;at<nat;at++) na[at]=0;
for(i=0;i<n;i++){
at=atype[i];
index[i]=offset[at]+na[at];// i:original, index[i]:new
na[at]++;
}
*index_ret=index;
}
static void free_index(int *index)
{
free(index);
}
static void send_matrix(int nat, double *gscale, double *rscale)
{
int i,j;
VG_MATRIX *matrix;
// send matrix
if((matrix=(VG_MATRIX *)malloc(sizeof(VG_MATRIX)*nat*nat))==NULL){
fprintf(stderr,"** error : can't malloc matrix **\n");
exit(1);
}
for(i=0;i<nat;i++){
for(j=0;j<nat;j++){
matrix[i*nat+j].gscale=(float)(gscale[i*nat+j]);
matrix[i*nat+j].rscale=(float)(rscale[i*nat+j]);
}
}
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_matrix,matrix,sizeof(VG_MATRIX)*nat*nat));
free(matrix);
}
static void copy_and_send_x(int n, double *x, int *atype, int *index,
double xmax_1[3], VG_XVEC *vec, VG_XVEC *d_x)
{
int i,j;
DI2 di2;
for(i=0;i<n;i++){
int idx=index[i];
for(j=0;j<3;j++){
di2.d=x[i*3+j]*xmax_1[j]+0x180000;
vec[idx].r[j]=di2.fi2.fi0.i;
}
vec[idx].qatype.atype=atype[i];
}
CUDA_SAFE_CALL(hipMemcpy(d_x,vec,sizeof(VG_XVEC)*n,hipMemcpyHostToDevice));
}
static void copy_and_send_xq(int n, double *x, double *q,
double xmax_1[3], VG_XVEC *vec, VG_XVEC *d_x)
{
int i,j;
DI2 di2;
for(i=0;i<n;i++){
for(j=0;j<3;j++){
di2.d=x[i*3+j]*xmax_1[j]+0x180000;
vec[i].r[j]=di2.fi2.fi0.i;
}
vec[i].qatype.q=q[i];
}
CUDA_SAFE_CALL(hipMemcpy(d_x,vec,sizeof(VG_XVEC)*n,hipMemcpyHostToDevice));
}
static void copy_and_send_k(int knum, int *k, double alpha, double epsilon,
double cellsize_1[3],
VG_KVEC *vec, VG_KVEC *d_k)
{
int i,j;
double ktmp,r2,kvtmp,eps1=1.0/epsilon,vol1;
double alpha4=1.0/(4.0*alpha*alpha);
for(j=0,vol1=1.0;j<3;j++) vol1*=cellsize_1[j];
for(i=0;i<knum;i++){
for(j=0,r2=0.0;j<3;j++){
ktmp=(double)k[i*3+j];
vec[i].k[j]=(float)ktmp;
kvtmp=2.0*M_PI*ktmp*cellsize_1[j];
r2+=kvtmp*kvtmp;
}
vec[i].factor1=2.0*eps1*vol1*exp(-r2*alpha4)/r2;
}
CUDA_SAFE_CALL(hipMemcpy(d_k,vec,sizeof(VG_KVEC)*knum,hipMemcpyHostToDevice));
}
static void get_result_q(int n, float *d_force, float *forcef,
double *q, double rfac, double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
double factor;
CUDA_SAFE_CALL(hipMemcpy(forcef,d_force,sizeof(float)*n*3,hipMemcpyDeviceToHost));
for(i=0;i<n;i++){
factor=q[i]*rfac;
for(j=0;j<3;j++) force[i*3+j]+=forcef[i*3+j]*factor;
}
}
static void get_result_q3(int n, float *d_force, float *forcef,
double *q, double rfac[3], double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
double factor;
CUDA_SAFE_CALL(hipMemcpy(forcef,d_force,sizeof(float)*n*3,hipMemcpyDeviceToHost));
for(i=0;i<n;i++){
for(j=0;j<3;j++){
factor=q[i]*rfac[j];
force[i*3+j]+=forcef[i*3+j]*factor;
}
}
}
static void get_result(int n, float *d_force, float *forcef, double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
CUDA_SAFE_CALL(hipMemcpy(forcef,d_force,sizeof(float)*n*3,hipMemcpyDeviceToHost));
for(i=0;i<n;i++) for(j=0;j<3;j++) force[i*3+j]+=forcef[i*3+j];
}
static void get_result_index(int n, int *index, float *d_force, float *forcef, double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
CUDA_SAFE_CALL(hipMemcpy(forcef,d_force,sizeof(float)*n*3,hipMemcpyDeviceToHost));
for(i=0;i<n;i++) for(j=0;j<3;j++) force[i*3+j]+=forcef[index[i]*3+j];
}
void MR3calccoulomb_ij(int ni, double xi[], double qi[], double force[],
int nj, double xj[], double qj[],
double rscale,
int tblno, double xmax, int periodicflag)
{
VG_XVEC *d_xi,*xif;
VG_XVEC *d_xj,*xjf;
float *d_force,*forcef;
if((periodicflag & 1)==0) xmax*=2.0;
double xmax_1[3]={1.0/xmax,1.0/xmax,1.0/xmax};
float r2min=MD_REAL_R2MIN,r2max=MD_REAL_R2MAX;
float rscale2f=(float)(rscale*rscale);
malloc_x(&d_xi,&xif,ni,NTHRE);
malloc_x2(&d_xj,&xjf,nj,NTHRE);
malloc_f(&d_force,&forcef,ni);
copy_and_send_xq(ni,xi,qi,xmax_1,xif,d_xi);
copy_and_send_xq(nj,xj,qj,xmax_1,xjf,d_xj);
switch(tblno){
case 0:
hipLaunchKernelGGL(( coulombforce_kernel), dim3((ni+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,d_force);
break;
case 1:
hipLaunchKernelGGL(( coulombpot_kernel), dim3((ni+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,d_force);
break;
case 6:
hipLaunchKernelGGL(( realforce_kernel), dim3((ni+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,r2min,r2max,d_force);
break;
case 7:
hipLaunchKernelGGL(( realpot_kernel), dim3((ni+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,r2min,r2max,d_force);
break;
default:
fprintf(stderr,"** error : not supported tblno = %d **\n",tblno);
exit(1);
break;
}
CUT_CHECK_ERROR("Kernel execution failed");
if(tblno==0 || tblno==1) get_result(ni,d_force,forcef,force);
else if(tblno==6) get_result_q(ni,d_force,forcef,qi,rscale*rscale*rscale,force);
else if(tblno==7) get_result_q(ni,d_force,forcef,qi,rscale,force);
}
void MR3calcvdw_ij(int ni, double xi[], int atypei[], double force[],
int nj, double xj[], int atypej[],
int nat, double gscale[], double rscale[],
int tblno, double xmax, int periodicflag)
{
VG_XVEC *d_xi,*xif;
VG_XVEC *d_xj,*xjf;
float *d_force,*forcef;
int *indexi,*indexj;
if((periodicflag & 1)==0) xmax*=2.0;
double xmax_1[3]={1.0/xmax,1.0/xmax,1.0/xmax};
float r2min=MD_LJ_R2MIN,r2max=MD_LJ_R2MAX;
if(nat>ATYPE){
fprintf(stderr,"** error : nat is too large **\n");
exit(1);
}
malloc_x(&d_xi,&xif,ni,NTHRE);
malloc_x2(&d_xj,&xjf,nj,NTHRE);
malloc_f(&d_force,&forcef,ni);
malloc_and_make_index(ni,nat,atypei,&indexi);
malloc_and_make_index(nj,nat,atypej,&indexj);
send_matrix(nat,gscale,rscale);
copy_and_send_x(ni,xi,atypei,indexi,xmax_1,xif,d_xi);
copy_and_send_x(nj,xj,atypej,indexj,xmax_1,xjf,d_xj);
switch(tblno){
case 2:
hipLaunchKernelGGL(( vdwforce_kernel), dim3((ni+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, ni,d_xi,nj,d_xj,
nat,(float)xmax,r2min,r2max,d_force);
break;
case 3:
hipLaunchKernelGGL(( vdwpot_kernel), dim3((ni+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, ni,d_xi,nj,d_xj,
nat,(float)xmax,r2min,r2max,d_force);
break;
default:
fprintf(stderr,"** error : not supported tblno = %d **\n",tblno);
exit(1);
break;
}
CUT_CHECK_ERROR("Kernel execution failed");
get_result_index(ni,indexi,d_force,forcef,force);
free_index(indexi);
free_index(indexj);
}
void MR3calcewald(int *k, int knum_org, double *x, int n, double *q,
double alpha, double epsilon, double cell[3][3],
double *force, double *tpot, double stress[3][3])
{
VG_XVEC *d_x,*xf;
float *d_force,*forcef;
int knum;
VG_KVEC *d_k,*kf;
float *d_bsbc,*bsbcf;
double cellsize_1[3]={1.0/cell[0][0],1.0/cell[1][1],1.0/cell[2][2]};
if(knum_org>=0) knum=knum_org;
else knum=-knum_org;
malloc_x(&d_x,&xf,n,NTHRE);
malloc_f(&d_force,&forcef,n);
malloc_k(&d_k,&kf,knum,NTHRE);
malloc_f(&d_bsbc,&bsbcf,knum);
copy_and_send_xq(n,x,q,cellsize_1,xf,d_x);
copy_and_send_k(knum,k,alpha,epsilon,cellsize_1,kf,d_k);
hipLaunchKernelGGL(( ewald_dft_kernel), dim3((knum+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, d_x,n,d_k,knum,d_bsbc);
CUDA_SAFE_CALL(hipMemcpy(bsbcf,d_bsbc,sizeof(float)*knum*3,hipMemcpyDeviceToHost));
*tpot=0.0;
for(int i=0;i<knum;i++){
*tpot+=(bsbcf[i*3]*bsbcf[i*3]+bsbcf[i*3+1]*bsbcf[i*3+1])*0.5/kf[i].factor1;
}
if(knum_org>=0){
hipLaunchKernelGGL(( ewaldforce_idft_kernel), dim3((n+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, d_x,n,d_k,knum,d_bsbc,d_force);
for(int j=0;j<3;j++) cellsize_1[j] *= 2.0 * M_PI;
get_result_q3(n,d_force,forcef,q,cellsize_1,force);
}
else{
hipLaunchKernelGGL(( ewaldpot_idft_kernel), dim3((n+NTHRE-1)/NTHRE), dim3(NTHRE) , 0, 0, d_x,n,d_k,knum,d_bsbc,d_force);
get_result_q(n,d_force,forcef,q,1.0,force);
}
}
| e65ab73f555c5cdaa95102ce6739151f84a9f027.cu | #include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include <cutil.h>
#include "mr3.h"
__device__ __constant__ VG_MATRIX d_matrix[ATYPE2];
__device__ __inline__
void coulombforce_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2)
{
int k;
float dn2,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
dphir = qj * inr * inr * inr;
if(dn2==0.0f) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir * dr[k];
}
__global__
void coulombforce_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) coulombforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) coulombforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void coulombpot_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2)
{
int k;
float dn2,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
dphir = qj * inr;
if(dn2==0.0f) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir;
}
__global__
void coulombpot_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) coulombpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) coulombpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void realforce_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2,
float r2min, float r2max)
{
int k;
float dn2,r,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
r = inr * dn2;
dphir = qj * ((float)(M_2_SQRTPI)*expf(-dn2) + erfcf(r)*inr)*inr*inr;
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir * dr[k];
}
__global__
void realforce_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) realforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) realforce_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void realpot_inter(int xj[3], float qj, int xi[3], float fi[3], float rscale2f, float al2,
float r2min, float r2max)
{
int k;
float dn2,r,inr,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= rscale2f;
inr = rsqrtf(dn2);
r = inr * dn2;
dphir = qj * erfcf(r) * inr;
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir;
}
__global__
void realpot_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
float rscale2f, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) realpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) realpot_inter(s_xj[js].r,s_xj[js].qatype.q,xi,fi,rscale2f,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void vdwforce_inter(int xj[3], int xi[3], float fi[3], int t, float al2,
float r2min, float r2max)
{
int k;
float dn2,inr2,dn6,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= d_matrix[t].rscale;
inr2 = 1.0f/dn2;
dn6 = inr2*inr2*inr2;
dphir = d_matrix[t].gscale * dn6 * inr2 * (2.0f * dn6 - 1.0f);
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir * dr[k];
}
__global__
void vdwforce_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
int nat, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js,atypei;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
atypei = xivec[i].qatype.atype * nat;
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) vdwforce_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) vdwforce_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void vdwpot_inter(int xj[3], int xi[3], float fi[3], int t, float al2,
float r2min, float r2max)
{
int k;
float dn2,inr2,dn6,dr[3],dphir;
dn2 = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] - xj[k];
dr[k] *= al2;
dn2 += dr[k] * dr[k];
}
dn2 *= d_matrix[t].rscale;
inr2 = 1.0f/dn2;
dn6 = inr2*inr2*inr2;
dphir = d_matrix[t].gscale * dn6 * (dn6 - 1.0f);
if(dn2<r2min || dn2>=r2max) dphir = 0.0f;
for(k=0; k<3; k++) fi[k] += dphir;
}
__global__
void vdwpot_kernel(int ni, VG_XVEC *xivec, int nj, VG_XVEC *xjvec,
int nat, float xmax, float r2min, float r2max, float *fvec)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float fi[3],al2;
int js,atypei;
__shared__ VG_XVEC s_xj[NLOAD];
int xi[3];
al2=scalbnf(xmax,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = xivec[i].r[k];
atypei = xivec[i].qatype.atype * nat;
for (j = 0; j < nj - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = xjvec[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) vdwpot_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
}
__syncthreads();
if(tid < nj - j) s_xj[tid] = xjvec[j + tid];
__syncthreads();
for (js = 0; js < nj - j; js++) vdwpot_inter(s_xj[js].r,xi,fi,atypei+s_xj[js].qatype.atype,al2,r2min,r2max);
if(i<ni) for(k=0; k<3; k++) fvec[i*3+k] = fi[k];
}
__device__ __inline__
void ewald_dft_inter(int xj[3], float qj, float ki[3], float factor1,
float bsbci[3], float al2)
{
int k;
float th,dr[3],s,c;
th = 0.0f;
for(k=0; k<3; k++){
dr[k] = xj[k] * al2;
th += dr[k] * ki[k];
}
th *= (float)(2.0 * M_PI);
s = qj * sinf(th);
c = qj * cosf(th);
bsbci[0] += s * factor1;
bsbci[1] += c * factor1;
// bsbci[2] += (s * s + c * c) * 0.5f * factor1;
}
__global__
void ewald_dft_kernel(VG_XVEC *x, int n, VG_KVEC *kvec, int knum, float *bsbc)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float bsbci[3],al2;
int js;
__shared__ VG_XVEC s_xj[NLOAD];
float ki[3],factor1;
al2=scalbnf(1.0f,-32);
for(k=0; k<3; k++) bsbci[k] = 0.0f;
for(k=0; k<3; k++) ki[k] = kvec[i].k[k];
factor1 = kvec[i].factor1;
for (j = 0; j < n - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_xj[tid] = x[j + tid];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) ewald_dft_inter(s_xj[js].r,s_xj[js].qatype.q,ki,factor1,bsbci,al2);
}
__syncthreads();
if(tid < n - j) s_xj[tid] = x[j + tid];
__syncthreads();
for (js = 0; js < n - j; js++) ewald_dft_inter(s_xj[js].r,s_xj[js].qatype.q,ki,factor1,bsbci,al2);
if(i<knum) for(k=0; k<3; k++) bsbc[i*3+k] = bsbci[k];
}
__device__ __inline__
void ewaldforce_idft_inter(float kj[3], float bsbc[3], int xi[3], float fi[3], float al2)
{
int k;
float th,dr[3],s,c;
th = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] * al2;
th += dr[k] * kj[k];
}
th *= (float)(2.0 * M_PI);
s = sinf(th);
c = cosf(th);
for(k=0; k<3; k++){
fi[k] += (bsbc[1] * s - bsbc[0] * c) * kj[k];
}
}
__global__
void ewaldforce_idft_kernel(VG_XVEC *x, int n, VG_KVEC *kvec, int knum,
float *bsbc, float *force)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float al2;
int js,xi[3];
__shared__ VG_KVEC s_kj[NLOAD];
__shared__ float s_bsbcj[NLOAD][3];
float fi[3];
al2=scalbnf(1.0f,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = x[i].r[k];
for (j = 0; j < knum - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_kj[tid] = kvec[j + tid];
if(tid < NLOAD) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) ewaldforce_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
}
__syncthreads();
if(tid < knum - j) s_kj[tid] = kvec[j + tid];
if(tid < knum - j) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
for (js = 0; js < knum - j; js++) ewaldforce_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
if(i<n) for(k=0; k<3; k++) force[i*3+k] = fi[k];
}
__device__ __inline__
void ewaldpot_idft_inter(float kj[3], float bsbc[3], int xi[3], float fi[3], float al2)
{
int k;
float th,dr[3],s,c;
th = 0.0f;
for(k=0; k<3; k++){
dr[k] = xi[k] * al2;
th += dr[k] * kj[k];
}
th *= (float)(2.0 * M_PI);
s = sinf(th);
c = cosf(th);
fi[0] += bsbc[1] * c + bsbc[0] * s;
}
__global__
void ewaldpot_idft_kernel(VG_XVEC *x, int n, VG_KVEC *kvec, int knum,
float *bsbc, float *force)
{
int tid = threadIdx.x;
int i = blockIdx.x * NTHRE + tid;
int j,k;
float al2;
int js,xi[3];
__shared__ VG_KVEC s_kj[NLOAD];
__shared__ float s_bsbcj[NLOAD][3];
float fi[3];
al2=scalbnf(1.0f,-32);
for(k=0; k<3; k++) fi[k] = 0.0f;
for(k=0; k<3; k++) xi[k] = x[i].r[k];
for (j = 0; j < knum - NLOAD; j+=NLOAD){
__syncthreads();
if(tid < NLOAD) s_kj[tid] = kvec[j + tid];
if(tid < NLOAD) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
#pragma unroll 16
for (js = 0; js < NLOAD; js++) ewaldpot_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
}
__syncthreads();
if(tid < knum - j) s_kj[tid] = kvec[j + tid];
if(tid < knum - j) for(k=0;k<2;k++) s_bsbcj[tid][k] = bsbc[(j + tid)*3 + k];
__syncthreads();
for (js = 0; js < knum - j; js++) ewaldpot_idft_inter(s_kj[js].k,s_bsbcj[js],xi,fi,al2);
if(i<n) for(k=0; k<3; k++) force[i*3+k] = fi[k];
}
static void malloc_x(VG_XVEC **d_x, VG_XVEC **xf, int nalloc, int nthre)
{
static VG_XVEC *d_x_static=NULL,*xf_static=NULL;
static int nalloc_bak=0;
if(nalloc>nalloc_bak){
if(nalloc<NMAX) nalloc=NMAX;
CUDA_SAFE_CALL(cudaFree(d_x_static));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_x_static,sizeof(VG_XVEC)*(nalloc+nthre)));
free(xf_static);
if((xf_static=(VG_XVEC *)malloc(sizeof(VG_XVEC)*(nalloc+nthre)))==NULL){
fprintf(stderr,"** error : can't malloc xf_static **\n");
exit(1);
}
nalloc_bak=nalloc;
}
*d_x=d_x_static;
*xf=xf_static;
}
static void malloc_x2(VG_XVEC **d_x, VG_XVEC **xf, int nalloc, int nthre)
{
static VG_XVEC *d_x_static=NULL,*xf_static=NULL;
static int nalloc_bak=0;
if(nalloc>nalloc_bak){
if(nalloc<NMAX) nalloc=NMAX;
CUDA_SAFE_CALL(cudaFree(d_x_static));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_x_static,sizeof(VG_XVEC)*(nalloc+nthre)));
free(xf_static);
if((xf_static=(VG_XVEC *)malloc(sizeof(VG_XVEC)*(nalloc+nthre)))==NULL){
fprintf(stderr,"** error : can't malloc xf_static **\n");
exit(1);
}
nalloc_bak=nalloc;
}
*d_x=d_x_static;
*xf=xf_static;
}
static void malloc_f(float **d_force, float **forcef, int nalloc)
{
static float *d_force_static=NULL,*forcef_static=NULL;
static int nalloc_bak=0;
if(nalloc>nalloc_bak){
if(nalloc<NMAX) nalloc=NMAX;
CUDA_SAFE_CALL(cudaFree(d_force_static));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_force_static,sizeof(float)*nalloc*3));
free(forcef_static);
if((forcef_static=(float *)malloc(sizeof(float)*nalloc*3))==NULL){
fprintf(stderr,"** error : can't malloc forcef_static **\n");
exit(1);
}
bzero(forcef_static,sizeof(float)*nalloc*3);
nalloc_bak=nalloc;
}
*d_force=d_force_static;
*forcef=forcef_static;
}
static void malloc_k(VG_KVEC **d_k, VG_KVEC **kf, int kalloc, int nthre)
{
static VG_KVEC *d_k_static=NULL,*kf_static=NULL;
static int kalloc_bak=0;
if(kalloc>kalloc_bak){
if(kalloc<KMAX) kalloc=KMAX;
CUDA_SAFE_CALL(cudaFree(d_k_static));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_k_static,sizeof(VG_KVEC)*(kalloc+nthre)));
free(kf_static);
if((kf_static=(VG_KVEC *)malloc(sizeof(VG_KVEC)*(kalloc+nthre)))==NULL){
fprintf(stderr,"** error : can't malloc kf_static **\n");
exit(1);
}
kalloc_bak=kalloc;
}
*d_k=d_k_static;
*kf=kf_static;
}
static void malloc_and_make_index(int n, int nat, int atype[], int **index_ret)
{
int i,at,na[ATYPE],offset[ATYPE],*index;
if((index=(int *)malloc(sizeof(int)*n))==NULL){
fprintf(stderr,"** error : can't malloc index **\n");
exit(1);
}
for(at=0;at<nat;at++) na[at]=0;
for(i=0;i<n;i++) na[atype[i]]++;
offset[0]=0;
for(at=1;at<nat;at++) offset[at]=offset[at-1]+na[at-1];
for(at=0;at<nat;at++) na[at]=0;
for(i=0;i<n;i++){
at=atype[i];
index[i]=offset[at]+na[at];// i:original, index[i]:new
na[at]++;
}
*index_ret=index;
}
static void free_index(int *index)
{
free(index);
}
static void send_matrix(int nat, double *gscale, double *rscale)
{
int i,j;
VG_MATRIX *matrix;
// send matrix
if((matrix=(VG_MATRIX *)malloc(sizeof(VG_MATRIX)*nat*nat))==NULL){
fprintf(stderr,"** error : can't malloc matrix **\n");
exit(1);
}
for(i=0;i<nat;i++){
for(j=0;j<nat;j++){
matrix[i*nat+j].gscale=(float)(gscale[i*nat+j]);
matrix[i*nat+j].rscale=(float)(rscale[i*nat+j]);
}
}
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_matrix,matrix,sizeof(VG_MATRIX)*nat*nat));
free(matrix);
}
static void copy_and_send_x(int n, double *x, int *atype, int *index,
double xmax_1[3], VG_XVEC *vec, VG_XVEC *d_x)
{
int i,j;
DI2 di2;
for(i=0;i<n;i++){
int idx=index[i];
for(j=0;j<3;j++){
di2.d=x[i*3+j]*xmax_1[j]+0x180000;
vec[idx].r[j]=di2.fi2.fi0.i;
}
vec[idx].qatype.atype=atype[i];
}
CUDA_SAFE_CALL(cudaMemcpy(d_x,vec,sizeof(VG_XVEC)*n,cudaMemcpyHostToDevice));
}
static void copy_and_send_xq(int n, double *x, double *q,
double xmax_1[3], VG_XVEC *vec, VG_XVEC *d_x)
{
int i,j;
DI2 di2;
for(i=0;i<n;i++){
for(j=0;j<3;j++){
di2.d=x[i*3+j]*xmax_1[j]+0x180000;
vec[i].r[j]=di2.fi2.fi0.i;
}
vec[i].qatype.q=q[i];
}
CUDA_SAFE_CALL(cudaMemcpy(d_x,vec,sizeof(VG_XVEC)*n,cudaMemcpyHostToDevice));
}
static void copy_and_send_k(int knum, int *k, double alpha, double epsilon,
double cellsize_1[3],
VG_KVEC *vec, VG_KVEC *d_k)
{
int i,j;
double ktmp,r2,kvtmp,eps1=1.0/epsilon,vol1;
double alpha4=1.0/(4.0*alpha*alpha);
for(j=0,vol1=1.0;j<3;j++) vol1*=cellsize_1[j];
for(i=0;i<knum;i++){
for(j=0,r2=0.0;j<3;j++){
ktmp=(double)k[i*3+j];
vec[i].k[j]=(float)ktmp;
kvtmp=2.0*M_PI*ktmp*cellsize_1[j];
r2+=kvtmp*kvtmp;
}
vec[i].factor1=2.0*eps1*vol1*exp(-r2*alpha4)/r2;
}
CUDA_SAFE_CALL(cudaMemcpy(d_k,vec,sizeof(VG_KVEC)*knum,cudaMemcpyHostToDevice));
}
static void get_result_q(int n, float *d_force, float *forcef,
double *q, double rfac, double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
double factor;
CUDA_SAFE_CALL(cudaMemcpy(forcef,d_force,sizeof(float)*n*3,cudaMemcpyDeviceToHost));
for(i=0;i<n;i++){
factor=q[i]*rfac;
for(j=0;j<3;j++) force[i*3+j]+=forcef[i*3+j]*factor;
}
}
static void get_result_q3(int n, float *d_force, float *forcef,
double *q, double rfac[3], double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
double factor;
CUDA_SAFE_CALL(cudaMemcpy(forcef,d_force,sizeof(float)*n*3,cudaMemcpyDeviceToHost));
for(i=0;i<n;i++){
for(j=0;j<3;j++){
factor=q[i]*rfac[j];
force[i*3+j]+=forcef[i*3+j]*factor;
}
}
}
static void get_result(int n, float *d_force, float *forcef, double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
CUDA_SAFE_CALL(cudaMemcpy(forcef,d_force,sizeof(float)*n*3,cudaMemcpyDeviceToHost));
for(i=0;i<n;i++) for(j=0;j<3;j++) force[i*3+j]+=forcef[i*3+j];
}
static void get_result_index(int n, int *index, float *d_force, float *forcef, double *force)
{
// copy GPU result to host, and convert it to double
int i,j;
CUDA_SAFE_CALL(cudaMemcpy(forcef,d_force,sizeof(float)*n*3,cudaMemcpyDeviceToHost));
for(i=0;i<n;i++) for(j=0;j<3;j++) force[i*3+j]+=forcef[index[i]*3+j];
}
void MR3calccoulomb_ij(int ni, double xi[], double qi[], double force[],
int nj, double xj[], double qj[],
double rscale,
int tblno, double xmax, int periodicflag)
{
VG_XVEC *d_xi,*xif;
VG_XVEC *d_xj,*xjf;
float *d_force,*forcef;
if((periodicflag & 1)==0) xmax*=2.0;
double xmax_1[3]={1.0/xmax,1.0/xmax,1.0/xmax};
float r2min=MD_REAL_R2MIN,r2max=MD_REAL_R2MAX;
float rscale2f=(float)(rscale*rscale);
malloc_x(&d_xi,&xif,ni,NTHRE);
malloc_x2(&d_xj,&xjf,nj,NTHRE);
malloc_f(&d_force,&forcef,ni);
copy_and_send_xq(ni,xi,qi,xmax_1,xif,d_xi);
copy_and_send_xq(nj,xj,qj,xmax_1,xjf,d_xj);
switch(tblno){
case 0:
coulombforce_kernel<<< (ni+NTHRE-1)/NTHRE, NTHRE >>>(ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,d_force);
break;
case 1:
coulombpot_kernel<<< (ni+NTHRE-1)/NTHRE, NTHRE >>>(ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,d_force);
break;
case 6:
realforce_kernel<<< (ni+NTHRE-1)/NTHRE, NTHRE >>>(ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,r2min,r2max,d_force);
break;
case 7:
realpot_kernel<<< (ni+NTHRE-1)/NTHRE, NTHRE >>>(ni,d_xi,nj,d_xj,
rscale2f,(float)xmax,r2min,r2max,d_force);
break;
default:
fprintf(stderr,"** error : not supported tblno = %d **\n",tblno);
exit(1);
break;
}
CUT_CHECK_ERROR("Kernel execution failed");
if(tblno==0 || tblno==1) get_result(ni,d_force,forcef,force);
else if(tblno==6) get_result_q(ni,d_force,forcef,qi,rscale*rscale*rscale,force);
else if(tblno==7) get_result_q(ni,d_force,forcef,qi,rscale,force);
}
void MR3calcvdw_ij(int ni, double xi[], int atypei[], double force[],
int nj, double xj[], int atypej[],
int nat, double gscale[], double rscale[],
int tblno, double xmax, int periodicflag)
{
VG_XVEC *d_xi,*xif;
VG_XVEC *d_xj,*xjf;
float *d_force,*forcef;
int *indexi,*indexj;
if((periodicflag & 1)==0) xmax*=2.0;
double xmax_1[3]={1.0/xmax,1.0/xmax,1.0/xmax};
float r2min=MD_LJ_R2MIN,r2max=MD_LJ_R2MAX;
if(nat>ATYPE){
fprintf(stderr,"** error : nat is too large **\n");
exit(1);
}
malloc_x(&d_xi,&xif,ni,NTHRE);
malloc_x2(&d_xj,&xjf,nj,NTHRE);
malloc_f(&d_force,&forcef,ni);
malloc_and_make_index(ni,nat,atypei,&indexi);
malloc_and_make_index(nj,nat,atypej,&indexj);
send_matrix(nat,gscale,rscale);
copy_and_send_x(ni,xi,atypei,indexi,xmax_1,xif,d_xi);
copy_and_send_x(nj,xj,atypej,indexj,xmax_1,xjf,d_xj);
switch(tblno){
case 2:
vdwforce_kernel<<< (ni+NTHRE-1)/NTHRE, NTHRE >>>(ni,d_xi,nj,d_xj,
nat,(float)xmax,r2min,r2max,d_force);
break;
case 3:
vdwpot_kernel<<< (ni+NTHRE-1)/NTHRE, NTHRE >>>(ni,d_xi,nj,d_xj,
nat,(float)xmax,r2min,r2max,d_force);
break;
default:
fprintf(stderr,"** error : not supported tblno = %d **\n",tblno);
exit(1);
break;
}
CUT_CHECK_ERROR("Kernel execution failed");
get_result_index(ni,indexi,d_force,forcef,force);
free_index(indexi);
free_index(indexj);
}
void MR3calcewald(int *k, int knum_org, double *x, int n, double *q,
double alpha, double epsilon, double cell[3][3],
double *force, double *tpot, double stress[3][3])
{
VG_XVEC *d_x,*xf;
float *d_force,*forcef;
int knum;
VG_KVEC *d_k,*kf;
float *d_bsbc,*bsbcf;
double cellsize_1[3]={1.0/cell[0][0],1.0/cell[1][1],1.0/cell[2][2]};
if(knum_org>=0) knum=knum_org;
else knum=-knum_org;
malloc_x(&d_x,&xf,n,NTHRE);
malloc_f(&d_force,&forcef,n);
malloc_k(&d_k,&kf,knum,NTHRE);
malloc_f(&d_bsbc,&bsbcf,knum);
copy_and_send_xq(n,x,q,cellsize_1,xf,d_x);
copy_and_send_k(knum,k,alpha,epsilon,cellsize_1,kf,d_k);
ewald_dft_kernel<<< (knum+NTHRE-1)/NTHRE, NTHRE >>>(d_x,n,d_k,knum,d_bsbc);
CUDA_SAFE_CALL(cudaMemcpy(bsbcf,d_bsbc,sizeof(float)*knum*3,cudaMemcpyDeviceToHost));
*tpot=0.0;
for(int i=0;i<knum;i++){
*tpot+=(bsbcf[i*3]*bsbcf[i*3]+bsbcf[i*3+1]*bsbcf[i*3+1])*0.5/kf[i].factor1;
}
if(knum_org>=0){
ewaldforce_idft_kernel<<< (n+NTHRE-1)/NTHRE, NTHRE >>>(d_x,n,d_k,knum,d_bsbc,d_force);
for(int j=0;j<3;j++) cellsize_1[j] *= 2.0 * M_PI;
get_result_q3(n,d_force,forcef,q,cellsize_1,force);
}
else{
ewaldpot_idft_kernel<<< (n+NTHRE-1)/NTHRE, NTHRE >>>(d_x,n,d_k,knum,d_bsbc,d_force);
get_result_q(n,d_force,forcef,q,1.0,force);
}
}
|
71c3cefb247fdd7d2a392cc1f62db229eca6f001.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col, const int kstride_h = 1, const int kstride_w = 1) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; i++) {
for (int j = 0; j < kernel_w; j++) {
int h = h_in + i * kstride_h;
int w = w_in + j * kstride_w;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * kstride_h * width + j*kstride_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col, const int kstride_h, const int kstride_w) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;
int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;
int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col, kstride_h, kstride_w);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col, const int kstride_h, const int kstride_w);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col, const int kstride_h, const int kstride_w);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
template <typename Dtype>
__global__ void fcn_col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
const int kstride_h, const int kstride_w,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int ksize_h = (patch_h - 1) * kstride_h + 1;
int ksize_w = (patch_w - 1) * kstride_w + 1;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize_w) ? 0 : (w - ksize_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < ksize_h) ? 0 : (h - ksize_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int w_in_col = w - w_col * stride_w;
if (w_in_col % kstride_w == 0) {
w_in_col = w_in_col / kstride_w;
int h_in_col = (h - h_col * stride_h) / kstride_h;
int c_in_col = h_in_col*patch_w + w_in_col;
int c_global = c * patch_w * patch_h + c_in_col;
val += data_col[(c_global * height_col + h_col) * width_col + w_col];
}
}
}
// int offset =
// (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
// int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
// int coeff_w_col = (1 - stride_w * height_col * width_col);
// for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
// for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
// }
// }
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void fcn_backward_col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int kstride_h, const int kstride_w,
Dtype* data_im) {
int ksize_h = (patch_h - 1) * kstride_h + 1;
int ksize_w = (patch_w -1) * kstride_w + 1;
int height_col = (height + 2 * pad_h - ksize_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - ksize_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( fcn_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w, height_col, width_col,
kstride_h, kstride_w, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
template void fcn_backward_col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int kstride_h, const int kstride_w, float* data_im);
template void fcn_backward_col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int kstride_h, const int kstride_w, double* data_im);
} // namespace caffe
| 71c3cefb247fdd7d2a392cc1f62db229eca6f001.cu | #include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col, const int kstride_h = 1, const int kstride_w = 1) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; i++) {
for (int j = 0; j < kernel_w; j++) {
int h = h_in + i * kstride_h;
int w = w_in + j * kstride_w;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * kstride_h * width + j*kstride_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col, const int kstride_h, const int kstride_w) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int ext_kernel_h = (kernel_h - 1) * kstride_h + 1;
int ext_kernel_w = (kernel_w - 1) * kstride_w + 1;
int height_col = (height + 2 * pad_h - ext_kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - ext_kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col, kstride_h, kstride_w);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col, const int kstride_h, const int kstride_w);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col, const int kstride_h, const int kstride_w);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
template <typename Dtype>
__global__ void fcn_col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
const int kstride_h, const int kstride_w,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int ksize_h = (patch_h - 1) * kstride_h + 1;
int ksize_w = (patch_w - 1) * kstride_w + 1;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize_w) ? 0 : (w - ksize_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < ksize_h) ? 0 : (h - ksize_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int w_in_col = w - w_col * stride_w;
if (w_in_col % kstride_w == 0) {
w_in_col = w_in_col / kstride_w;
int h_in_col = (h - h_col * stride_h) / kstride_h;
int c_in_col = h_in_col*patch_w + w_in_col;
int c_global = c * patch_w * patch_h + c_in_col;
val += data_col[(c_global * height_col + h_col) * width_col + w_col];
}
}
}
// int offset =
// (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
// int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
// int coeff_w_col = (1 - stride_w * height_col * width_col);
// for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
// for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
// }
// }
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void fcn_backward_col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int kstride_h, const int kstride_w,
Dtype* data_im) {
int ksize_h = (patch_h - 1) * kstride_h + 1;
int ksize_w = (patch_w -1) * kstride_w + 1;
int height_col = (height + 2 * pad_h - ksize_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - ksize_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
fcn_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w, height_col, width_col,
kstride_h, kstride_w, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
template void fcn_backward_col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int kstride_h, const int kstride_w, float* data_im);
template void fcn_backward_col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int kstride_h, const int kstride_w, double* data_im);
} // namespace caffe
|
c7f23df7fd9aab876387de351e79658edd9f8beb.hip | // !!! This is a file automatically generated by hipify!!!
#include "open_acc_map_header.cuh"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
__constant__ int dev_a;
__global__ void cudaFunction(int *b)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
if(index<CUDASIZE)
{
b[index] = b[index]-3;
}
}
void wrapper(int c)
{
int b[CUDASIZE];
for(int a=0;a<CUDASIZE;a++)
{
b[a] = c+a*c;
printf("b[%d] = %d;\n", a, b[a]);
}
int *dev_b;
hipMalloc((void**)&dev_b, CUDASIZE*sizeof(int));
hipMemcpy(dev_b, b, CUDASIZE*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaFunction), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_b);
hipMemcpy(b, dev_b, CUDASIZE*sizeof(int), hipMemcpyDeviceToHost);
printf("AFTER\n");
for(int a=0;a<CUDASIZE;a++)
{
printf("b[%d] = %d;\n", a, b[a]);
}
hipFree(dev_b);
}
| c7f23df7fd9aab876387de351e79658edd9f8beb.cu | #include "open_acc_map_header.cuh"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <cuda_runtime.h>
__constant__ int dev_a;
__global__ void cudaFunction(int *b)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
if(index<CUDASIZE)
{
b[index] = b[index]-3;
}
}
void wrapper(int c)
{
int b[CUDASIZE];
for(int a=0;a<CUDASIZE;a++)
{
b[a] = c+a*c;
printf("b[%d] = %d;\n", a, b[a]);
}
int *dev_b;
cudaMalloc((void**)&dev_b, CUDASIZE*sizeof(int));
cudaMemcpy(dev_b, b, CUDASIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaFunction<<<BLOCKS, THREADS>>>(dev_b);
cudaMemcpy(b, dev_b, CUDASIZE*sizeof(int), cudaMemcpyDeviceToHost);
printf("AFTER\n");
for(int a=0;a<CUDASIZE;a++)
{
printf("b[%d] = %d;\n", a, b[a]);
}
cudaFree(dev_b);
}
|
d04554b484219f62ce79663c89732ad8f2f696e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> s d c
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define BLK_SIZE 256
// SWP_WIDTH is number of threads in a block
// 64 and 256 are better on Kepler;
extern __shared__ magmaDoubleComplex shared_data[];
/******************************************************************************/
static __device__
void zlaswp_rowparallel_devfunc(
int n, int width, int height,
magmaDoubleComplex *dA, int lda,
magmaDoubleComplex *dout, int ldo,
magma_int_t* pivinfo)
{
//int height = k2- k1;
//int height = blockDim.x;
unsigned int tid = threadIdx.x;
dA += SWP_WIDTH * blockIdx.x * lda;
dout += SWP_WIDTH * blockIdx.x * ldo;
magmaDoubleComplex *sdata = shared_data;
if (blockIdx.x == gridDim.x -1)
{
width = n - blockIdx.x * SWP_WIDTH;
}
if (tid < height)
{
int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C
int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C
#pragma unroll
for (int i=0; i < width; i++)
{
sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ];
dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ];
}
}
__syncthreads();
if (tid < height)
{
// copy back the upper swapped portion of A to dout
#pragma unroll
for (int i=0; i < width; i++)
{
dout[tid + i * ldo] = sdata[tid + i * height];
}
}
}
/******************************************************************************/
// parallel swap the swaped dA(1:nb,i:n) is stored in dout
__global__
void zlaswp_rowparallel_kernel(
int n, int width, int height,
magmaDoubleComplex *dinput, int ldi,
magmaDoubleComplex *doutput, int ldo,
magma_int_t* pivinfo)
{
zlaswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo);
}
/******************************************************************************/
__global__
void zlaswp_rowparallel_kernel_batched(
int n, int width, int height,
magmaDoubleComplex **input_array, int ldi,
magmaDoubleComplex **output_array, int ldo,
magma_int_t** pivinfo_array)
{
int batchid = blockIdx.z;
zlaswp_rowparallel_devfunc(n, width, height, input_array[batchid], ldi, output_array[batchid], ldo, pivinfo_array[batchid]);
}
/******************************************************************************/
extern "C" void
magma_zlaswp_rowparallel_batched( magma_int_t n,
magmaDoubleComplex** input_array, magma_int_t ldi,
magmaDoubleComplex** output_array, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t **pivinfo_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > 1024)
{
fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, batchCount);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaDoubleComplex) * height * n;
hipLaunchKernelGGL(( zlaswp_rowparallel_kernel_batched)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, n, height, input_array, ldi, output_array, ldo, pivinfo_array );
}
else
{
size_t shmem = sizeof(magmaDoubleComplex) * height * SWP_WIDTH;
hipLaunchKernelGGL(( zlaswp_rowparallel_kernel_batched)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, SWP_WIDTH, height, input_array, ldi, output_array, ldo, pivinfo_array );
}
}
/******************************************************************************/
extern "C" void
magma_zlaswp_rowparallel(
magma_int_t n,
magmaDoubleComplex* input, magma_int_t ldi,
magmaDoubleComplex* output, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t *pivinfo,
magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > MAX_NTHREADS)
{
fprintf( stderr, "%s: height=%lld > %lld, magma_zlaswp_rowparallel_q not supported\n",
__func__, (long long) n, (long long) MAX_NTHREADS );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, 1);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaDoubleComplex) * height * n;
hipLaunchKernelGGL(( zlaswp_rowparallel_kernel)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, n, height, input, ldi, output, ldo, pivinfo );
}
else
{
size_t shmem = sizeof(magmaDoubleComplex) * height * SWP_WIDTH;
hipLaunchKernelGGL(( zlaswp_rowparallel_kernel)
, dim3(grid), dim3(height), shmem, queue->cuda_stream() ,
n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo );
}
}
/******************************************************************************/
// serial swap that does swapping one row by one row
__global__ void zlaswp_rowserial_kernel_batched( int n, magmaDoubleComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaDoubleComplex* dA = dA_array[blockIdx.z];
magma_int_t *d_ipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if (tid < n) {
magmaDoubleComplex A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = d_ipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap that does swapping one row by one row, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_zlaswp_rowserial_batched(magma_int_t n, magmaDoubleComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
hipLaunchKernelGGL(( zlaswp_rowserial_kernel_batched)
, dim3(grid), dim3(max(BLK_SIZE, n)), 0, queue->cuda_stream() ,
n, dA_array, lda, k1, k2, ipiv_array);
}
/******************************************************************************/
// serial swap that does swapping one column by one column
__global__ void zlaswp_columnserial_kernel_batched( int n, magmaDoubleComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaDoubleComplex* dA = dA_array[blockIdx.z];
magma_int_t *d_ipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if ( k1 < 0 || k2 < 0 ) return;
if ( tid < n) {
magmaDoubleComplex A1;
if (k1 <= k2)
{
for (int i1 = k1; i1 <= k2; i1++)
{
int i2 = d_ipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
} else
{
for (int i1 = k1; i1 >= k2; i1--)
{
int i2 = d_ipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
}
}
}
/******************************************************************************/
// serial swap that does swapping one column by one column
// K1, K2 are in Fortran indexing
extern "C" void
magma_zlaswp_columnserial_batched(magma_int_t n, magmaDoubleComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
hipLaunchKernelGGL(( zlaswp_columnserial_kernel_batched)
, dim3(grid), dim3(min(BLK_SIZE, n)), 0, queue->cuda_stream() ,
n, dA_array, lda, k1, k2, ipiv_array);
}
| d04554b484219f62ce79663c89732ad8f2f696e9.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> s d c
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#define BLK_SIZE 256
// SWP_WIDTH is number of threads in a block
// 64 and 256 are better on Kepler;
extern __shared__ magmaDoubleComplex shared_data[];
/******************************************************************************/
static __device__
void zlaswp_rowparallel_devfunc(
int n, int width, int height,
magmaDoubleComplex *dA, int lda,
magmaDoubleComplex *dout, int ldo,
magma_int_t* pivinfo)
{
//int height = k2- k1;
//int height = blockDim.x;
unsigned int tid = threadIdx.x;
dA += SWP_WIDTH * blockIdx.x * lda;
dout += SWP_WIDTH * blockIdx.x * ldo;
magmaDoubleComplex *sdata = shared_data;
if (blockIdx.x == gridDim.x -1)
{
width = n - blockIdx.x * SWP_WIDTH;
}
if (tid < height)
{
int mynewroworig = pivinfo[tid]-1; //-1 to get the index in C
int itsreplacement = pivinfo[mynewroworig] -1; //-1 to get the index in C
#pragma unroll
for (int i=0; i < width; i++)
{
sdata[ tid + i * height ] = dA[ mynewroworig + i * lda ];
dA[ mynewroworig + i * lda ] = dA[ itsreplacement + i * lda ];
}
}
__syncthreads();
if (tid < height)
{
// copy back the upper swapped portion of A to dout
#pragma unroll
for (int i=0; i < width; i++)
{
dout[tid + i * ldo] = sdata[tid + i * height];
}
}
}
/******************************************************************************/
// parallel swap the swaped dA(1:nb,i:n) is stored in dout
__global__
void zlaswp_rowparallel_kernel(
int n, int width, int height,
magmaDoubleComplex *dinput, int ldi,
magmaDoubleComplex *doutput, int ldo,
magma_int_t* pivinfo)
{
zlaswp_rowparallel_devfunc(n, width, height, dinput, ldi, doutput, ldo, pivinfo);
}
/******************************************************************************/
__global__
void zlaswp_rowparallel_kernel_batched(
int n, int width, int height,
magmaDoubleComplex **input_array, int ldi,
magmaDoubleComplex **output_array, int ldo,
magma_int_t** pivinfo_array)
{
int batchid = blockIdx.z;
zlaswp_rowparallel_devfunc(n, width, height, input_array[batchid], ldi, output_array[batchid], ldo, pivinfo_array[batchid]);
}
/******************************************************************************/
extern "C" void
magma_zlaswp_rowparallel_batched( magma_int_t n,
magmaDoubleComplex** input_array, magma_int_t ldi,
magmaDoubleComplex** output_array, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t **pivinfo_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > 1024)
{
fprintf( stderr, "%s: n=%lld > 1024, not supported\n", __func__, (long long) n );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, batchCount);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaDoubleComplex) * height * n;
zlaswp_rowparallel_kernel_batched
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, n, height, input_array, ldi, output_array, ldo, pivinfo_array );
}
else
{
size_t shmem = sizeof(magmaDoubleComplex) * height * SWP_WIDTH;
zlaswp_rowparallel_kernel_batched
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, SWP_WIDTH, height, input_array, ldi, output_array, ldo, pivinfo_array );
}
}
/******************************************************************************/
extern "C" void
magma_zlaswp_rowparallel(
magma_int_t n,
magmaDoubleComplex* input, magma_int_t ldi,
magmaDoubleComplex* output, magma_int_t ldo,
magma_int_t k1, magma_int_t k2,
magma_int_t *pivinfo,
magma_queue_t queue)
{
if (n == 0 ) return;
int height = k2-k1;
if ( height > MAX_NTHREADS)
{
fprintf( stderr, "%s: height=%lld > %lld, magma_zlaswp_rowparallel_q not supported\n",
__func__, (long long) n, (long long) MAX_NTHREADS );
}
int blocks = magma_ceildiv( n, SWP_WIDTH );
dim3 grid(blocks, 1, 1);
if ( n < SWP_WIDTH)
{
size_t shmem = sizeof(magmaDoubleComplex) * height * n;
zlaswp_rowparallel_kernel
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, n, height, input, ldi, output, ldo, pivinfo );
}
else
{
size_t shmem = sizeof(magmaDoubleComplex) * height * SWP_WIDTH;
zlaswp_rowparallel_kernel
<<< grid, height, shmem, queue->cuda_stream() >>>
( n, SWP_WIDTH, height, input, ldi, output, ldo, pivinfo );
}
}
/******************************************************************************/
// serial swap that does swapping one row by one row
__global__ void zlaswp_rowserial_kernel_batched( int n, magmaDoubleComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaDoubleComplex* dA = dA_array[blockIdx.z];
magma_int_t *d_ipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if (tid < n) {
magmaDoubleComplex A1;
for (int i1 = k1; i1 < k2; i1++)
{
int i2 = d_ipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 + tid * lda];
dA[i1 + tid * lda] = dA[i2 + tid * lda];
dA[i2 + tid * lda] = A1;
}
}
}
}
/******************************************************************************/
// serial swap that does swapping one row by one row, similar to LAPACK
// K1, K2 are in Fortran indexing
extern "C" void
magma_zlaswp_rowserial_batched(magma_int_t n, magmaDoubleComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
zlaswp_rowserial_kernel_batched
<<< grid, max(BLK_SIZE, n), 0, queue->cuda_stream() >>>
(n, dA_array, lda, k1, k2, ipiv_array);
}
/******************************************************************************/
// serial swap that does swapping one column by one column
__global__ void zlaswp_columnserial_kernel_batched( int n, magmaDoubleComplex **dA_array, int lda, int k1, int k2, magma_int_t** ipiv_array )
{
magmaDoubleComplex* dA = dA_array[blockIdx.z];
magma_int_t *d_ipiv = ipiv_array[blockIdx.z];
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
k1--;
k2--;
if ( k1 < 0 || k2 < 0 ) return;
if ( tid < n) {
magmaDoubleComplex A1;
if (k1 <= k2)
{
for (int i1 = k1; i1 <= k2; i1++)
{
int i2 = d_ipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
} else
{
for (int i1 = k1; i1 >= k2; i1--)
{
int i2 = d_ipiv[i1] - 1; // Fortran index, switch i1 and i2
if ( i2 != i1)
{
A1 = dA[i1 * lda + tid];
dA[i1 * lda + tid] = dA[i2 * lda + tid];
dA[i2 * lda + tid] = A1;
}
}
}
}
}
/******************************************************************************/
// serial swap that does swapping one column by one column
// K1, K2 are in Fortran indexing
extern "C" void
magma_zlaswp_columnserial_batched(magma_int_t n, magmaDoubleComplex** dA_array, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
magma_int_t **ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
if (n == 0 ) return;
int blocks = magma_ceildiv( n, BLK_SIZE );
dim3 grid(blocks, 1, batchCount);
zlaswp_columnserial_kernel_batched
<<< grid, min(BLK_SIZE, n), 0, queue->cuda_stream() >>>
(n, dA_array, lda, k1, k2, ipiv_array);
}
|
9a9218dc001e5a2de390b0d266c5478a414c6521.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define N 16
int testfunc()
{
float* A;
float* B;
float* C;
hipMalloc((void**)&A, sizeof(float)*N);
hipMalloc((void**)&B, sizeof(float)*N);
hipMalloc((void**)&C, sizeof(float)*N);
//hipFree(A);
//hipFree(B);
hipFree(C);
return 0;
}
int main()
{
testfunc();
return 0;
}
| 9a9218dc001e5a2de390b0d266c5478a414c6521.cu | #include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define N 16
int testfunc()
{
float* A;
float* B;
float* C;
cudaMalloc((void**)&A, sizeof(float)*N);
cudaMalloc((void**)&B, sizeof(float)*N);
cudaMalloc((void**)&C, sizeof(float)*N);
//cudaFree(A);
//cudaFree(B);
cudaFree(C);
return 0;
}
int main()
{
testfunc();
return 0;
}
|
e0e7f2c3762cb7158471e0f55116c6958f146dbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//======================================
//
//
// GPU
//======================================
#include"stdafx.h"
#include"ChooseBox_DATA.hpp"
#include"ChooseBox_FUNC.hpp"
#include"ChooseBox_Base.h"
#include"ChooseBox_GPU.cuh"
#include"ChooseBox_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
#define CALC_BATCH_MAX (256)
#define CALC_INPUT_MAX (1024)
// Forward kernel: copies the selected sub-box of the input volume into the
// (smaller) output volume. One thread per output voxel; launched with
// block = (boxSize.x, boxSize.y, boxSize.z) and grid = (channel, batch).
__global__ void device_ChooseBox(
U32 chCount,
U32 startX, U32 startY, U32 startZ,
U32 inputXCount, U32 inputYCount, U32 inputZCount,
U32 outputXCount, U32 outputYCount, U32 outputZCount,
const F32 lpInputBuffer[],
F32 lpOutputBuffer[])
{
U32 batchNum = blockIdx.y;
U32 ch = blockIdx.x;
U32 x = threadIdx.x;
U32 y = threadIdx.y;
U32 z = threadIdx.z;
// offset the output coordinate by the box origin to get the input coordinate
U32 inputX = startX + x;
U32 inputY = startY + y;
U32 inputZ = startZ + z;
U32 inputOffset = CalculateOffset(batchNum, chCount, inputXCount, inputYCount, inputZCount, ch, inputX, inputY, inputZ);
U32 outputOffset = CalculateOffset(batchNum, chCount, outputXCount, outputYCount, outputZCount, ch, x, y, z);
lpOutputBuffer[outputOffset] = lpInputBuffer[inputOffset];
}
// Backward kernel: scatters each output-gradient voxel back to its source
// location in the input-gradient volume. Launch layout mirrors
// device_ChooseBox: block = boxSize, grid = (channel, batch).
__global__ void device_ReChooseBox(
U32 chCount,
U32 startX, U32 startY, U32 startZ,
U32 inputXCount, U32 inputYCount, U32 inputZCount,
U32 outputXCount, U32 outputYCount, U32 outputZCount,
const F32 lpDOutputBuffer[],
F32 lpDInputBuffer[])
{
U32 batchNum = blockIdx.y;
U32 ch = blockIdx.x;
U32 x = threadIdx.x;
U32 y = threadIdx.y;
U32 z = threadIdx.z;
// translate output coordinates back into input coordinates via the box origin
U32 inputX = startX + x;
U32 inputY = startY + y;
U32 inputZ = startZ + z;
U32 inputOffset = CalculateOffset(batchNum, chCount, inputXCount, inputYCount, inputZCount, ch, inputX, inputY, inputZ);
U32 outputOffset = CalculateOffset(batchNum, chCount, outputXCount, outputYCount, outputZCount, ch, x, y, z);
lpDInputBuffer[inputOffset] = lpDOutputBuffer[outputOffset];
}
/** Constructor: derives the output data structure from the layer data and
creates the BLAS handle owned by this instance. */
ChooseBox_GPU::ChooseBox_GPU(Gravisbell::GUID guid, ChooseBox_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
: ChooseBox_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1))
, layerData (i_layerData) /**< layer data shared with the owner */
, inputBufferCount (0) /**< input element count, set in PreProcessCalculate */
, outputBufferCount (0) /**< output element count, set in PreProcessCalculate */
{
hipblasCreate(&cublasHandle);
}
/** Destructor: releases the BLAS handle. */
ChooseBox_GPU::~ChooseBox_GPU()
{
hipblasDestroy(cublasHandle);
}
//================================
// Basic processing
//================================
/** Returns the layer kind flags (GPU bit OR'ed with the base kind). */
U32 ChooseBox_GPU::GetLayerKind()const
{
return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initializes the layer data.
@return 0 on success */
ErrorCode ChooseBox_GPU::Initialize(void)
{
return this->layerData.Initialize();
}
//===========================
// Layer data access
//===========================
/** Returns the layer data (mutable). */
ChooseBox_LayerData_Base& ChooseBox_GPU::GetLayerData()
{
return this->layerData;
}
/** Returns the layer data (const). */
const ChooseBox_LayerData_Base& ChooseBox_GPU::GetLayerData()const
{
return this->layerData;
}
//================================
// Computation
//================================
/** Pre-processing for learning.
Must be called once after network construction, before running the layer.
@return 0 on success */
ErrorCode ChooseBox_GPU::PreProcessLearn()
{
ErrorCode errorCode = this->PreProcessCalculate();
if(errorCode != ErrorCode::ERROR_CODE_NONE)
return errorCode;
return ErrorCode::ERROR_CODE_NONE;
}
/** Pre-processing for inference.
Caches and validates the input/output buffer element counts.
@return 0 on success */
ErrorCode ChooseBox_GPU::PreProcessCalculate()
{
// cache and validate the input buffer size
this->inputBufferCount = this->GetInputBufferCount();
if(this->inputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT;
// cache and validate the output buffer size
this->outputBufferCount = this->GetOutputBufferCount();
if(this->outputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT;
return ErrorCode::ERROR_CODE_NONE;
}
/** Loop initialization, called before each dataset pass. No-op here. */
ErrorCode ChooseBox_GPU::PreProcessLoop()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Forward pass: copies the configured sub-box of each input volume into
the output buffer (one kernel thread per output voxel).
@param i_lppInputBuffer input device buffer, GetInputBufferCount() elements per batch entry
@param o_lppOutputBuffer output device buffer, GetOutputBufferCount() elements per batch entry
@return 0 on success */
ErrorCode ChooseBox_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
// clear the output buffer before the kernel writes the selected box
hipMemset(o_lppOutputBuffer, 0, sizeof(F32)*this->outputBufferCount*this->GetBatchSize());
dim3 grid(
this->GetOutputDataStruct().ch,
this->GetBatchSize());
dim3 block(
this->layerData.layerStructure.boxSize.x,
this->layerData.layerStructure.boxSize.y,
this->layerData.layerStructure.boxSize.z);
hipLaunchKernelGGL(( device_ChooseBox), dim3(grid), dim3(block), 0, 0,
this->GetOutputDataStruct().ch,
this->layerData.layerStructure.startPosition.x, this->layerData.layerStructure.startPosition.y, this->layerData.layerStructure.startPosition.z,
this->GetInputDataStruct().x, this->GetInputDataStruct().y, this->GetInputDataStruct().z,
this->layerData.layerStructure.boxSize.x, this->layerData.layerStructure.boxSize.y, this->layerData.layerStructure.boxSize.z,
i_lppInputBuffer,
o_lppOutputBuffer);
#if _DEBUG
// debug-only readback of input/output for inspection in a debugger
std::vector<F32> lpInputBuffer(this->inputBufferCount * this->GetBatchSize());
hipMemcpy(&lpInputBuffer[0], i_lppInputBuffer, sizeof(F32) * this->inputBufferCount * this->GetBatchSize(), hipMemcpyDeviceToHost);
std::vector<F32> lpOutputBuffer(this->outputBufferCount * this->GetBatchSize());
hipMemcpy(&lpOutputBuffer[0], o_lppOutputBuffer, sizeof(F32) * this->outputBufferCount * this->GetBatchSize(), hipMemcpyDeviceToHost);
#endif
return ErrorCode::ERROR_CODE_NONE;
}
//================================
// Learning
//================================
/** Computes the input-error gradient from the output-error gradient.
Uses the values from the preceding Calculate call.
@param o_lppDInputBuffer input-gradient destination, [GetBatchSize()][GetInputBufferCount()] elements (may be NULL)
@param i_lppDOutputBuffer output gradient = next layer's input gradient, [GetBatchSize()][GetOutputBufferCount()] elements
*/
ErrorCode ChooseBox_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
// compute the input gradient only when a destination is provided
if(o_lppDInputBuffer)
{
// zero the whole input gradient; the kernel only writes inside the box
hipMemset(o_lppDInputBuffer, 0, sizeof(F32)*this->inputBufferCount*this->GetBatchSize());
dim3 grid(
this->GetOutputDataStruct().ch,
this->GetBatchSize());
dim3 block(
this->layerData.layerStructure.boxSize.x,
this->layerData.layerStructure.boxSize.y,
this->layerData.layerStructure.boxSize.z);
hipLaunchKernelGGL(( device_ReChooseBox), dim3(grid), dim3(block), 0, 0,
this->GetOutputDataStruct().ch,
this->layerData.layerStructure.startPosition.x, this->layerData.layerStructure.startPosition.y, this->layerData.layerStructure.startPosition.z,
this->GetInputDataStruct().x, this->GetInputDataStruct().y, this->GetInputDataStruct().z,
this->layerData.layerStructure.boxSize.x, this->layerData.layerStructure.boxSize.y, this->layerData.layerStructure.boxSize.z,
i_lppDOutputBuffer,
o_lppDInputBuffer);
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Training step: this layer has no trainable parameters, so it only
propagates the gradient via CalculateDInput_device. */
ErrorCode ChooseBox_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
| e0e7f2c3762cb7158471e0f55116c6958f146dbb.cu | //======================================
// フィードフォワードニューラルネットワークの統合処理レイヤー
// 結合、活性化
// GPU処理用
//======================================
#include"stdafx.h"
#include"ChooseBox_DATA.hpp"
#include"ChooseBox_FUNC.hpp"
#include"ChooseBox_Base.h"
#include"ChooseBox_GPU.cuh"
#include"ChooseBox_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
#define CALC_BATCH_MAX (256)
#define CALC_INPUT_MAX (1024)
// Forward kernel: copies the selected sub-box of the input volume into the
// (smaller) output volume. One thread per output voxel; launched with
// block = (boxSize.x, boxSize.y, boxSize.z) and grid = (channel, batch).
__global__ void device_ChooseBox(
U32 chCount,
U32 startX, U32 startY, U32 startZ,
U32 inputXCount, U32 inputYCount, U32 inputZCount,
U32 outputXCount, U32 outputYCount, U32 outputZCount,
const F32 lpInputBuffer[],
F32 lpOutputBuffer[])
{
U32 batchNum = blockIdx.y;
U32 ch = blockIdx.x;
U32 x = threadIdx.x;
U32 y = threadIdx.y;
U32 z = threadIdx.z;
// offset the output coordinate by the box origin to get the input coordinate
U32 inputX = startX + x;
U32 inputY = startY + y;
U32 inputZ = startZ + z;
U32 inputOffset = CalculateOffset(batchNum, chCount, inputXCount, inputYCount, inputZCount, ch, inputX, inputY, inputZ);
U32 outputOffset = CalculateOffset(batchNum, chCount, outputXCount, outputYCount, outputZCount, ch, x, y, z);
lpOutputBuffer[outputOffset] = lpInputBuffer[inputOffset];
}
// Backward kernel: scatters each output-gradient voxel back to its source
// location in the input-gradient volume. Launch layout mirrors
// device_ChooseBox: block = boxSize, grid = (channel, batch).
__global__ void device_ReChooseBox(
U32 chCount,
U32 startX, U32 startY, U32 startZ,
U32 inputXCount, U32 inputYCount, U32 inputZCount,
U32 outputXCount, U32 outputYCount, U32 outputZCount,
const F32 lpDOutputBuffer[],
F32 lpDInputBuffer[])
{
U32 batchNum = blockIdx.y;
U32 ch = blockIdx.x;
U32 x = threadIdx.x;
U32 y = threadIdx.y;
U32 z = threadIdx.z;
// translate output coordinates back into input coordinates via the box origin
U32 inputX = startX + x;
U32 inputY = startY + y;
U32 inputZ = startZ + z;
U32 inputOffset = CalculateOffset(batchNum, chCount, inputXCount, inputYCount, inputZCount, ch, inputX, inputY, inputZ);
U32 outputOffset = CalculateOffset(batchNum, chCount, outputXCount, outputYCount, outputZCount, ch, x, y, z);
lpDInputBuffer[inputOffset] = lpDOutputBuffer[outputOffset];
}
/** Constructor: derives the output data structure from the layer data and
creates the cuBLAS handle owned by this instance. */
ChooseBox_GPU::ChooseBox_GPU(Gravisbell::GUID guid, ChooseBox_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
: ChooseBox_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1))
, layerData (i_layerData) /**< layer data shared with the owner */
, inputBufferCount (0) /**< input element count, set in PreProcessCalculate */
, outputBufferCount (0) /**< output element count, set in PreProcessCalculate */
{
cublasCreate(&cublasHandle);
}
/** Destructor: releases the cuBLAS handle. */
ChooseBox_GPU::~ChooseBox_GPU()
{
cublasDestroy(cublasHandle);
}
//================================
// Basic processing
//================================
/** Returns the layer kind flags (GPU bit OR'ed with the base kind). */
U32 ChooseBox_GPU::GetLayerKind()const
{
return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initializes the layer data.
@return 0 on success */
ErrorCode ChooseBox_GPU::Initialize(void)
{
return this->layerData.Initialize();
}
//===========================
// Layer data access
//===========================
/** Returns the layer data (mutable). */
ChooseBox_LayerData_Base& ChooseBox_GPU::GetLayerData()
{
return this->layerData;
}
/** Returns the layer data (const). */
const ChooseBox_LayerData_Base& ChooseBox_GPU::GetLayerData()const
{
return this->layerData;
}
//================================
// Computation
//================================
/** Pre-processing for learning.
Must be called once after network construction, before running the layer.
@return 0 on success */
ErrorCode ChooseBox_GPU::PreProcessLearn()
{
ErrorCode errorCode = this->PreProcessCalculate();
if(errorCode != ErrorCode::ERROR_CODE_NONE)
return errorCode;
return ErrorCode::ERROR_CODE_NONE;
}
/** Pre-processing for inference.
Caches and validates the input/output buffer element counts.
@return 0 on success */
ErrorCode ChooseBox_GPU::PreProcessCalculate()
{
// cache and validate the input buffer size
this->inputBufferCount = this->GetInputBufferCount();
if(this->inputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT;
// cache and validate the output buffer size
this->outputBufferCount = this->GetOutputBufferCount();
if(this->outputBufferCount == 0)
return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT;
return ErrorCode::ERROR_CODE_NONE;
}
/** Loop initialization, called before each dataset pass. No-op here. */
ErrorCode ChooseBox_GPU::PreProcessLoop()
{
return ErrorCode::ERROR_CODE_NONE;
}
/** Forward pass: copies the configured sub-box of each input volume into
the output buffer (one kernel thread per output voxel).
@param i_lppInputBuffer input device buffer, GetInputBufferCount() elements per batch entry
@param o_lppOutputBuffer output device buffer, GetOutputBufferCount() elements per batch entry
@return 0 on success */
ErrorCode ChooseBox_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
// clear the output buffer before the kernel writes the selected box
cudaMemset(o_lppOutputBuffer, 0, sizeof(F32)*this->outputBufferCount*this->GetBatchSize());
dim3 grid(
this->GetOutputDataStruct().ch,
this->GetBatchSize());
dim3 block(
this->layerData.layerStructure.boxSize.x,
this->layerData.layerStructure.boxSize.y,
this->layerData.layerStructure.boxSize.z);
device_ChooseBox<<<grid, block>>>(
this->GetOutputDataStruct().ch,
this->layerData.layerStructure.startPosition.x, this->layerData.layerStructure.startPosition.y, this->layerData.layerStructure.startPosition.z,
this->GetInputDataStruct().x, this->GetInputDataStruct().y, this->GetInputDataStruct().z,
this->layerData.layerStructure.boxSize.x, this->layerData.layerStructure.boxSize.y, this->layerData.layerStructure.boxSize.z,
i_lppInputBuffer,
o_lppOutputBuffer);
#if _DEBUG
// debug-only readback of input/output for inspection in a debugger
std::vector<F32> lpInputBuffer(this->inputBufferCount * this->GetBatchSize());
cudaMemcpy(&lpInputBuffer[0], i_lppInputBuffer, sizeof(F32) * this->inputBufferCount * this->GetBatchSize(), cudaMemcpyDeviceToHost);
std::vector<F32> lpOutputBuffer(this->outputBufferCount * this->GetBatchSize());
cudaMemcpy(&lpOutputBuffer[0], o_lppOutputBuffer, sizeof(F32) * this->outputBufferCount * this->GetBatchSize(), cudaMemcpyDeviceToHost);
#endif
return ErrorCode::ERROR_CODE_NONE;
}
//================================
// Learning
//================================
/** Computes the input-error gradient from the output-error gradient.
Uses the values from the preceding Calculate call.
@param o_lppDInputBuffer input-gradient destination, [GetBatchSize()][GetInputBufferCount()] elements (may be NULL)
@param i_lppDOutputBuffer output gradient = next layer's input gradient, [GetBatchSize()][GetOutputBufferCount()] elements
*/
ErrorCode ChooseBox_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
// compute the input gradient only when a destination is provided
if(o_lppDInputBuffer)
{
// zero the whole input gradient; the kernel only writes inside the box
cudaMemset(o_lppDInputBuffer, 0, sizeof(F32)*this->inputBufferCount*this->GetBatchSize());
dim3 grid(
this->GetOutputDataStruct().ch,
this->GetBatchSize());
dim3 block(
this->layerData.layerStructure.boxSize.x,
this->layerData.layerStructure.boxSize.y,
this->layerData.layerStructure.boxSize.z);
device_ReChooseBox<<<grid, block>>>(
this->GetOutputDataStruct().ch,
this->layerData.layerStructure.startPosition.x, this->layerData.layerStructure.startPosition.y, this->layerData.layerStructure.startPosition.z,
this->GetInputDataStruct().x, this->GetInputDataStruct().y, this->GetInputDataStruct().z,
this->layerData.layerStructure.boxSize.x, this->layerData.layerStructure.boxSize.y, this->layerData.layerStructure.boxSize.z,
i_lppDOutputBuffer,
o_lppDInputBuffer);
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Training step: this layer has no trainable parameters, so it only
propagates the gradient via CalculateDInput_device. */
ErrorCode ChooseBox_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
|
a993b354afd6eaaf318070fe2ccd817a1870cfe4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 Xilinx, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include "utils.cuh"
#include "readBin.cuh"
using namespace std;
// Repack the m-by-n row-major matrix A into t_A in column-major order
// (the layout expected by Fortran-style BLAS routines).
template<typename T>
void transpose(T *A, T *t_A, int m, int n){
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < m; ++row)
            t_A[col * m + row] = A[row * n + col];
}
/** Usage: <exe> M N dataPath
Loads A (MxN), x (N) and the reference result b (M) from dataPath, computes
r = A*x with BLAS on the GPU and compares r against b. */
int main(int argc, char** argv) {
assert(argc >= 4);
int arg = 0;
int p_m = atoi(argv[++arg]);
int p_n = atoi(argv[++arg]);
string dataPath = argv[++arg];
double *m_host_x, *m_host_b, *m_host_A, *m_host_r;
double *m_device_x, *m_device_A, *m_device_r;
// A.mat is stored row-major; transpose into the column-major layout BLAS expects
double *m_host_tA = new double[p_m * p_n];
readBin(dataPath + "A.mat", m_host_tA, p_m * p_n * sizeof(double));
m_host_A = new double[p_m * p_n];
transpose(m_host_tA, m_host_A, p_m, p_n);
delete[] m_host_tA;
m_host_x = new double[p_n];
readBin(dataPath + "x.mat", m_host_x, p_n * sizeof(double));
m_host_b = new double[p_m];
readBin(dataPath + "b.mat", m_host_b, p_m * sizeof(double));
m_host_r = new double[p_m]();
hipblasHandle_t m_handle;
hipblasStatus_t m_stat;
m_stat = hipblasCreate(&m_handle);
if (m_stat != HIPBLAS_STATUS_SUCCESS) {
printf("CUBLAS create handle failed\n");
}
// device buffers for A, x and the result r
hipError_t cudaStat = hipMalloc((void**)&m_device_A, p_m * p_n * sizeof(double));
if (cudaStat != hipSuccess) {
printf("device memory allocation for matrix A failed\n");
}
cudaStat = hipMalloc((void**)&m_device_x, p_n * sizeof(double));
if (cudaStat != hipSuccess) {
printf("device memory allocation failed: %d\n", cudaStat);
}
cudaStat = hipMalloc((void**)&m_device_r, p_m * sizeof(double));
if (cudaStat != hipSuccess) {
printf("device memory allocation failed: %d\n", cudaStat);
}
m_stat = hipblasSetMatrix(p_m, p_n, sizeof(double), m_host_A, p_m, m_device_A, p_m);
if (m_stat != HIPBLAS_STATUS_SUCCESS) {
printf("Set matrix A failed: %d\n", m_stat);
}
m_stat = hipblasSetVector(p_n, sizeof(double), m_host_x, 1, m_device_x, 1);
if (m_stat != HIPBLAS_STATUS_SUCCESS) {
printf("Set vector x failed: %d\n", m_stat);
}
m_stat = hipblasSetVector(p_m, sizeof(double), m_host_r, 1, m_device_r, 1);
if (m_stat != HIPBLAS_STATUS_SUCCESS) {
printf("Set vector r failed: %d\n", m_stat);
}
#ifdef BENCHMARK
auto start = chrono::high_resolution_clock::now();
#endif
// r = 1*A*x + 0*r (no transpose: A is already column-major on the device)
const double ONE=1, ZERO=0;
m_stat = hipblasDgemv(m_handle, HIPBLAS_OP_N, p_m, p_n, &ONE, m_device_A, p_m, m_device_x, 1, &ZERO,
m_device_r, 1);
if (m_stat != HIPBLAS_STATUS_SUCCESS) {
printf("CUBLAS gemv failed\n");
}
m_stat = hipblasGetVector(p_m, sizeof(double),m_device_r,1,m_host_r,1);
if (m_stat !=HIPBLAS_STATUS_SUCCESS) {
printf("cublas get failed\n");
}
#ifdef BENCHMARK
// NOTE: this times the gemv together with the blocking GetVector readback
auto stop = chrono::high_resolution_clock::now();
chrono::duration<double> elapsed = stop - start;
double duration = elapsed.count();
cout << "Execution time is " << duration << "s." << endl;
#endif
// compare the GPU result against the reference vector b
int err = 0;
compare(p_m, m_host_b, m_host_r, err);
if (err == 0) {
printf("Results verified.\n");
} else {
printf("There are in total %d error(s).\n", err);
}
hipFree(m_device_A);
hipFree(m_device_x);
hipFree(m_device_r);
delete[] m_host_A;
delete[] m_host_x;
delete[] m_host_r;
delete[] m_host_b;
return 0;
}
| a993b354afd6eaaf318070fe2ccd817a1870cfe4.cu | /*
* Copyright 2019 Xilinx, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cstdlib>
#include <chrono>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include "utils.cuh"
#include "readBin.cuh"
using namespace std;
// Repack the m-by-n row-major matrix A into t_A in column-major order
// (the layout expected by Fortran-style BLAS routines).
template<typename T>
void transpose(T *A, T *t_A, int m, int n){
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < m; ++row)
            t_A[col * m + row] = A[row * n + col];
}
/** Usage: <exe> M N dataPath
Loads A (MxN), x (N) and the reference result b (M) from dataPath, computes
r = A*x with cuBLAS on the GPU and compares r against b. */
int main(int argc, char** argv) {
assert(argc >= 4);
int arg = 0;
int p_m = atoi(argv[++arg]);
int p_n = atoi(argv[++arg]);
string dataPath = argv[++arg];
double *m_host_x, *m_host_b, *m_host_A, *m_host_r;
double *m_device_x, *m_device_A, *m_device_r;
// A.mat is stored row-major; transpose into the column-major layout cuBLAS expects
double *m_host_tA = new double[p_m * p_n];
readBin(dataPath + "A.mat", m_host_tA, p_m * p_n * sizeof(double));
m_host_A = new double[p_m * p_n];
transpose(m_host_tA, m_host_A, p_m, p_n);
delete[] m_host_tA;
m_host_x = new double[p_n];
readBin(dataPath + "x.mat", m_host_x, p_n * sizeof(double));
m_host_b = new double[p_m];
readBin(dataPath + "b.mat", m_host_b, p_m * sizeof(double));
m_host_r = new double[p_m]();
cublasHandle_t m_handle;
cublasStatus_t m_stat;
m_stat = cublasCreate(&m_handle);
if (m_stat != CUBLAS_STATUS_SUCCESS) {
printf("CUBLAS create handle failed\n");
}
// device buffers for A, x and the result r
cudaError_t cudaStat = cudaMalloc((void**)&m_device_A, p_m * p_n * sizeof(double));
if (cudaStat != cudaSuccess) {
printf("device memory allocation for matrix A failed\n");
}
cudaStat = cudaMalloc((void**)&m_device_x, p_n * sizeof(double));
if (cudaStat != cudaSuccess) {
printf("device memory allocation failed: %d\n", cudaStat);
}
cudaStat = cudaMalloc((void**)&m_device_r, p_m * sizeof(double));
if (cudaStat != cudaSuccess) {
printf("device memory allocation failed: %d\n", cudaStat);
}
m_stat = cublasSetMatrix(p_m, p_n, sizeof(double), m_host_A, p_m, m_device_A, p_m);
if (m_stat != CUBLAS_STATUS_SUCCESS) {
printf("Set matrix A failed: %d\n", m_stat);
}
m_stat = cublasSetVector(p_n, sizeof(double), m_host_x, 1, m_device_x, 1);
if (m_stat != CUBLAS_STATUS_SUCCESS) {
printf("Set vector x failed: %d\n", m_stat);
}
m_stat = cublasSetVector(p_m, sizeof(double), m_host_r, 1, m_device_r, 1);
if (m_stat != CUBLAS_STATUS_SUCCESS) {
printf("Set vector r failed: %d\n", m_stat);
}
#ifdef BENCHMARK
auto start = chrono::high_resolution_clock::now();
#endif
// r = 1*A*x + 0*r (no transpose: A is already column-major on the device)
const double ONE=1, ZERO=0;
m_stat = cublasDgemv(m_handle, CUBLAS_OP_N, p_m, p_n, &ONE, m_device_A, p_m, m_device_x, 1, &ZERO,
m_device_r, 1);
if (m_stat != CUBLAS_STATUS_SUCCESS) {
printf("CUBLAS gemv failed\n");
}
m_stat = cublasGetVector(p_m, sizeof(double),m_device_r,1,m_host_r,1);
if (m_stat !=CUBLAS_STATUS_SUCCESS) {
printf("cublas get failed\n");
}
#ifdef BENCHMARK
// NOTE: this times the gemv together with the blocking GetVector readback
auto stop = chrono::high_resolution_clock::now();
chrono::duration<double> elapsed = stop - start;
double duration = elapsed.count();
cout << "Execution time is " << duration << "s." << endl;
#endif
// compare the GPU result against the reference vector b
int err = 0;
compare(p_m, m_host_b, m_host_r, err);
if (err == 0) {
printf("Results verified.\n");
} else {
printf("There are in total %d error(s).\n", err);
}
cudaFree(m_device_A);
cudaFree(m_device_x);
cudaFree(m_device_r);
delete[] m_host_A;
delete[] m_host_x;
delete[] m_host_r;
delete[] m_host_b;
return 0;
}
|
a9596dd4b1fbe7697bbbabde8c27937756379e36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "test_global_array.cuh"
// Maintainer: joaander
/*! \file gpu_array_test.cu
\brief GPU kernels for gpu_array_test.cc
\ingroup unit_tests
*/
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
\post All \a num elements in d_data are incremented by 1
*/
__global__ void gpu_add_one_kernel(int *d_data, size_t num)
{
    // one thread per element, with a tail guard for the final partial block
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num)
        ++d_data[i];
}
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
gpu_add_one is just a driver for gpu_add_one_kernel()
*/
extern "C" hipError_t gpu_add_one(int *d_data, size_t num)
{
unsigned int block_size = 256;
// setup the grid to run the kernel: enough 256-thread blocks to cover num
dim3 grid( (int)ceil((double)num / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL((gpu_add_one_kernel), dim3(grid), dim3(threads), 0, 0, d_data, num);
// block until the kernel finishes so execution errors surface here
hipDeviceSynchronize();
return hipGetLastError();
}
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
\post Element \a i in \a d_data is set to \a i * \a i
*/
__global__ void gpu_fill_test_pattern_kernel(int *d_data, size_t num)
{
    // element i receives the value i*i; guard against grid overshoot
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num)
        d_data[i] = i * i;
}
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
gpu_fill_test_pattern is just a driver for gpu_fill_test_pattern_kernel()
*/
extern "C" hipError_t gpu_fill_test_pattern(int *d_data, size_t num)
{
unsigned int block_size = 256;
// setup the grid to run the kernel: enough 256-thread blocks to cover num
dim3 grid( (int)ceil((double)num / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL((gpu_fill_test_pattern_kernel), dim3(grid), dim3(threads), 0, 0, d_data, num);
// block until the kernel finishes so execution errors surface here
hipDeviceSynchronize();
return hipGetLastError();
}
| a9596dd4b1fbe7697bbbabde8c27937756379e36.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "test_global_array.cuh"
// Maintainer: joaander
/*! \file gpu_array_test.cu
\brief GPU kernels for gpu_array_test.cc
\ingroup unit_tests
*/
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
\post All \a num elements in d_data are incremented by 1
*/
// NOTE(review): despite the .cu extension this copy still uses HIP API names
// (hipError_t, hipLaunchKernelGGL); it will not build with plain nvcc unless
// the HIP headers are available -- confirm the intended toolchain.
__global__ void gpu_add_one_kernel(int *d_data, size_t num)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num)
d_data[idx] = d_data[idx] + 1;
}
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
gpu_add_one is just a driver for gpu_add_one_kernel()
*/
extern "C" hipError_t gpu_add_one(int *d_data, size_t num)
{
unsigned int block_size = 256;
// setup the grid to run the kernel: enough 256-thread blocks to cover num
dim3 grid( (int)ceil((double)num / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL((gpu_add_one_kernel), dim3(grid), dim3(threads), 0, 0, d_data, num);
hipDeviceSynchronize();
return hipGetLastError();
}
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
\post Element \a i in \a d_data is set to \a i * \a i
*/
__global__ void gpu_fill_test_pattern_kernel(int *d_data, size_t num)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num)
d_data[idx] = idx*idx;
}
/*! \param d_data Device pointer to the array where the data is held
\param num Number of elements in the array
gpu_fill_test_pattern is just a driver for gpu_fill_test_pattern_kernel()
*/
extern "C" hipError_t gpu_fill_test_pattern(int *d_data, size_t num)
{
unsigned int block_size = 256;
// setup the grid to run the kernel: enough 256-thread blocks to cover num
dim3 grid( (int)ceil((double)num / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL((gpu_fill_test_pattern_kernel), dim3(grid), dim3(threads), 0, 0, d_data, num);
hipDeviceSynchronize();
return hipGetLastError();
}
|
0e202d7634d9aaaef94c26c83b0e5b0db7789309.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_incremental_map_track.h"
#include <malloc.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "gpu_defines.h"
namespace SySal
{
namespace GPU
{
/** Chooses a 1-D launch configuration covering `iterations` work items:
threads per block capped at the device limit, blocks = ceil(iterations/threads).
Only warns (does not clamp) when the device grid limit is exceeded. */
void PrismMapTracker::make_threads_blocks(int iterations, dim3 &threads, dim3 &blocks)
{
threads.x = __max(1, __min(iterations, m_Prop.maxThreadsPerBlock));
threads.y = threads.z = 1;
blocks.x = int(ceil((double)iterations / (double)threads.x));
blocks.y = blocks.z = 1;
if (threads.x * blocks.x > m_Prop.maxGridSize[0])
{
printf("\nWARNING: Grid size exceeded! %d blocks of %d threads requested." , blocks.x, threads.x);
}
}
/** Resets both the chainer and the tracker to their initial state. */
void PrismMapTracker::HardReset()
{
m_Chainer.HardReset();
m_Tracker.HardReset();
}
/** Constructor: binds chainer/tracker back-references, zeroes the performance
counters, queries the device properties and derives the core count per
multiprocessor from the compute capability.
@param gpuid index of the device to use
@throws const char* if the device properties cannot be queried */
PrismMapTracker::PrismMapTracker(int gpuid) : CTOR_INIT(pHThisView), CTOR_INIT(pHLastView)
{
    m_Chainer.pThis = this;
    m_Tracker.pThis = this;
    m_DeviceId = gpuid;
    m_ChainDumper = 0;
    m_PerformanceCounters.GPU = m_DeviceId;
    m_PerformanceCounters.GPUClockMHz = 0;
    m_PerformanceCounters.GPUCores = 0;
    m_PerformanceCounters.MapTimeMS = 0;
    m_PerformanceCounters.TrackTimeMS = 0;
    m_PerformanceCounters.Clusters = 0;
    m_PerformanceCounters.Chains = 0;
    m_PerformanceCounters.Tracks = 0;
    if (hipGetDeviceProperties(&m_Prop, m_DeviceId)) throw "Invalid CUDA device.";
    m_PerformanceCounters.GPUClockMHz = m_Prop.clockRate / 1000;
    // cores per multiprocessor by architecture generation
    int cores = 0;
    if (m_Prop.major == 1)
    {
        cores = 8;      // Tesla
    }
    else if (m_Prop.major == 2)
    {
        if (m_Prop.minor == 0)
            cores = 32; // Fermi GF100
        else if (m_Prop.minor == 1)
            cores = 48; // Fermi GF10x
    }
    else if (m_Prop.major == 3)
    {
        cores = 192;    // Kepler
    }
    else if (m_Prop.major == 5)
    {
        cores = 128;    // Maxwell
    }
    m_PerformanceCounters.GPUCores = m_Prop.multiProcessorCount * cores;
    // BUG FIX: totalGlobalMem is a size_t; printing it with %d is undefined
    // behaviour on 64-bit builds. Cast explicitly and print with %llu.
    printf("\n\nCUDA properties for device %d\nCompute capability %d.%d\nMaxThreadsPerMultiProcessor %d\nMaxThreadsPerBlock %d\nMultiprocessors %d\nMaxGridSize: %d %d %d\nMemory %llu MB\n\n",
        gpuid, m_Prop.major, m_Prop.minor, m_Prop.maxThreadsPerMultiProcessor, m_Prop.maxThreadsPerBlock, m_Prop.multiProcessorCount, m_Prop.maxGridSize[0], m_Prop.maxGridSize[1], m_Prop.maxGridSize[2], (unsigned long long)(m_Prop.totalGlobalMem / 1048576));
}
/** Destructor: frees the host-side view staging buffers. */
PrismMapTracker::~PrismMapTracker()
{
HOST_DEALLOC(pHLastView);
HOST_DEALLOC(pHThisView);
}
/** Registers a callback (and its context) that receives host copies of the
chain views each time they are sent to the tracker. */
void PrismMapTracker::SetChainDumper(void *pContext, ChainDumper dmp)
{
m_ChainDumper = dmp;
m_CDContext = pContext;
}
/** Optionally dumps host copies of the two chain views to the registered
callback, then hands the device views to the tracker.
Each view is copied in two steps: the fixed-size header first (to learn the
full size via Size()), then the whole structure. */
void PrismMapTracker::SendViewsToTracker(int minviewtag, int width, int height, ChainView *pLastView, ChainView *pThisView)
{
    if (m_ChainDumper)
    {
        int sz;
        // previous view: header first, then the complete structure
        HOST_WISE_ALLOC(pHLastView, sizeof(ChainView));
        hipMemcpy(pHLastView, m_Chainer.pLastView, sizeof(ChainView), hipMemcpyDeviceToHost);
        sz = pHLastView->Size();
        HOST_WISE_ALLOC(pHLastView, sz);
        hipMemcpy(pHLastView, m_Chainer.pLastView, sz, hipMemcpyDeviceToHost);
        // current view, same two-step copy
        HOST_WISE_ALLOC(pHThisView, sizeof(ChainView));
        hipMemcpy(pHThisView, m_Chainer.pThisView, sizeof(ChainView), hipMemcpyDeviceToHost);
        // BUG FIX: the size must come from the view just copied (pHThisView),
        // not from the previous view (pHLastView) as the original code did.
        sz = pHThisView->Size();
        HOST_WISE_ALLOC(pHThisView, sz);
        hipMemcpy(pHThisView, m_Chainer.pThisView, sz, hipMemcpyDeviceToHost);
        m_ChainDumper(m_CDContext, pHLastView, pHThisView);
    }
    m_Tracker.InternalFindTracks(minviewtag, width, height, pLastView, pThisView);
}
/** Fixed-point scale factors used by the chainer for XY and Z coordinates. */
int PrismMapTracker::ClusterChainer::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::ClusterChainer::GetZScale() { return 1 << Z_SCALE_SHIFT; }
/** Logging is not implemented for the GPU chainer. */
void PrismMapTracker::ClusterChainer::SetLogFileName(char *logfile) {}
/** Chain map dumping is not implemented here; always returns NULL. */
ChainMapHeader *PrismMapTracker::ClusterChainer::Dump()
{
return 0;
}
/** Returns the current chainer configuration. */
SySal::ClusterChainer::Configuration PrismMapTracker::ClusterChainer::GetConfiguration()
{
return C;
}
/** Device chain map access is not supported by this implementation. */
SySal::OpaqueChainMap &PrismMapTracker::ClusterChainer::GetDeviceChainMap()
{
throw "Not supported.";
}
/** Measures the emulsion thickness of a view (top edge Z minus bottom edge Z)
and inserts it into the thickness-sample list, which is kept sorted in
ascending order so that GetThickness() can take the median.
@return true once at least one valid sample has been collected. */
bool PrismMapTracker::ClusterChainer::SetReferenceZs(SySal::IntClusterFile &cf, bool istop)
{
    EmulsionEdge t, b;
    int refimg = 0;
    FindEdges(t, b, cf, C.ClusterThreshold, refimg);
    if (t.Valid && b.Valid)
    {
        int i;
        double thk = t.Z - b.Z;
        // BUG FIX: default the insertion point to the end of the list.
        // The original initialized it to 0, so a sample larger than all
        // existing ones was inserted at the front, breaking the sort order
        // (and hence the median computed by GetThickness).
        int place = ThicknessSamples;
        for (i = 0; i < ThicknessSamples; i++)
            if (pThicknessSamples[i] >= thk)
            {
                place = i;
                break;
            }
        // NOTE(review): realloc result is not checked for failure here.
        pThicknessSamples = (double *)realloc(pThicknessSamples, (ThicknessSamples + 1) * sizeof(double));
        // shift the tail up by one slot and insert the new sample
        for (i = ThicknessSamples - 1; i >= place; i--)
            pThicknessSamples[i + 1] = pThicknessSamples[i];
        pThicknessSamples[place] = thk;
        ThicknessSamples++;
    }
    return (ThicknessSamples >= 1);
}
/** Returns the median emulsion thickness over the collected samples.
@throws const char* if no samples are available. */
double PrismMapTracker::GetThickness()
{
if (m_Chainer.ThicknessSamples <= 0) throw "No thickness info available.";
// median of the sample list (mean of the two central samples when even)
if (m_Chainer.ThicknessSamples % 2 == 1) return m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2];
return 0.5 * (m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2] + m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2 - 1]);
}
/** Chain counting is not implemented; always returns 0. */
int PrismMapTracker::ClusterChainer::TotalChains()
{
return 0;
}
/** Fixed-point scale factors used by the tracker. */
int PrismMapTracker::Tracker::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetZScale() { return 1 << Z_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetSlopeScale() { return 1 << SLOPE_SCALE_SHIFT; }
/** Returns the current tracker configuration. */
SySal::Tracker::Configuration PrismMapTracker::Tracker::GetConfiguration()
{
return C;
}
/** Sets a named tuning option; currently only
"_MergeTracksKernel_LoopLimiter_" (an integer) is recognized.
@throws const char* when the value cannot be parsed. */
void PrismMapTracker::Tracker::SetOption(const char *option, const char *value)
{
if (strcmpi(option, "_MergeTracksKernel_LoopLimiter_") == 0)
{
if (sscanf(value, "%d", &_MergeTracksKernel_LoopLimiter_) != 1)
throw "Bad option value.";
}
}
/** Logging is not implemented for the GPU tracker. */
void PrismMapTracker::Tracker::SetLogFileName(char *logfile) {}
/** Returns the host-side track map produced by the last tracking pass. */
SySal::TrackMapHeader *PrismMapTracker::Tracker::Dump() { return pHostTracks; }
int PrismMapTracker::Tracker::TotalTracks() { return pHostTracks->Count; }
/** Host chain-map tracking is not supported by this implementation. */
int PrismMapTracker::Tracker::FindTracks(SySal::ChainMapHeader &cm)
{
throw "Not supported.";
}
/** Superseded API; use Dump() instead. */
int PrismMapTracker::Tracker::FindTracksInDevice(SySal::OpaqueChainMap &cm)
{
throw "Superseded by PrismMapTracker::Tracker::Dump.";
}
/** Returns a copy of the accumulated performance counters. */
PrismMapTracker::PerformanceCounters PrismMapTracker::GetPerformanceCounters() { return m_PerformanceCounters; }
};
}; | 0e202d7634d9aaaef94c26c83b0e5b0db7789309.cu | #include "gpu_incremental_map_track.h"
#include <malloc.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "gpu_defines.h"
namespace SySal
{
namespace GPU
{
void PrismMapTracker::make_threads_blocks(int iterations, dim3 &threads, dim3 &blocks)
{
threads.x = __max(1, __min(iterations, m_Prop.maxThreadsPerBlock));
threads.y = threads.z = 1;
blocks.x = int(ceil((double)iterations / (double)threads.x));
blocks.y = blocks.z = 1;
if (threads.x * blocks.x > m_Prop.maxGridSize[0])
{
printf("\nWARNING: Grid size exceeded! %d blocks of %d threads requested." , blocks.x, threads.x);
}
}
void PrismMapTracker::HardReset()
{
m_Chainer.HardReset();
m_Tracker.HardReset();
}
PrismMapTracker::PrismMapTracker(int gpuid) : CTOR_INIT(pHThisView), CTOR_INIT(pHLastView)
{
m_Chainer.pThis = this;
m_Tracker.pThis = this;
m_DeviceId = gpuid;
m_ChainDumper = 0;
m_PerformanceCounters.GPU = m_DeviceId;
m_PerformanceCounters.GPUClockMHz = 0;
m_PerformanceCounters.GPUCores = 0;
m_PerformanceCounters.MapTimeMS = 0;
m_PerformanceCounters.TrackTimeMS = 0;
m_PerformanceCounters.Clusters = 0;
m_PerformanceCounters.Chains = 0;
m_PerformanceCounters.Tracks = 0;
if (cudaGetDeviceProperties(&m_Prop, m_DeviceId)) throw "Invalid CUDA device.";
m_PerformanceCounters.GPUClockMHz = m_Prop.clockRate / 1000;
int cores = 0;
if (m_Prop.major == 1)
{
cores = 8;
}
else if (m_Prop.major == 2)
{
if (m_Prop.minor == 0)
cores = 32;
else if (m_Prop.minor == 1)
cores = 48;
}
else if (m_Prop.major == 3)
{
cores = 192;
}
else if (m_Prop.major == 5)
{
cores = 128;
}
m_PerformanceCounters.GPUCores = m_Prop.multiProcessorCount * cores;
printf("\n\nCUDA properties for device %d\nCompute capability %d.%d\nMaxThreadsPerMultiProcessor %d\nMaxThreadsPerBlock %d\nMultiprocessors %d\nMaxGridSize: %d %d %d\nMemory %d MB\n\n",
gpuid, m_Prop.major, m_Prop.minor, m_Prop.maxThreadsPerMultiProcessor, m_Prop.maxThreadsPerBlock, m_Prop.multiProcessorCount, m_Prop.maxGridSize[0], m_Prop.maxGridSize[1], m_Prop.maxGridSize[2], m_Prop.totalGlobalMem / 1048576);
}
PrismMapTracker::~PrismMapTracker()
{
HOST_DEALLOC(pHLastView);
HOST_DEALLOC(pHThisView);
}
void PrismMapTracker::SetChainDumper(void *pContext, ChainDumper dmp)
{
m_ChainDumper = dmp;
m_CDContext = pContext;
}
void PrismMapTracker::SendViewsToTracker(int minviewtag, int width, int height, ChainView *pLastView, ChainView *pThisView)
{
if (m_ChainDumper)
{
int sz;
HOST_WISE_ALLOC(pHLastView, sizeof(ChainView));
cudaMemcpy(pHLastView, m_Chainer.pLastView, sizeof(ChainView), cudaMemcpyDeviceToHost);
sz = pHLastView->Size();
HOST_WISE_ALLOC(pHLastView, sz);
cudaMemcpy(pHLastView, m_Chainer.pLastView, sz, cudaMemcpyDeviceToHost);
HOST_WISE_ALLOC(pHThisView, sizeof(ChainView));
cudaMemcpy(pHThisView, m_Chainer.pThisView, sizeof(ChainView), cudaMemcpyDeviceToHost);
sz = pHLastView->Size();
HOST_WISE_ALLOC(pHThisView, sz);
cudaMemcpy(pHThisView, m_Chainer.pThisView, sz, cudaMemcpyDeviceToHost);
m_ChainDumper(m_CDContext, pHLastView, pHThisView);
}
m_Tracker.InternalFindTracks(minviewtag, width, height, pLastView, pThisView);
}
int PrismMapTracker::ClusterChainer::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::ClusterChainer::GetZScale() { return 1 << Z_SCALE_SHIFT; }
void PrismMapTracker::ClusterChainer::SetLogFileName(char *logfile) {}
ChainMapHeader *PrismMapTracker::ClusterChainer::Dump()
{
return 0;
}
SySal::ClusterChainer::Configuration PrismMapTracker::ClusterChainer::GetConfiguration()
{
return C;
}
SySal::OpaqueChainMap &PrismMapTracker::ClusterChainer::GetDeviceChainMap()
{
throw "Not supported.";
}
bool PrismMapTracker::ClusterChainer::SetReferenceZs(SySal::IntClusterFile &cf, bool istop)
{
EmulsionEdge t, b;
int refimg = 0;
FindEdges(t, b, cf, C.ClusterThreshold, refimg);
if (t.Valid && b.Valid)
{
int place = 0;
int i;
double thk = t.Z - b.Z;
for (i = 0; i < ThicknessSamples; i++)
if (pThicknessSamples[i] >= thk)
{
place = i;
break;
}
pThicknessSamples = (double *)realloc(pThicknessSamples, (ThicknessSamples + 1) * sizeof(double));
for (i = ThicknessSamples - 1; i >= place; i--)
pThicknessSamples[i + 1] = pThicknessSamples[i];
pThicknessSamples[place] = thk;
ThicknessSamples++;
}
return (ThicknessSamples >= 1);
}
double PrismMapTracker::GetThickness()
{
if (m_Chainer.ThicknessSamples <= 0) throw "No thickness info available.";
if (m_Chainer.ThicknessSamples % 2 == 1) return m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2];
return 0.5 * (m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2] + m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2 - 1]);
}
int PrismMapTracker::ClusterChainer::TotalChains()
{
return 0;
}
int PrismMapTracker::Tracker::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetZScale() { return 1 << Z_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetSlopeScale() { return 1 << SLOPE_SCALE_SHIFT; }
SySal::Tracker::Configuration PrismMapTracker::Tracker::GetConfiguration()
{
return C;
}
void PrismMapTracker::Tracker::SetOption(const char *option, const char *value)
{
if (strcmpi(option, "_MergeTracksKernel_LoopLimiter_") == 0)
{
if (sscanf(value, "%d", &_MergeTracksKernel_LoopLimiter_) != 1)
throw "Bad option value.";
}
}
void PrismMapTracker::Tracker::SetLogFileName(char *logfile) {}
SySal::TrackMapHeader *PrismMapTracker::Tracker::Dump() { return pHostTracks; }
int PrismMapTracker::Tracker::TotalTracks() { return pHostTracks->Count; }
int PrismMapTracker::Tracker::FindTracks(SySal::ChainMapHeader &cm)
{
throw "Not supported.";
}
int PrismMapTracker::Tracker::FindTracksInDevice(SySal::OpaqueChainMap &cm)
{
throw "Superseded by PrismMapTracker::Tracker::Dump.";
}
PrismMapTracker::PerformanceCounters PrismMapTracker::GetPerformanceCounters() { return m_PerformanceCounters; }
};
}; |
699fd8f64f8716bc41f43af27176bb77053a3a63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
// A common way to represent color images is known as RGBA - the color
// is specified by how much Red, Grean and Blue is in it.
// The 'A' stands for Alpha and is used for transparency, it will be
// ignored in this homework.
// Each channel Red, Blue, Green and Alpha is represented by one byte.
// Since we are using one byte for each color there are 256 different
// possible values for each color. This means we use 4 bytes per pixel.
// Greyscale images are represented by a single intensity value per pixel
// which is one byte in size.
// To convert an image from color to grayscale one simple method is to
// set the intensity to the average of the RGB channels. But we will
// use a more sophisticated method that takes into account how the eye
// perceives color and weights the channels unequally.
// The eye responds most strongly to green followed by red and then blue.
// The NTSC (National Television System Committee) recommends the following
// formula for color to greyscale conversion:
// I = .299f * R + .587f * G + .114f * B
// Notice the trailing f's on the numbers which indicate that they are
// single precision floating point constants and not double precision
// constants.
// You should fill in the kernel as well as set the block and grid sizes
// so that the entire image is processed.
#include "hvr/HW1/utils.h"
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows,
int numCols)
{
// TODO
// Fill in the kernel to convert from color to greyscale
// the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
// The output (greyImage) at each pixel should be the result of
// applying the formula: output = .299f * R + .587f * G + .114f * B;
// Note: We will be ignoring the alpha channel for this conversion
// First create a mapping from the 2D block and grid locations
// to an absolute 2D location in the image, then use that to
// calculate a 1D offset
}
M1_DLL
void your_rgba_to_greyscale(const uchar4* const h_rgbaImage,
uchar4* const d_rgbaImage,
unsigned char* const d_greyImage,
size_t numRows,
size_t numCols)
{
// You must fill in the correct sizes for the blockSize and gridSize
// currently only one block with one thread is being launched
const dim3 blockSize(1, 1, 1); // TODO
const dim3 gridSize(1, 1, 1); // TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0,
d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| 699fd8f64f8716bc41f43af27176bb77053a3a63.cu | // Homework 1
// Color to Greyscale Conversion
// A common way to represent color images is known as RGBA - the color
// is specified by how much Red, Grean and Blue is in it.
// The 'A' stands for Alpha and is used for transparency, it will be
// ignored in this homework.
// Each channel Red, Blue, Green and Alpha is represented by one byte.
// Since we are using one byte for each color there are 256 different
// possible values for each color. This means we use 4 bytes per pixel.
// Greyscale images are represented by a single intensity value per pixel
// which is one byte in size.
// To convert an image from color to grayscale one simple method is to
// set the intensity to the average of the RGB channels. But we will
// use a more sophisticated method that takes into account how the eye
// perceives color and weights the channels unequally.
// The eye responds most strongly to green followed by red and then blue.
// The NTSC (National Television System Committee) recommends the following
// formula for color to greyscale conversion:
// I = .299f * R + .587f * G + .114f * B
// Notice the trailing f's on the numbers which indicate that they are
// single precision floating point constants and not double precision
// constants.
// You should fill in the kernel as well as set the block and grid sizes
// so that the entire image is processed.
#include "hvr/HW1/utils.h"
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows,
int numCols)
{
// TODO
// Fill in the kernel to convert from color to greyscale
// the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
// The output (greyImage) at each pixel should be the result of
// applying the formula: output = .299f * R + .587f * G + .114f * B;
// Note: We will be ignoring the alpha channel for this conversion
// First create a mapping from the 2D block and grid locations
// to an absolute 2D location in the image, then use that to
// calculate a 1D offset
}
M1_DLL
void your_rgba_to_greyscale(const uchar4* const h_rgbaImage,
uchar4* const d_rgbaImage,
unsigned char* const d_greyImage,
size_t numRows,
size_t numCols)
{
// You must fill in the correct sizes for the blockSize and gridSize
// currently only one block with one thread is being launched
const dim3 blockSize(1, 1, 1); // TODO
const dim3 gridSize(1, 1, 1); // TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(
d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
ce81f31cde02b9caf67008eee503e2349bd10465.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const Dtype pos_mult_, const int pos_cid_,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
Dtype w = (label_value == pos_cid_) ? pos_mult_ : 1;
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -w *log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
//std::cout<<"--------liulin WeightedSoftmaxLossForwardGPU....."<<"pos_mult:"<<pos_mult_<<",pos_cid_:"<<pos_cid_<<"\n";
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( WeightedSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
pos_mult_, pos_cid_,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff,
const Dtype pos_mult_, const int pos_cid_,
const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
Dtype w = (label_value == pos_cid_) ? pos_mult_ : 1;
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
//liulin 20161118 modify
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= w;
}
}
}
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
//std::cout<<"--------liulin WeightedSoftmaxLossBackwardGPU....."<<"pos_mult:"<<pos_mult_<<",pos_cid_:"<<pos_cid_<<"\n";
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( WeightedSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
pos_mult_, pos_cid_,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
| ce81f31cde02b9caf67008eee503e2349bd10465.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const Dtype pos_mult_, const int pos_cid_,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
Dtype w = (label_value == pos_cid_) ? pos_mult_ : 1;
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -w *log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
//std::cout<<"--------liulin WeightedSoftmaxLossForwardGPU....."<<"pos_mult:"<<pos_mult_<<",pos_cid_:"<<pos_cid_<<"\n";
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
WeightedSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
pos_mult_, pos_cid_,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff,
const Dtype pos_mult_, const int pos_cid_,
const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
Dtype w = (label_value == pos_cid_) ? pos_mult_ : 1;
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
//liulin 20161118 modify
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= w;
}
}
}
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
//std::cout<<"--------liulin WeightedSoftmaxLossBackwardGPU....."<<"pos_mult:"<<pos_mult_<<",pos_cid_:"<<pos_cid_<<"\n";
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
WeightedSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
pos_mult_, pos_cid_,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
|
887f4e3792f4674022c1ee891bf52fb9b467c7f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <limits.h>
#include <Indice1D.h>
#include "reductionADD.h"
__global__ void setup_kernel_rand(hiprandState_t* tabDevGenerator, int deviceId);
__global__ void monteCarlo(hiprandState_t* tabDevGenerator, int nbFleches, float m,int* ptrDevNx);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ float f(float x);
static __device__ void reduceIntraThread(hiprandState_t* tabDevGenerator,int* tabSM, int nbFleches,int m);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__
void setup_kernel_rand(hiprandState_t* tabDevGenerator, int deviceId)
{
// Customisation du generator:
// Proposition, au lecteur de faire mieux !
// Contrainte : Doit etre diffrent d'un GPU l'autre
// Contrainte : Doit etre diffrent dun thread lautre
const int TID = Indice1D::tid();
int deltaSeed = deviceId * INT_MAX / 10000;
int deltaSequence = deviceId * 100;
int deltaOffset = deviceId * 100;
int seed = 1234 + deltaSeed;
int sequenceNumber = TID + deltaSequence;
int offset = deltaOffset;
hiprand_init(seed, sequenceNumber, offset, &tabDevGenerator[TID]);
}
__global__
void monteCarlo(hiprandState_t* tabDevGenerator, int nbFleches, float m,int* ptrDevNx)
{
extern __shared__ int tabSM[];
reduceIntraThread(tabDevGenerator, tabSM, nbFleches,m);
__syncthreads();
reductionADD<int>(tabSM,ptrDevNx);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void reduceIntraThread(hiprandState_t* tabDevGenerator,int* tabSM, int nbFleches,int m){
const int TID = Indice1D::tid();
const int TID_LOCAL = Indice1D::tidLocal();
const int NB_THREAD = Indice1D::nbThread();
// Global Memory -> Register (optimization)
hiprandState_t localGenerator = tabDevGenerator[TID];
float xAlea;
float yAlea;
float y;
int nx = 0;
for (int i = 1; i <= nbFleches / NB_THREAD; i++)
{
xAlea = hiprand_uniform(&localGenerator); //Genere des nombres entre 0 et 1
yAlea = hiprand_uniform(&localGenerator) * m;
y = f(xAlea);
if (y >= yAlea)
{
nx++;
}
//work(xAlea, yAlea);
}
//Register -> Global Memory
//Necessaire si on veut utiliser notre generator
// - dans dautre kernel
// - avec dautres nombres aleatoires !
tabDevGenerator[TID] = localGenerator;
tabSM[TID_LOCAL]=nx;
}
__device__ float f(float x)
{
return 4.0f / (1.0f + x * x);
}
| 887f4e3792f4674022c1ee891bf52fb9b467c7f9.cu | #include <curand_kernel.h>
#include <limits.h>
#include <Indice1D.h>
#include "reductionADD.h"
__global__ void setup_kernel_rand(curandState* tabDevGenerator, int deviceId);
__global__ void monteCarlo(curandState* tabDevGenerator, int nbFleches, float m,int* ptrDevNx);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ float f(float x);
static __device__ void reduceIntraThread(curandState* tabDevGenerator,int* tabSM, int nbFleches,int m);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__
void setup_kernel_rand(curandState* tabDevGenerator, int deviceId)
{
// Customisation du generator:
// Proposition, au lecteur de faire mieux !
// Contrainte : Doit etre différent d'un GPU à l'autre
// Contrainte : Doit etre différent dun thread à lautre
const int TID = Indice1D::tid();
int deltaSeed = deviceId * INT_MAX / 10000;
int deltaSequence = deviceId * 100;
int deltaOffset = deviceId * 100;
int seed = 1234 + deltaSeed;
int sequenceNumber = TID + deltaSequence;
int offset = deltaOffset;
curand_init(seed, sequenceNumber, offset, &tabDevGenerator[TID]);
}
__global__
void monteCarlo(curandState* tabDevGenerator, int nbFleches, float m,int* ptrDevNx)
{
extern __shared__ int tabSM[];
reduceIntraThread(tabDevGenerator, tabSM, nbFleches,m);
__syncthreads();
reductionADD<int>(tabSM,ptrDevNx);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void reduceIntraThread(curandState* tabDevGenerator,int* tabSM, int nbFleches,int m){
const int TID = Indice1D::tid();
const int TID_LOCAL = Indice1D::tidLocal();
const int NB_THREAD = Indice1D::nbThread();
// Global Memory -> Register (optimization)
curandState localGenerator = tabDevGenerator[TID];
float xAlea;
float yAlea;
float y;
int nx = 0;
for (int i = 1; i <= nbFleches / NB_THREAD; i++)
{
xAlea = curand_uniform(&localGenerator); //Genere des nombres entre 0 et 1
yAlea = curand_uniform(&localGenerator) * m;
y = f(xAlea);
if (y >= yAlea)
{
nx++;
}
//work(xAlea, yAlea);
}
//Register -> Global Memory
//Necessaire si on veut utiliser notre generator
// - dans dautre kernel
// - avec dautres nombres aleatoires !
tabDevGenerator[TID] = localGenerator;
tabSM[TID_LOCAL]=nx;
}
__device__ float f(float x)
{
return 4.0f / (1.0f + x * x);
}
|
5e562f7f33b828b2e526a90645c78c52abe9b00f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "PadOp.h"
namespace paddle {
__global__ void KePad(real* outputs, const real* inputs,
int inC, int inH, int inW,
int padc, int padh, int padw,
int outC, int outH, int outW, int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
outputs[off] = inputs[idx];
}
}
template <>
void Pad<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const PadConf& pad) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
int cstart = pad.channel[0], cend = pad.channel[1];
int hstart = pad.height[0], hend = pad.height[1];
int wstart = pad.width[0], wend = pad.width[1];
int outC = inC + cstart + cend;
int outH = inH + hstart + hend;
int outW = inW + wstart + wend;
hipLaunchKernelGGL(( KePad), dim3(gridSize), dim3(blockSize), 0, STREAM_DEFAULT,
outputs, inputs, inC, inH, inW, cstart, hstart, wstart,
outC, outH, outW, nth);
CHECK_SYNC("Pad");
}
__global__ void KePadDiff(real* inGrad, const real* outGrad,
int inC, int inH, int inW,
int padc, int padh, int padw,
int outC, int outH, int outW, int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
inGrad[idx] += outGrad[off];
}
}
template <>
void PadGrad<DEVICE_TYPE_GPU>(real* inGrad,
const real* outGrad,
const int num,
const int inC,
const int inH,
const int inW,
const PadConf& pad) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
int cstart = pad.channel[0], cend = pad.channel[1];
int hstart = pad.height[0], hend = pad.height[1];
int wstart = pad.width[0], wend = pad.width[1];
int outC = inC + cstart + cend;
int outH = inH + hstart + hend;
int outW = inW + wstart + wend;
hipLaunchKernelGGL(( KePadDiff) , dim3(gridSize), dim3(blockSize), 0, STREAM_DEFAULT,
inGrad, outGrad, inC, inH, inW, cstart, hstart, wstart,
outC, outH, outW, nth);
CHECK_SYNC("PadGrad");
}
} // namespace paddle
| 5e562f7f33b828b2e526a90645c78c52abe9b00f.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "PadOp.h"
namespace paddle {
__global__ void KePad(real* outputs, const real* inputs,
int inC, int inH, int inW,
int padc, int padh, int padw,
int outC, int outH, int outW, int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
outputs[off] = inputs[idx];
}
}
template <>
void Pad<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const PadConf& pad) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
int cstart = pad.channel[0], cend = pad.channel[1];
int hstart = pad.height[0], hend = pad.height[1];
int wstart = pad.width[0], wend = pad.width[1];
int outC = inC + cstart + cend;
int outH = inH + hstart + hend;
int outW = inW + wstart + wend;
KePad<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
(outputs, inputs, inC, inH, inW, cstart, hstart, wstart,
outC, outH, outW, nth);
CHECK_SYNC("Pad");
}
__global__ void KePadDiff(real* inGrad, const real* outGrad,
int inC, int inH, int inW,
int padc, int padh, int padw,
int outC, int outH, int outW, int nthreads) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
inGrad[idx] += outGrad[off];
}
}
template <>
void PadGrad<DEVICE_TYPE_GPU>(real* inGrad,
const real* outGrad,
const int num,
const int inC,
const int inH,
const int inW,
const PadConf& pad) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
int cstart = pad.channel[0], cend = pad.channel[1];
int hstart = pad.height[0], hend = pad.height[1];
int wstart = pad.width[0], wend = pad.width[1];
int outC = inC + cstart + cend;
int outH = inH + hstart + hend;
int outW = inW + wstart + wend;
KePadDiff <<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
(inGrad, outGrad, inC, inH, inW, cstart, hstart, wstart,
outC, outH, outW, nth);
CHECK_SYNC("PadGrad");
}
} // namespace paddle
|
9aed3c484d44e5dc88f2eb138721f71aded37d7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dev_idata(idata, idata + n);
thrust::device_vector<int> dev_odata(n);
timer().startGpuTimer();
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
timer().endGpuTimer();
thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
| 9aed3c484d44e5dc88f2eb138721f71aded37d7c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dev_idata(idata, idata + n);
thrust::device_vector<int> dev_odata(n);
timer().startGpuTimer();
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
timer().endGpuTimer();
thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
|
f88616a86269317d3f61777306df2c4c0096f904.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "curandom.hpp"
__constant__ seed_t RNG_G = (seed_t)(6364136223846793005ull);
__constant__ seed_t RNG_C = (seed_t)(1442695040888963407ull);
__constant__ seed_t RNG_P = (seed_t)(1) << 63;
__global__ void rnd_real_kernel(int n, seed_t *seeds, float *reals) {
float inv_RNG_P = (float)(1.0) / (float)((seed_t)(1) << 63);
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
seeds[i] = (RNG_G * seeds[i] + RNG_C) % RNG_P;
reals[i] = seeds[i] * inv_RNG_P;
}
}
| f88616a86269317d3f61777306df2c4c0096f904.cu | #include "curandom.hpp"
__constant__ seed_t RNG_G = (seed_t)(6364136223846793005ull);
__constant__ seed_t RNG_C = (seed_t)(1442695040888963407ull);
__constant__ seed_t RNG_P = (seed_t)(1) << 63;
__global__ void rnd_real_kernel(int n, seed_t *seeds, float *reals) {
float inv_RNG_P = (float)(1.0) / (float)((seed_t)(1) << 63);
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
seeds[i] = (RNG_G * seeds[i] + RNG_C) % RNG_P;
reals[i] = seeds[i] * inv_RNG_P;
}
}
|
76ae4ed6f6c867eeebf15948cbdda7abed80b337.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil_inline.h>
#include "MyFirst_kernel.cu"
//
// main host code
//
int main(int argc, char **argv)
{
float *h_x, *d_x;
int nblocks, nthreads, nsize, n;
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 16;
nsize = nblocks*nthreads;
// allocate memory for array
h_x = (float *)malloc(nsize*sizeof(float));
hipMalloc((void **)&d_x, nsize*sizeof(float));
// execute kernel
hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
// copy results from device to host
hipMemcpy(h_x,d_x,nsize*sizeof(float),hipMemcpyDeviceToHost);
// print results
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
// check results
float sumcheck = 0.;
float sumcheckcorrect = 0.;
for (int i = 0; i < nblocks * nthreads; ++i) {
sumcheck += h_x[i];
}
for (int j=0; j<nthreads; ++j) {
sumcheckcorrect += j;
}
sumcheckcorrect *= 2;
if (fabs(sumcheck-sumcheckcorrect)<1e-6) {
printf("PASSED!\n");
}
else
{
printf("FAILED!\n");
}
// free memory
hipFree(d_x);
free(h_x);
return 0;
}
| 76ae4ed6f6c867eeebf15948cbdda7abed80b337.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil_inline.h>
#include "MyFirst_kernel.cu"
//
// main host code
//
int main(int argc, char **argv)
{
float *h_x, *d_x;
int nblocks, nthreads, nsize, n;
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 16;
nsize = nblocks*nthreads;
// allocate memory for array
h_x = (float *)malloc(nsize*sizeof(float));
cudaMalloc((void **)&d_x, nsize*sizeof(float));
// execute kernel
my_first_kernel<<<nblocks,nthreads>>>(d_x);
// copy results from device to host
cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost);
// print results
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
// check results
float sumcheck = 0.;
float sumcheckcorrect = 0.;
for (int i = 0; i < nblocks * nthreads; ++i) {
sumcheck += h_x[i];
}
for (int j=0; j<nthreads; ++j) {
sumcheckcorrect += j;
}
sumcheckcorrect *= 2;
if (fabs(sumcheck-sumcheckcorrect)<1e-6) {
printf("PASSED!\n");
}
else
{
printf("FAILED!\n");
}
// free memory
cudaFree(d_x);
free(h_x);
return 0;
}
|
e8c65239925f3ee65200416f058c2b6d308c5e45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void add(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<n)
{
sum[i] = a[i] + b[i];
}
} | e8c65239925f3ee65200416f058c2b6d308c5e45.cu | extern "C"
__global__ void add(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<n)
{
sum[i] = a[i] + b[i];
}
} |
bbad62fa5a21489ecab06061c9be80f7b277703b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMultiplyAndTanh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int numARows = 1;
int numAColumns = 1;
int numBRows = 1;
int numBColumns = 1;
int numCRows = 1;
int numCColumns = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixMultiplyAndTanh), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,b,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixMultiplyAndTanh), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,b,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixMultiplyAndTanh), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,b,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bbad62fa5a21489ecab06061c9be80f7b277703b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixMultiplyAndTanh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int numARows = 1;
int numAColumns = 1;
int numBRows = 1;
int numBColumns = 1;
int numCRows = 1;
int numCColumns = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixMultiplyAndTanh<<<gridBlock,threadBlock>>>(A,B,C,b,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixMultiplyAndTanh<<<gridBlock,threadBlock>>>(A,B,C,b,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixMultiplyAndTanh<<<gridBlock,threadBlock>>>(A,B,C,b,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b054bc410ddd75bc0627338cb3ec1e3c979dfbe1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// DepthwiseConvInt8Execution.cpp
// MNN
//
// Created by MNN on 2023/01/15.
// Copyright 2018, Alibaba Group Holding Limited
//
#ifdef ENABLE_CUDA_QUANT
#include "DepthwiseConvInt8Execution.hpp"
#include "../Raster.cuh"
#include "../MNNCUDADefine.hpp"
#include "../MNNCUDAFunction.cuh"
#include <sm_61_intrinsics.h>
namespace MNN {
namespace CUDA {
__inline__ __device__
int32_t vecDot(char4 inp0, char4 inp1, int32_t val)
{
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610))
return __dp4a(inp0, inp1, val);
#else
int32_t res = val;
res += inp0.x * inp1.x;
res += inp0.y * inp1.y;
res += inp0.z * inp1.z;
res += inp0.w * inp1.w;
return res;
#endif
}
__global__ void CONV_DW_INT8_(const int8_t* input,
const int8_t* kernel,
const int32_t* bias,
const float* scale,
int8_t *output,
const int8_t maxV,
const int8_t minV,
const int iw,
const int ih,
const int c,
const int c_p,
const int ow,
const int oh,
const int kw,
const int kh,
const int k_p,
const int dw,
const int dh,
const int sw,
const int sh,
const int pw,
const int ph,
const int total,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/4; index += blockDim.x * gridDim.x) {
int oz_4, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_4);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_4 << 2;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
int4 bias4 = ((int4 *)(bias + oz))[0];
int color0 = bias4.x;
int color1 = bias4.y;
int color2 = bias4.z;
int color3 = bias4.w;
int fxSta = max(0, (UP_DIV(-ix, dw)));
int fySta = max(0, (UP_DIV(-iy, dh)));
int fxEnd = min(kw, UP_DIV(iw - ix, dw));
int fyEnd = min(kh, UP_DIV(ih - iy, dh));
for (int fy=fySta; fy<fyEnd; ++fy) {
int sy = fy*dh + iy;
for (int fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx*dw + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
char4 inp4 = ((char4 *)(input + src_offset))[0];
char4 ker4 = ((char4 *)(kernel + (fy * kw + fx) * c_p + oz))[0];;
color0 = color0 + (int)inp4.x * (int)ker4.x;
color1 = color1 + (int)inp4.y * (int)ker4.y;
color2 = color2 + (int)inp4.z * (int)ker4.z;
color3 = color3 + (int)inp4.w * (int)ker4.w;
}
}
float4 scale4 = ((float4 *)(scale + oz))[0];
color0 = __float2int_rn((float)color0 * scale4.x);
color1 = __float2int_rn((float)color1 * scale4.y);
color2 = __float2int_rn((float)color2 * scale4.z);
color3 = __float2int_rn((float)color3 * scale4.w);
color0 = max(color0, minV);
color0 = min(color0, maxV);
color1 = max(color1, minV);
color1 = min(color1, maxV);
color2 = max(color2, minV);
color2 = min(color2, maxV);
color3 = max(color3, minV);
color3 = min(color3, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
((char4*)(output + dst_offset))[0] = make_char4((color0), (color1), (color2), (color3));
}
}
__global__ void CONV_DW3x3S1_INT8_OPT(const int8_t* input,
const int8_t* kernel,
const int32_t* bias,
const float* scale,
int8_t *output,
const int8_t maxV,
const int8_t minV,
const int iw,
const int ih,
const int c,
const int c_p,
const int ow,
const int oh,
const int kw,
const int kh,
const int k_p,
const int dw,
const int dh,
const int sw,
const int sh,
const int pw,
const int ph,
const int total,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/8; index += blockDim.x * gridDim.x) {
int oz, ix, oy, ox, iy, ob;
d_oc.divmod(index, iy, oz);
d_ow.divmod(iy, ix, ox);
d_oh.divmod(ix, ob, oy);
ox = ox << 1;
oz = oz << 2;
ix = ox - 1;
iy = oy - 1;
int4 bias4 = ((int4 *)(bias + oz))[0];
int color0_0 = (int)bias4.x;
int color0_1 = color0_0;
int color1_0 = (int)bias4.y;
int color1_1 = color1_0;
int color2_0 = (int)bias4.z;
int color2_1 = color2_0;
int color3_0 = (int)bias4.w;
int color3_1 = color3_0;
char4 zero4 = make_char4(0, 0, 0, 0);
char4 inp4[12], ker4[3][3];
#pragma unroll
for(int j=0; j<3; j++) {
if(iy < 0 && j==0) {
for(int i=0; i<4; i++) {
inp4[i] = zero4;
}
continue;
}
if(iy+2 > ih-1 && j==2) {
for(int i=0; i<4; i++) {
inp4[8+i] = zero4;
}
continue;
}
for(int i=0; i<4; i++) {
if(ix < 0 && i==0) {
for(int j=0; j<3; j++) {
inp4[4*j+0] = zero4;
}
continue;
}
if(ix+3 > iw-1 && i==3) {
for(int j=0; j<3; j++) {
inp4[4*j+3] = zero4;
}
continue;
}
int src_offset = ((ob * ih + iy+j) * iw + ix+i) * c_p + oz;
inp4[4*j+i] = ((char4 *)(input + src_offset))[0];
}
}
for(int j=0; j<3; j++) {
for(int i=0; i<3; i++) {
ker4[j][i] = ((char4 *)(kernel + (j * 3 + i) * c_p + oz))[0];// kernel[(j * 3 + i) * c_p + oz];
}
}
// 1st channel
char4 tmp_ker4 = make_char4(ker4[0][0].x, ker4[0][1].x, ker4[0][2].x, ker4[1][0].x);
color0_0 += vecDot(make_char4(inp4[0].x, inp4[1].x, inp4[2].x, inp4[4].x), tmp_ker4, 0);
color0_1 += vecDot(make_char4(inp4[1].x, inp4[2].x, inp4[3].x, inp4[5].x), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].x, ker4[1][2].x, ker4[2][0].x, ker4[2][1].x);
color0_0 += vecDot(make_char4(inp4[5].x, inp4[6].x, inp4[8].x, inp4[9].x), tmp_ker4, 0);
color0_1 += vecDot(make_char4(inp4[6].x, inp4[7].x, inp4[9].x, inp4[10].x), tmp_ker4, 0);
color0_0 += inp4[10].x * ker4[2][2].x;
color0_1 += inp4[11].x * ker4[2][2].x;
// 2nd channel
tmp_ker4 = make_char4(ker4[0][0].y, ker4[0][1].y, ker4[0][2].y, ker4[1][0].y);
color1_0 += vecDot(make_char4(inp4[0].y, inp4[1].y, inp4[2].y, inp4[4].y), tmp_ker4, 0);
color1_1 += vecDot(make_char4(inp4[1].y, inp4[2].y, inp4[3].y, inp4[5].y), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].y, ker4[1][2].y, ker4[2][0].y, ker4[2][1].y);
color1_0 += vecDot(make_char4(inp4[5].y, inp4[6].y, inp4[8].y, inp4[9].y), tmp_ker4, 0);
color1_1 += vecDot(make_char4(inp4[6].y, inp4[7].y, inp4[9].y, inp4[10].y), tmp_ker4, 0);
color1_0 += inp4[10].y * ker4[2][2].y;
color1_1 += inp4[11].y * ker4[2][2].y;
// 3rd channel
tmp_ker4 = make_char4(ker4[0][0].z, ker4[0][1].z, ker4[0][2].z, ker4[1][0].z);
color2_0 += vecDot(make_char4(inp4[0].z, inp4[1].z, inp4[2].z, inp4[4].z), tmp_ker4, 0);
color2_1 += vecDot(make_char4(inp4[1].z, inp4[2].z, inp4[3].z, inp4[5].z), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].z, ker4[1][2].z, ker4[2][0].z, ker4[2][1].z);
color2_0 += vecDot(make_char4(inp4[5].z, inp4[6].z, inp4[8].z, inp4[9].z), tmp_ker4, 0);
color2_1 += vecDot(make_char4(inp4[6].z, inp4[7].z, inp4[9].z, inp4[10].z), tmp_ker4, 0);
color2_0 += inp4[10].z * ker4[2][2].z;
color2_1 += inp4[11].z * ker4[2][2].z;
// 4th channel
tmp_ker4 = make_char4(ker4[0][0].w, ker4[0][1].w, ker4[0][2].w, ker4[1][0].w);
color3_0 += vecDot(make_char4(inp4[0].w, inp4[1].w, inp4[2].w, inp4[4].w), tmp_ker4, 0);
color3_1 += vecDot(make_char4(inp4[1].w, inp4[2].w, inp4[3].w, inp4[5].w), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].w, ker4[1][2].w, ker4[2][0].w, ker4[2][1].w);
color3_0 += vecDot(make_char4(inp4[5].w, inp4[6].w, inp4[8].w, inp4[9].w), tmp_ker4, 0);
color3_1 += vecDot(make_char4(inp4[6].w, inp4[7].w, inp4[9].w, inp4[10].w), tmp_ker4, 0);
color3_0 += inp4[10].w * ker4[2][2].w;
color3_1 += inp4[11].w * ker4[2][2].w;
// Multiple scale
float4 scale4 = ((float4 *)(scale + oz))[0];
color0_0 = __float2int_rn((float)color0_0 * scale4.x);
color0_1 = __float2int_rn((float)color0_1 * scale4.x);
color1_0 = __float2int_rn((float)color1_0 * scale4.y);
color1_1 = __float2int_rn((float)color1_1 * scale4.y);
color2_0 = __float2int_rn((float)color2_0 * scale4.z);
color2_1 = __float2int_rn((float)color2_1 * scale4.z);
color3_0 = __float2int_rn((float)color3_0 * scale4.w);
color3_1 = __float2int_rn((float)color3_1 * scale4.w);
// Clamp
color0_0 = max(color0_0, minV);
color0_0 = min(color0_0, maxV);
color0_1 = max(color0_1, minV);
color0_1 = min(color0_1, maxV);
color1_0 = max(color1_0, minV);
color1_0 = min(color1_0, maxV);
color1_1 = max(color1_1, minV);
color1_1 = min(color1_1, maxV);
color2_0 = max(color2_0, minV);
color2_0 = min(color2_0, maxV);
color2_1 = max(color2_1, minV);
color2_1 = min(color2_1, maxV);
color3_0 = max(color3_0, minV);
color3_0 = min(color3_0, maxV);
color3_1 = max(color3_1, minV);
color3_1 = min(color3_1, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
((char4*)(output + dst_offset))[0] = make_char4((color0_0), (color1_0), (color2_0), (color3_0));
((char4*)(output + dst_offset + c_p))[0] = make_char4((color0_1), (color1_1), (color2_1), (color3_1));
}
}
DepthwiseConvInt8Execution::DepthwiseConvInt8Execution(Backend* backend, const Op* op, std::shared_ptr<ConvInt8CutlassExecution::Resource> res) : ConvInt8CutlassExecution(backend, op, res) {
mOp = op;
mResource = res;//
}
DepthwiseConvInt8Execution::~DepthwiseConvInt8Execution() {
// Do nothing
}
bool DepthwiseConvInt8Execution::onClone(Backend* bn, const Op* op, Execution** dst) {
if (nullptr == dst) {
return true;
}
auto exe = new DepthwiseConvInt8Execution(bn, op, mResource);
*dst = exe;
return true;
}
ErrorCode DepthwiseConvInt8Execution::onResize(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
auto input = inputs[0];
auto output = outputs[0];
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
std::vector<float> inputQuantInfo = TensorUtils::getQuantInfo(input);
std::vector<float> outputQuantInfo = TensorUtils::getQuantInfo(output);
mResource->updateInputOutputScale(inputQuantInfo, outputQuantInfo);
runtime->memcpy(mResource->mBiasInt32Ptr, mResource->mBiasInt32Vec, mResource->mOutputChannelPack*sizeof(int32_t), MNNMemcpyHostToDevice);
runtime->memcpy(mResource->mScaleFloatPtr, mResource->mScaleFloatVec, mResource->mOutputChannelPack*sizeof(float), MNNMemcpyHostToDevice);
mPads = ConvolutionCommon::convolutionPad(input, output, mOp->main_as_Convolution2D()->common());
auto mCommon = mOp->main_as_Convolution2D()->common();
const int src_width = input->width();
const int src_height = input->height();
const int dst_width = output->width();
const int dst_height = output->height();
const int strideY = mCommon->strideY();
const int strideX = mCommon->strideX();
const int dilateY = mCommon->dilateY();
const int dilateX = mCommon->dilateX();
const int kernel_height = mCommon->kernelY();
const int kernel_width = mCommon->kernelX();
mStrides = std::make_pair(strideX, strideY);
mDilates = std::make_pair(dilateX, dilateY);
mKernels = std::make_pair(kernel_width, kernel_height);
auto clamp_max = mResource->mClampMax;
auto clamp_min = mResource->mClampMin;
if (mCommon->relu()) {
clamp_min = 0;
}
if (mCommon->relu6()) {
clamp_min = 0;
clamp_max = 6;
}
mClamps = std::make_pair(clamp_max, clamp_min);
// MNN_PRINT("%d-%d-%d-%d, %d-%d-%d-%d\n", mKernels.first, mKernels.second, mStrides.first, mStrides.second, mDilates.first, mDilates.second, mPads.first, mPads.second);
return NO_ERROR;
}
ErrorCode DepthwiseConvInt8Execution::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto& prop = runtime->prop();
auto input = inputs[0];
auto output = outputs[0];
const int batch = input->batch();
const int c = input->channel();
const int c_p = UP_DIV(c, INT8_PACK_NUMBER) * INT8_PACK_NUMBER;
const int iw = input->width();
const int ih = input->height();
const int ow = output->width();
const int oh = output->height();
const int total = batch * c_p * oh * ow;
const int k_p = UP_DIV(mKernels.first * mKernels.second, INT8_PACK_NUMBER) * INT8_PACK_NUMBER;
const auto weightPtr = mResource->mWeightInt8Ptr;
const auto biasPtr = mResource->mBiasInt32Ptr;
const auto scalePtr = mResource->mScaleFloatPtr;
int limitThreads = UP_DIV(total, prop.multiProcessorCount);
int threads_num = ALIMIN(prop.maxThreadsPerBlock / 2, limitThreads);
int block_num = prop.multiProcessorCount;
DivModFast d_oc(c_p / 4);
DivModFast d_ow(ow);
DivModFast d_oh(oh);
if(mKernels.first==3 && mKernels.second==3 && mStrides.first==1 && mStrides.second==1 && mPads.first==1 && mPads.second==1 && ow % 2 ==0) {
DivModFast d_ow2(ow/2);
hipLaunchKernelGGL(( CONV_DW3x3S1_INT8_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)inputs[0]->deviceId(), (const int8_t*)weightPtr,
(const int32_t*)biasPtr, (const float*)scalePtr, (int8_t*)outputs[0]->deviceId(),
mClamps.first, mClamps.second, iw, ih, c, c_p, ow, oh, mKernels.first, mKernels.second, k_p,
mDilates.first, mDilates.second, mStrides.first, mStrides.second, mPads.first, mPads.second,
total, d_oc, d_ow2, d_oh);
checkKernelErrors;
return NO_ERROR;
}
block_num = runtime->blocks_num(total);
threads_num = runtime->threads_num();
hipLaunchKernelGGL(( CONV_DW_INT8_), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)inputs[0]->deviceId(), (const int8_t*)weightPtr,
(const int32_t*)biasPtr, (const float*)scalePtr, (int8_t*)outputs[0]->deviceId(),
mClamps.first, mClamps.second, iw, ih, c, c_p, ow, oh, mKernels.first, mKernels.second, k_p,
mDilates.first, mDilates.second, mStrides.first, mStrides.second, mPads.first, mPads.second,
total, d_oc, d_ow, d_oh);
checkKernelErrors;
return NO_ERROR;
}
class DepthWiseConvInt8ExecutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (inputs.size() > 1) {
MNN_PRINT("OpType_DepthwiseConvInt8 CUDA not support multi input!, fall back...\n");
return nullptr;
}
std::shared_ptr<ConvInt8CutlassExecution::Resource> resource(new ConvInt8CutlassExecution::Resource(backend, op));
return new DepthwiseConvInt8Execution(backend, op, resource);
}
};
static CUDACreatorRegister<DepthWiseConvInt8ExecutionCreator> __init(OpType_DepthwiseConvInt8);
} // namespace CUDA
} // namespace MNN
#endif | b054bc410ddd75bc0627338cb3ec1e3c979dfbe1.cu | //
// DepthwiseConvInt8Execution.cpp
// MNN
//
// Created by MNN on 2023/01/15.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifdef ENABLE_CUDA_QUANT
#include "DepthwiseConvInt8Execution.hpp"
#include "../Raster.cuh"
#include "../MNNCUDADefine.hpp"
#include "../MNNCUDAFunction.cuh"
#include <sm_61_intrinsics.h>
namespace MNN {
namespace CUDA {
__inline__ __device__
int32_t vecDot(char4 inp0, char4 inp1, int32_t val)
{
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610))
return __dp4a(inp0, inp1, val);
#else
int32_t res = val;
res += inp0.x * inp1.x;
res += inp0.y * inp1.y;
res += inp0.z * inp1.z;
res += inp0.w * inp1.w;
return res;
#endif
}
__global__ void CONV_DW_INT8_(const int8_t* input,
const int8_t* kernel,
const int32_t* bias,
const float* scale,
int8_t *output,
const int8_t maxV,
const int8_t minV,
const int iw,
const int ih,
const int c,
const int c_p,
const int ow,
const int oh,
const int kw,
const int kh,
const int k_p,
const int dw,
const int dh,
const int sw,
const int sh,
const int pw,
const int ph,
const int total,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/4; index += blockDim.x * gridDim.x) {
int oz_4, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_4);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_4 << 2;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
int4 bias4 = ((int4 *)(bias + oz))[0];
int color0 = bias4.x;
int color1 = bias4.y;
int color2 = bias4.z;
int color3 = bias4.w;
int fxSta = max(0, (UP_DIV(-ix, dw)));
int fySta = max(0, (UP_DIV(-iy, dh)));
int fxEnd = min(kw, UP_DIV(iw - ix, dw));
int fyEnd = min(kh, UP_DIV(ih - iy, dh));
for (int fy=fySta; fy<fyEnd; ++fy) {
int sy = fy*dh + iy;
for (int fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx*dw + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
char4 inp4 = ((char4 *)(input + src_offset))[0];
char4 ker4 = ((char4 *)(kernel + (fy * kw + fx) * c_p + oz))[0];;
color0 = color0 + (int)inp4.x * (int)ker4.x;
color1 = color1 + (int)inp4.y * (int)ker4.y;
color2 = color2 + (int)inp4.z * (int)ker4.z;
color3 = color3 + (int)inp4.w * (int)ker4.w;
}
}
float4 scale4 = ((float4 *)(scale + oz))[0];
color0 = __float2int_rn((float)color0 * scale4.x);
color1 = __float2int_rn((float)color1 * scale4.y);
color2 = __float2int_rn((float)color2 * scale4.z);
color3 = __float2int_rn((float)color3 * scale4.w);
color0 = max(color0, minV);
color0 = min(color0, maxV);
color1 = max(color1, minV);
color1 = min(color1, maxV);
color2 = max(color2, minV);
color2 = min(color2, maxV);
color3 = max(color3, minV);
color3 = min(color3, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
((char4*)(output + dst_offset))[0] = make_char4((color0), (color1), (color2), (color3));
}
}
__global__ void CONV_DW3x3S1_INT8_OPT(const int8_t* input,
const int8_t* kernel,
const int32_t* bias,
const float* scale,
int8_t *output,
const int8_t maxV,
const int8_t minV,
const int iw,
const int ih,
const int c,
const int c_p,
const int ow,
const int oh,
const int kw,
const int kh,
const int k_p,
const int dw,
const int dh,
const int sw,
const int sh,
const int pw,
const int ph,
const int total,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/8; index += blockDim.x * gridDim.x) {
int oz, ix, oy, ox, iy, ob;
d_oc.divmod(index, iy, oz);
d_ow.divmod(iy, ix, ox);
d_oh.divmod(ix, ob, oy);
ox = ox << 1;
oz = oz << 2;
ix = ox - 1;
iy = oy - 1;
int4 bias4 = ((int4 *)(bias + oz))[0];
int color0_0 = (int)bias4.x;
int color0_1 = color0_0;
int color1_0 = (int)bias4.y;
int color1_1 = color1_0;
int color2_0 = (int)bias4.z;
int color2_1 = color2_0;
int color3_0 = (int)bias4.w;
int color3_1 = color3_0;
char4 zero4 = make_char4(0, 0, 0, 0);
char4 inp4[12], ker4[3][3];
#pragma unroll
for(int j=0; j<3; j++) {
if(iy < 0 && j==0) {
for(int i=0; i<4; i++) {
inp4[i] = zero4;
}
continue;
}
if(iy+2 > ih-1 && j==2) {
for(int i=0; i<4; i++) {
inp4[8+i] = zero4;
}
continue;
}
for(int i=0; i<4; i++) {
if(ix < 0 && i==0) {
for(int j=0; j<3; j++) {
inp4[4*j+0] = zero4;
}
continue;
}
if(ix+3 > iw-1 && i==3) {
for(int j=0; j<3; j++) {
inp4[4*j+3] = zero4;
}
continue;
}
int src_offset = ((ob * ih + iy+j) * iw + ix+i) * c_p + oz;
inp4[4*j+i] = ((char4 *)(input + src_offset))[0];
}
}
for(int j=0; j<3; j++) {
for(int i=0; i<3; i++) {
ker4[j][i] = ((char4 *)(kernel + (j * 3 + i) * c_p + oz))[0];// kernel[(j * 3 + i) * c_p + oz];
}
}
// 1st channel
char4 tmp_ker4 = make_char4(ker4[0][0].x, ker4[0][1].x, ker4[0][2].x, ker4[1][0].x);
color0_0 += vecDot(make_char4(inp4[0].x, inp4[1].x, inp4[2].x, inp4[4].x), tmp_ker4, 0);
color0_1 += vecDot(make_char4(inp4[1].x, inp4[2].x, inp4[3].x, inp4[5].x), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].x, ker4[1][2].x, ker4[2][0].x, ker4[2][1].x);
color0_0 += vecDot(make_char4(inp4[5].x, inp4[6].x, inp4[8].x, inp4[9].x), tmp_ker4, 0);
color0_1 += vecDot(make_char4(inp4[6].x, inp4[7].x, inp4[9].x, inp4[10].x), tmp_ker4, 0);
color0_0 += inp4[10].x * ker4[2][2].x;
color0_1 += inp4[11].x * ker4[2][2].x;
// 2nd channel
tmp_ker4 = make_char4(ker4[0][0].y, ker4[0][1].y, ker4[0][2].y, ker4[1][0].y);
color1_0 += vecDot(make_char4(inp4[0].y, inp4[1].y, inp4[2].y, inp4[4].y), tmp_ker4, 0);
color1_1 += vecDot(make_char4(inp4[1].y, inp4[2].y, inp4[3].y, inp4[5].y), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].y, ker4[1][2].y, ker4[2][0].y, ker4[2][1].y);
color1_0 += vecDot(make_char4(inp4[5].y, inp4[6].y, inp4[8].y, inp4[9].y), tmp_ker4, 0);
color1_1 += vecDot(make_char4(inp4[6].y, inp4[7].y, inp4[9].y, inp4[10].y), tmp_ker4, 0);
color1_0 += inp4[10].y * ker4[2][2].y;
color1_1 += inp4[11].y * ker4[2][2].y;
// 3rd channel
tmp_ker4 = make_char4(ker4[0][0].z, ker4[0][1].z, ker4[0][2].z, ker4[1][0].z);
color2_0 += vecDot(make_char4(inp4[0].z, inp4[1].z, inp4[2].z, inp4[4].z), tmp_ker4, 0);
color2_1 += vecDot(make_char4(inp4[1].z, inp4[2].z, inp4[3].z, inp4[5].z), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].z, ker4[1][2].z, ker4[2][0].z, ker4[2][1].z);
color2_0 += vecDot(make_char4(inp4[5].z, inp4[6].z, inp4[8].z, inp4[9].z), tmp_ker4, 0);
color2_1 += vecDot(make_char4(inp4[6].z, inp4[7].z, inp4[9].z, inp4[10].z), tmp_ker4, 0);
color2_0 += inp4[10].z * ker4[2][2].z;
color2_1 += inp4[11].z * ker4[2][2].z;
// 4th channel
tmp_ker4 = make_char4(ker4[0][0].w, ker4[0][1].w, ker4[0][2].w, ker4[1][0].w);
color3_0 += vecDot(make_char4(inp4[0].w, inp4[1].w, inp4[2].w, inp4[4].w), tmp_ker4, 0);
color3_1 += vecDot(make_char4(inp4[1].w, inp4[2].w, inp4[3].w, inp4[5].w), tmp_ker4, 0);
tmp_ker4 = make_char4(ker4[1][1].w, ker4[1][2].w, ker4[2][0].w, ker4[2][1].w);
color3_0 += vecDot(make_char4(inp4[5].w, inp4[6].w, inp4[8].w, inp4[9].w), tmp_ker4, 0);
color3_1 += vecDot(make_char4(inp4[6].w, inp4[7].w, inp4[9].w, inp4[10].w), tmp_ker4, 0);
color3_0 += inp4[10].w * ker4[2][2].w;
color3_1 += inp4[11].w * ker4[2][2].w;
// Multiple scale
float4 scale4 = ((float4 *)(scale + oz))[0];
color0_0 = __float2int_rn((float)color0_0 * scale4.x);
color0_1 = __float2int_rn((float)color0_1 * scale4.x);
color1_0 = __float2int_rn((float)color1_0 * scale4.y);
color1_1 = __float2int_rn((float)color1_1 * scale4.y);
color2_0 = __float2int_rn((float)color2_0 * scale4.z);
color2_1 = __float2int_rn((float)color2_1 * scale4.z);
color3_0 = __float2int_rn((float)color3_0 * scale4.w);
color3_1 = __float2int_rn((float)color3_1 * scale4.w);
// Clamp
color0_0 = max(color0_0, minV);
color0_0 = min(color0_0, maxV);
color0_1 = max(color0_1, minV);
color0_1 = min(color0_1, maxV);
color1_0 = max(color1_0, minV);
color1_0 = min(color1_0, maxV);
color1_1 = max(color1_1, minV);
color1_1 = min(color1_1, maxV);
color2_0 = max(color2_0, minV);
color2_0 = min(color2_0, maxV);
color2_1 = max(color2_1, minV);
color2_1 = min(color2_1, maxV);
color3_0 = max(color3_0, minV);
color3_0 = min(color3_0, maxV);
color3_1 = max(color3_1, minV);
color3_1 = min(color3_1, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
((char4*)(output + dst_offset))[0] = make_char4((color0_0), (color1_0), (color2_0), (color3_0));
((char4*)(output + dst_offset + c_p))[0] = make_char4((color0_1), (color1_1), (color2_1), (color3_1));
}
}
DepthwiseConvInt8Execution::DepthwiseConvInt8Execution(Backend* backend, const Op* op, std::shared_ptr<ConvInt8CutlassExecution::Resource> res) : ConvInt8CutlassExecution(backend, op, res) {
    // Keep the op description and the shared quantized weight/bias/scale
    // resource so onResize/onExecute (and clones) can reuse them directly.
    mResource = res;
    mOp       = op;
}
DepthwiseConvInt8Execution::~DepthwiseConvInt8Execution() {
    // Intentionally empty: all device buffers are owned by the shared
    // Resource, which is released by its own shared_ptr.
}
bool DepthwiseConvInt8Execution::onClone(Backend* bn, const Op* op, Execution** dst) {
    // A null destination is a capability query: report that cloning works.
    if (dst == nullptr) {
        return true;
    }
    // Clones share mResource (weights/bias/scale), so creation is cheap.
    *dst = new DepthwiseConvInt8Execution(bn, op, mResource);
    return true;
}
// Recomputes shape-dependent launch parameters and refreshes the quantization
// tables on the device. Called whenever the input/output shapes change.
ErrorCode DepthwiseConvInt8Execution::onResize(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
auto input = inputs[0];
auto output = outputs[0];
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
// Re-derive the rescale factors from the tensors' current quant info, then
// upload the refreshed bias/scale tables to the device buffers.
std::vector<float> inputQuantInfo = TensorUtils::getQuantInfo(input);
std::vector<float> outputQuantInfo = TensorUtils::getQuantInfo(output);
mResource->updateInputOutputScale(inputQuantInfo, outputQuantInfo);
runtime->memcpy(mResource->mBiasInt32Ptr, mResource->mBiasInt32Vec, mResource->mOutputChannelPack*sizeof(int32_t), MNNMemcpyHostToDevice);
runtime->memcpy(mResource->mScaleFloatPtr, mResource->mScaleFloatVec, mResource->mOutputChannelPack*sizeof(float), MNNMemcpyHostToDevice);
// Padding depends on both shapes (handles PadMode computation in MNN).
mPads = ConvolutionCommon::convolutionPad(input, output, mOp->main_as_Convolution2D()->common());
auto mCommon = mOp->main_as_Convolution2D()->common();
const int src_width = input->width();
const int src_height = input->height();
const int dst_width = output->width();
const int dst_height = output->height();
const int strideY = mCommon->strideY();
const int strideX = mCommon->strideX();
const int dilateY = mCommon->dilateY();
const int dilateX = mCommon->dilateX();
const int kernel_height = mCommon->kernelY();
const int kernel_width = mCommon->kernelX();
// Cached as (x, y) pairs for onExecute's kernel launches.
mStrides = std::make_pair(strideX, strideY);
mDilates = std::make_pair(dilateX, dilateY);
mKernels = std::make_pair(kernel_width, kernel_height);
// Fold the activation into the int8 clamp range: relu lifts the lower bound
// to 0, relu6 additionally caps the upper bound at 6 (post-scale units).
auto clamp_max = mResource->mClampMax;
auto clamp_min = mResource->mClampMin;
if (mCommon->relu()) {
clamp_min = 0;
}
if (mCommon->relu6()) {
clamp_min = 0;
clamp_max = 6;
}
// Note the order: mClamps = (max, min).
mClamps = std::make_pair(clamp_max, clamp_min);
// MNN_PRINT("%d-%d-%d-%d, %d-%d-%d-%d\n", mKernels.first, mKernels.second, mStrides.first, mStrides.second, mDilates.first, mDilates.second, mPads.first, mPads.second);
return NO_ERROR;
}
// Launches the int8 depthwise convolution: a specialized kernel for the
// common 3x3/stride-1/pad-1 case (processing two output pixels per thread,
// hence the even-width requirement), otherwise the generic kernel.
ErrorCode DepthwiseConvInt8Execution::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto& prop = runtime->prop();
auto input = inputs[0];
auto output = outputs[0];
const int batch = input->batch();
const int c = input->channel();
// Channels padded up to a multiple of INT8_PACK_NUMBER for vectorized access.
const int c_p = UP_DIV(c, INT8_PACK_NUMBER) * INT8_PACK_NUMBER;
const int iw = input->width();
const int ih = input->height();
const int ow = output->width();
const int oh = output->height();
const int total = batch * c_p * oh * ow;
const int k_p = UP_DIV(mKernels.first * mKernels.second, INT8_PACK_NUMBER) * INT8_PACK_NUMBER;
const auto weightPtr = mResource->mWeightInt8Ptr;
const auto biasPtr = mResource->mBiasInt32Ptr;
const auto scalePtr = mResource->mScaleFloatPtr;
// One block per SM; thread count capped at half the per-block maximum and at
// the per-SM share of the workload.
int limitThreads = UP_DIV(total, prop.multiProcessorCount);
int threads_num = ALIMIN(prop.maxThreadsPerBlock / 2, limitThreads);
int block_num = prop.multiProcessorCount;
// Fast integer div/mod helpers for decomposing the flat output index.
// d_oc divides by c_p/4 because the kernels work on char4-packed channels.
DivModFast d_oc(c_p / 4);
DivModFast d_ow(ow);
DivModFast d_oh(oh);
if(mKernels.first==3 && mKernels.second==3 && mStrides.first==1 && mStrides.second==1 && mPads.first==1 && mPads.second==1 && ow % 2 ==0) {
// Optimized path emits two horizontal outputs per thread -> index by ow/2.
DivModFast d_ow2(ow/2);
CONV_DW3x3S1_INT8_OPT<<<block_num, threads_num>>>((const int8_t*)inputs[0]->deviceId(), (const int8_t*)weightPtr,
(const int32_t*)biasPtr, (const float*)scalePtr, (int8_t*)outputs[0]->deviceId(),
mClamps.first, mClamps.second, iw, ih, c, c_p, ow, oh, mKernels.first, mKernels.second, k_p,
mDilates.first, mDilates.second, mStrides.first, mStrides.second, mPads.first, mPads.second,
total, d_oc, d_ow2, d_oh);
checkKernelErrors;
return NO_ERROR;
}
// Generic path: let the runtime pick its standard launch configuration.
block_num = runtime->blocks_num(total);
threads_num = runtime->threads_num();
CONV_DW_INT8_<<<block_num, threads_num>>>((const int8_t*)inputs[0]->deviceId(), (const int8_t*)weightPtr,
(const int32_t*)biasPtr, (const float*)scalePtr, (int8_t*)outputs[0]->deviceId(),
mClamps.first, mClamps.second, iw, ih, c, c_p, ow, oh, mKernels.first, mKernels.second, k_p,
mDilates.first, mDilates.second, mStrides.first, mStrides.second, mPads.first, mPads.second,
total, d_oc, d_ow, d_oh);
checkKernelErrors;
return NO_ERROR;
}
// Factory registered with the CUDA backend for OpType_DepthwiseConvInt8.
class DepthWiseConvInt8ExecutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (inputs.size() > 1) {
// Multi-input variants are unsupported; returning nullptr lets the
// backend fall back to another implementation.
MNN_PRINT("OpType_DepthwiseConvInt8 CUDA not support multi input!, fall back...\n");
return nullptr;
}
// Build a fresh resource (packed int8 weights, bias, scales) from the op.
std::shared_ptr<ConvInt8CutlassExecution::Resource> resource(new ConvInt8CutlassExecution::Resource(backend, op));
return new DepthwiseConvInt8Execution(backend, op, resource);
}
};
// Registers the creator at static-init time so the CUDA backend can build
// this execution for OpType_DepthwiseConvInt8 nodes.
static CUDACreatorRegister<DepthWiseConvInt8ExecutionCreator> __init(OpType_DepthwiseConvInt8);
} // namespace CUDA
} // namespace MNN
#endif |
1d26128e4a51430769bc8327f12398d4515cc5b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
// For bilinear interpolation: maps the normalized coordinate x (in [-1, 1])
// into pixel space and returns
//   - point  : the x-coordinate of the pixel on the left (or y-coordinate of
//              the upper pixel),
//   - weight : the interpolation weight of that left/upper pixel.
__device__ void getTopLeft(float x, int width, int& point, float& weight)
{
    float xcoord = (x + 1) * (width - 1) / 2;
    // floorf keeps the computation in single precision; the original
    // double-precision floor() forced a float->double->float round trip
    // per sample, which is needlessly slow on most GPUs.
    point = floorf(xcoord);
    weight = 1 - (xcoord - point);
}
// Inclusive range test: lowerBound <= value <= upperBound.
__device__ bool between(int value, int lowerBound, int upperBound)
{
    return lowerBound <= value && value <= upperBound;
}
// Warp-level tree reduction over one 32-float shared-memory row.
__device__ void sumReduceShMem(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
// NOTE(review): this relies on implicit warp-synchronous execution (no
// barrier between stages, only `volatile`). On architectures with
// independent thread scheduling (Volta+ and later, also when compiled via
// HIP) this needs __syncwarp()/equivalent between stages -- confirm the
// targeted architectures before reusing.
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
// Forward bilinear sampler for BHWD tensors: for each output pixel, reads a
// normalized (y, x) coordinate from `grids` and bilinearly interpolates
// `inputImages` there. Out-of-image taps contribute 0 (values stay at their
// zero initialization). Expects a (32,16) block layout (see comments below).
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
// Row 0 loads the block's 16 (y, x) grid pairs in one coalesced sweep;
// every thread then reads its own pair after the barrier.
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
// Safe to return here: no further __syncthreads() in this kernel.
if(!withinImageBounds) return;
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
// Map normalized [-1, 1] coordinates to the top-left source pixel + weights.
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut;
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
float v=0;
// Zero defaults implement zero-padding for taps outside the image.
float inTopLeft=0;
float inTopRight=0;
float inBottomLeft=0;
float inBottomRight=0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
// interpolation happens here
// Channels are strided across the 32 threads of the warp row (coalesced,
// since channels are innermost in BHWD).
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t];
if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t];
if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t];
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t];
v = xWeightTopLeft * yWeightTopLeft * inTopLeft
+ (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight
+ xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft
+ (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight;
output_data[outAddress + t] = v;
}
}
// Lua entry point: forward pass. Pulls (self, inputImages, grids) off the
// Lua stack, launches the sampling kernel into self.output, and raises a Lua
// error if the launch failed.
static int cunn_BilinearSamplerBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
// Grid: one block per 16 output pixels (x), per output row (y), per batch (z).
dim3 blocks((output->size[2]+15)/16, output->size[1], output->size[0]);
dim3 threads(32,16);
/* assume BHWD */
// Strides are passed in (batch, channel, height, width) order, i.e. dims
// 0, 3, 1, 2 of the BHWD tensors.
hipLaunchKernelGGL(( bilinearSamplingFromGrid) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 3),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, output, 2));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Backward pass of the bilinear sampler. Accumulates gradients w.r.t. the
// input images (via atomicAdd, unless onlyGrid==true) and w.r.t. the grid
// coordinates (via a per-warp shared-memory dot-product reduction).
// When onlyGrid==true the gradInputImages_* arguments are unused and may be 0.
template<bool onlyGrid> __global__ void backwardBilinearSampling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int gradOutput_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
// gridData: first used to load the block's 16 (y, x) grid pairs, later
// reused to stage the computed grid gradients before the final write.
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeft + gradInputImages_strideWidth * xInTopLeft;
const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
// Per-thread partial dot products <input_corner, gradOutput> over the
// channels this thread handles; reduced across the warp further below.
float topLeftDotProduct = 0;
float topRightDotProduct = 0;
float bottomLeftDotProduct = 0;
float bottomRightDotProduct = 0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
// bool between(int value, int lowerBound, int upperBound)
if(topLeftIsIn)
{
float inTopLeft = inputImages_data[inTopLeftAddress + t];
topLeftDotProduct += inTopLeft * gradOutValue;
// atomicAdd: several output pixels may sample the same input pixel.
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftAddress + t], xWeightTopLeft * yWeightTopLeft * gradOutValue);
}
if(topRightIsIn)
{
float inTopRight = inputImages_data[inTopRightAddress + t];
topRightDotProduct += inTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightAddress + t], (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue);
}
if(bottomLeftIsIn)
{
float inBottomLeft = inputImages_data[inBottomLeftAddress + t];
bottomLeftDotProduct += inBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftAddress + t], xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue);
}
if(bottomRightIsIn)
{
float inBottomRight = inputImages_data[inBottomRightAddress + t];
bottomRightDotProduct += inBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightAddress + t], (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
// One 32-wide row per output pixel (threadIdx.y); sumReduceShMem reduces
// each row within its own warp.
__shared__ volatile float __shmem[16][32];
__shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightDotProduct = __shmem[threadIdx.y][0];
// d(output)/d(yf), d(output)/d(xf) in pixel units (scaled to normalized
// grid units at the write below).
yf = - xWeightTopLeft * topLeftDotProduct + xWeightTopLeft * bottomLeftDotProduct - (1-xWeightTopLeft) * topRightDotProduct + (1-xWeightTopLeft) * bottomRightDotProduct;
xf = - yWeightTopLeft * topLeftDotProduct + yWeightTopLeft * topRightDotProduct - (1-yWeightTopLeft) * bottomLeftDotProduct + (1-yWeightTopLeft) * bottomRightDotProduct;
if(threadIdx.x==0)
{
gridData[threadIdx.y*2] = yf * (inputImages_height-1) / 2;
gridData[threadIdx.y*2+1] = xf * (inputImages_width-1) / 2;
}
}// must put a big if condition in order not to hang at __syncthreads()...
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + threadIdx.x] = gridData[threadIdx.x];
}
// Lua entry point: full backward pass, computing gradients for both the
// input images and the sampling grid (template parameter <false> =
// "not only-grid").
static int cunn_BilinearSamplerBHWD_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
// Grid: one block per 16 output pixels (x), per output row (y), per batch (z).
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
// Strides passed in (batch, channel, height, width) order = dims 0,3,1,2.
hipLaunchKernelGGL(( backwardBilinearSampling <false>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Lua entry point: backward pass that computes only the grid gradient.
// The <true> template parameter makes the kernel skip all gradInputImages
// accumulation, so that tensor's pointer and strides are passed as 0 (note
// the Lua stack therefore has no argument at index 4).
static int cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
// Grid: one block per 16 output pixels (x), per output row (y), per batch (z).
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
// Strides passed in (batch, channel, height, width) order = dims 0,3,1,2.
hipLaunchKernelGGL(( backwardBilinearSampling <true>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
0,
0,
0,
0,
0,
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
// Bug fix: the message previously said "updateGradInput" (copy-paste),
// misreporting which entry point failed.
printf("error in BilinearSampler.updateGradInputOnlyGrid: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Lua registration table mapping exported names to the C entry points above.
static const struct luaL_Reg cunn_BilinearSamplerBHWD__ [] = {
{"BilinearSamplerBHWD_updateOutput", cunn_BilinearSamplerBHWD_updateOutput},
{"BilinearSamplerBHWD_updateGradInput", cunn_BilinearSamplerBHWD_updateGradInput},
{"BilinearSamplerBHWD_updateGradInputOnlyGrid", cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid},
{NULL, NULL}  // sentinel terminating the list for luaT_registeratname
};
// Registers the sampler functions on the torch.CudaTensor metatable under
// the "nn" field, making them callable from Lua.
static void cunn_BilinearSamplerBHWD_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_BilinearSamplerBHWD__, "nn");
lua_pop(L,1);  // pop the metatable pushed above
}
| 1d26128e4a51430769bc8327f12398d4515cc5b1.cu | #include "utils.h"
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
// For bilinear interpolation: maps the normalized coordinate x (in [-1, 1])
// into pixel space and returns
//   - point  : the x-coordinate of the pixel on the left (or y-coordinate of
//              the upper pixel),
//   - weight : the interpolation weight of that left/upper pixel.
__device__ void getTopLeft(float x, int width, int& point, float& weight)
{
    float xcoord = (x + 1) * (width - 1) / 2;
    // floorf keeps the computation in single precision; the original
    // double-precision floor() forced a float->double->float round trip
    // per sample, which is needlessly slow on most GPUs.
    point = floorf(xcoord);
    weight = 1 - (xcoord - point);
}
// Inclusive range test: lowerBound <= value <= upperBound.
__device__ bool between(int value, int lowerBound, int upperBound)
{
    return lowerBound <= value && value <= upperBound;
}
// Warp-level tree reduction over one 32-float shared-memory row.
__device__ void sumReduceShMem(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
// NOTE(review): this relies on implicit warp-synchronous execution (no
// barrier between stages, only `volatile`). On Volta+ with independent
// thread scheduling this needs __syncwarp() between stages -- confirm the
// targeted compute capabilities before reusing.
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
// Forward bilinear sampler for BHWD tensors: for each output pixel, reads a
// normalized (y, x) coordinate from `grids` and bilinearly interpolates
// `inputImages` there. Out-of-image taps contribute 0 (values stay at their
// zero initialization). Expects a (32,16) block layout (see comments below).
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
// Row 0 loads the block's 16 (y, x) grid pairs in one coalesced sweep;
// every thread then reads its own pair after the barrier.
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
// Safe to return here: no further __syncthreads() in this kernel.
if(!withinImageBounds) return;
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
// Map normalized [-1, 1] coordinates to the top-left source pixel + weights.
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut;
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
float v=0;
// Zero defaults implement zero-padding for taps outside the image.
float inTopLeft=0;
float inTopRight=0;
float inBottomLeft=0;
float inBottomRight=0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
// interpolation happens here
// Channels are strided across the 32 threads of the warp row (coalesced,
// since channels are innermost in BHWD).
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t];
if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t];
if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t];
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t];
v = xWeightTopLeft * yWeightTopLeft * inTopLeft
+ (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight
+ xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft
+ (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight;
output_data[outAddress + t] = v;
}
}
// Lua entry point: forward pass. Pulls (self, inputImages, grids) off the
// Lua stack, launches the sampling kernel into self.output, and raises a Lua
// error if the launch failed.
static int cunn_BilinearSamplerBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
// Grid: one block per 16 output pixels (x), per output row (y), per batch (z).
dim3 blocks((output->size[2]+15)/16, output->size[1], output->size[0]);
dim3 threads(32,16);
/* assume BHWD */
// Strides are passed in (batch, channel, height, width) order, i.e. dims
// 0, 3, 1, 2 of the BHWD tensors.
bilinearSamplingFromGrid <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 3),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, output, 2));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
// Backward pass of bilinear sampling over BHWD tensors.
//
// Expects (32,16) thread blocks and a grid of
// (ceil(out_width/16), out_height, batch) blocks — see the host
// wrappers below.  threadIdx.y selects one of the 16 output pixels the
// block covers; threadIdx.x strides over the input channels.
//
// Produces the gradient w.r.t. the sampling grid (gradGrids) and, when
// onlyGrid is false, also the gradient w.r.t. the input images
// (gradInputImages, accumulated with atomicAdd since several output
// pixels can map onto the same input pixel).  With onlyGrid=true the
// gradInputImages_* arguments are never dereferenced, so callers may
// pass 0 for them.
template<bool onlyGrid> __global__ void backwardBilinearSampling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
                                                                 float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
                                                                 float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
                                                                 float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideHeight, int gradGrids_strideWidth,
                                                                 float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideHeight, int gradOutput_strideWidth,
                                                                 int inputImages_channels, int inputImages_height, int inputImages_width, int gradOutput_width)
{
    // each (32,16) block handles 16 output pixels (for coalescing the grid read)
    // x,y = coordinates
    // z = batch index
    // threads : used for features
    const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
    const bool withinImageBounds = xOut < gradOutput_width;
    // true for the x-threads whose (y,x) pair below maps to a valid output pixel
    const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < gradOutput_width;
    const int yOut = blockIdx.y;
    const int width = inputImages_width;
    const int height = inputImages_height;
    const int b = blockIdx.z;

    float yf,xf;

    // Staging buffer for the 16 (y,x) grid pairs of this block: loaded
    // coalesced by the 32 x-threads of row threadIdx.y == 0, and reused
    // at the end to stage the grid gradient before the coalesced store.
    __shared__ float gridData[32];
    if (threadIdx.y==0 && withinGridBounds)
    {
        gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
    }
    __syncthreads();

    if(withinImageBounds)
    {
        yf = gridData[threadIdx.y*2];
        xf = gridData[threadIdx.y*2+1];

        // Top-left corner of the 2x2 sampling footprint plus the
        // bilinear interpolation weights (helpers defined elsewhere).
        int yInTopLeft, xInTopLeft;
        float yWeightTopLeft, xWeightTopLeft;
        getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
        getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);

        const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
        const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
        const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
        const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;

        const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeft + gradInputImages_strideWidth * xInTopLeft;
        const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth;
        const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight;
        const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth;

        const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;

        float topLeftDotProduct = 0;
        float topRightDotProduct = 0;
        float bottomLeftDotProduct = 0;
        float bottomRightDotProduct = 0;

        // Each corner of the footprint contributes only when it falls
        // inside the input image.
        bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
        bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
        bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
        bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);

        /*
           In that loop we accumulate
           - gradients into the gradInputImages array with atomic adds
           - we compute the dot product that we need for the grid gradient
        */
        for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
        {
            float gradOutValue = gradOutput_data[gradOutputAddress + t];
            // bool between(int value, int lowerBound, int upperBound)
            if(topLeftIsIn)
            {
                float inTopLeft = inputImages_data[inTopLeftAddress + t];
                topLeftDotProduct += inTopLeft * gradOutValue;
                if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftAddress + t], xWeightTopLeft * yWeightTopLeft * gradOutValue);
            }

            if(topRightIsIn)
            {
                float inTopRight = inputImages_data[inTopRightAddress + t];
                topRightDotProduct += inTopRight * gradOutValue;
                if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightAddress + t], (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue);
            }

            if(bottomLeftIsIn)
            {
                float inBottomLeft = inputImages_data[inBottomLeftAddress + t];
                bottomLeftDotProduct += inBottomLeft * gradOutValue;
                if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftAddress + t], xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue);
            }

            if(bottomRightIsIn)
            {
                float inBottomRight = inputImages_data[inBottomRightAddress + t];
                bottomRightDotProduct += inBottomRight * gradOutValue;
                if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightAddress + t], (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue);
            }
        }

        /*
           Here we reduce the dot product and compute the grid gradient before writing it.
        */

        /* could do shuffles and use no shmem at all but cuda arch is 2.0 */
        // One 32-wide row of partials per output pixel; sumReduceShMem
        // (defined elsewhere) leaves the row total in element 0.
        __shared__ volatile float __shmem[16][32];

        __shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
        sumReduceShMem(__shmem[threadIdx.y]);
        topLeftDotProduct = __shmem[threadIdx.y][0];

        __shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
        sumReduceShMem(__shmem[threadIdx.y]);
        topRightDotProduct = __shmem[threadIdx.y][0];

        __shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
        sumReduceShMem(__shmem[threadIdx.y]);
        bottomLeftDotProduct = __shmem[threadIdx.y][0];

        __shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
        sumReduceShMem(__shmem[threadIdx.y]);
        bottomRightDotProduct = __shmem[threadIdx.y][0];

        // yf/xf are reused to hold d(loss)/d(grid_y) and d(loss)/d(grid_x).
        yf = - xWeightTopLeft * topLeftDotProduct + xWeightTopLeft * bottomLeftDotProduct - (1-xWeightTopLeft) * topRightDotProduct + (1-xWeightTopLeft) * bottomRightDotProduct;
        xf = - yWeightTopLeft * topLeftDotProduct + yWeightTopLeft * topRightDotProduct - (1-yWeightTopLeft) * bottomLeftDotProduct + (1-yWeightTopLeft) * bottomRightDotProduct;

        if(threadIdx.x==0)
        {
            // The (size-1)/2 factor appears to undo the grid coordinate
            // normalization (grids presumably in [-1,1] — confirm against
            // getTopLeft's convention).
            gridData[threadIdx.y*2] = yf * (inputImages_height-1) / 2;
            gridData[threadIdx.y*2+1] = xf * (inputImages_width-1) / 2;
        }
    }// must put a big if condition in order not to hang at __syncthreads()...
    __syncthreads();

    // Coalesced write of the 16 staged (y,x) gradients by row y == 0.
    if(threadIdx.y==0 && withinGridBounds)
        gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + threadIdx.x] = gridData[threadIdx.x];
}
// Lua binding: full backward pass of the bilinear sampler, producing
// gradients for both the input images and the sampling grid.
// Lua stack (all torch.CudaTensor, BHWD layout):
//   2=inputImages, 3=grids, 4=gradInputImages, 5=gradGrids, 6=gradOutput.
// Returns 1 (the number of Lua results).
static int cunn_BilinearSamplerBHWD_updateGradInput(lua_State *L)
{
    THCState *state = getCutorchState(L);
    THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
    THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
    THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
    THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
    THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");

    // One block per 16 output pixels (grid.x), per output row (grid.y),
    // per batch element (grid.z); (32,16) threads as the kernel expects.
    dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
    dim3 threads(32,16);

    // Stride arguments are passed as (batch, channels, height, width),
    // i.e. dims 0, 3, 1, 2 of each BHWD tensor.
    backwardBilinearSampling <false> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
        THCudaTensor_data(state, inputImages),
        THCudaTensor_stride(state, inputImages, 0),
        THCudaTensor_stride(state, inputImages, 3),
        THCudaTensor_stride(state, inputImages, 1),
        THCudaTensor_stride(state, inputImages, 2),
        THCudaTensor_data(state, gradInputImages),
        THCudaTensor_stride(state, gradInputImages, 0),
        THCudaTensor_stride(state, gradInputImages, 3),
        THCudaTensor_stride(state, gradInputImages, 1),
        THCudaTensor_stride(state, gradInputImages, 2),
        THCudaTensor_data(state, grids),
        THCudaTensor_stride(state, grids, 0),
        THCudaTensor_stride(state, grids, 3),
        THCudaTensor_stride(state, grids, 1),
        THCudaTensor_stride(state, grids, 2),
        THCudaTensor_data(state, gradGrids),
        THCudaTensor_stride(state, gradGrids, 0),
        THCudaTensor_stride(state, gradGrids, 3),
        THCudaTensor_stride(state, gradGrids, 1),
        THCudaTensor_stride(state, gradGrids, 2),
        THCudaTensor_data(state, gradOutput),
        THCudaTensor_stride(state, gradOutput, 0),
        THCudaTensor_stride(state, gradOutput, 3),
        THCudaTensor_stride(state, gradOutput, 1),
        THCudaTensor_stride(state, gradOutput, 2),
        THCudaTensor_size(state, inputImages, 3),
        THCudaTensor_size(state, inputImages, 1),
        THCudaTensor_size(state, inputImages, 2),
        THCudaTensor_size(state, gradOutput, 2));

    // check for errors
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err));
        THError("aborting");
    }
    return 1;
}
// Lua binding: backward pass that produces ONLY the grid gradient
// (gradGrids).  The kernel is instantiated with onlyGrid=true, which
// never dereferences the gradInputImages arguments, so 0 is passed for
// every gradInputImages pointer/stride.
// Lua stack (all torch.CudaTensor, BHWD layout):
//   2=inputImages, 3=grids, 5=gradGrids, 6=gradOutput
//   (index 4 — gradInputImages in the sibling binding — is unused here).
// Returns 1 (the number of Lua results).
static int cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid(lua_State *L)
{
    THCState *state = getCutorchState(L);
    THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
    THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
    THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
    THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");

    // One block per 16 output pixels (grid.x), per output row (grid.y),
    // per batch element (grid.z); (32,16) threads as the kernel expects.
    dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
    dim3 threads(32,16);

    backwardBilinearSampling <true> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
        THCudaTensor_data(state, inputImages),
        THCudaTensor_stride(state, inputImages, 0),
        THCudaTensor_stride(state, inputImages, 3),
        THCudaTensor_stride(state, inputImages, 1),
        THCudaTensor_stride(state, inputImages, 2),
        0,   // gradInputImages data/strides are unused with onlyGrid=true
        0,
        0,
        0,
        0,
        THCudaTensor_data(state, grids),
        THCudaTensor_stride(state, grids, 0),
        THCudaTensor_stride(state, grids, 3),
        THCudaTensor_stride(state, grids, 1),
        THCudaTensor_stride(state, grids, 2),
        THCudaTensor_data(state, gradGrids),
        THCudaTensor_stride(state, gradGrids, 0),
        THCudaTensor_stride(state, gradGrids, 3),
        THCudaTensor_stride(state, gradGrids, 1),
        THCudaTensor_stride(state, gradGrids, 2),
        THCudaTensor_data(state, gradOutput),
        THCudaTensor_stride(state, gradOutput, 0),
        THCudaTensor_stride(state, gradOutput, 3),
        THCudaTensor_stride(state, gradOutput, 1),
        THCudaTensor_stride(state, gradOutput, 2),
        THCudaTensor_size(state, inputImages, 3),
        THCudaTensor_size(state, inputImages, 1),
        THCudaTensor_size(state, inputImages, 2),
        THCudaTensor_size(state, gradOutput, 2));

    // check for errors
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        // fixed: the message previously named the sibling entry point
        // (updateGradInput), which made failures hard to attribute
        printf("error in BilinearSampler.updateGradInputOnlyGrid: %s\n", cudaGetErrorString(err));
        THError("aborting");
    }
    return 1;
}
// Lua method table: names exposed to Lua mapped to the C entry points
// above (registered under "nn" on torch.CudaTensor by the init below).
static const struct luaL_Reg cunn_BilinearSamplerBHWD__ [] = {
    {"BilinearSamplerBHWD_updateOutput", cunn_BilinearSamplerBHWD_updateOutput},
    {"BilinearSamplerBHWD_updateGradInput", cunn_BilinearSamplerBHWD_updateGradInput},
    {"BilinearSamplerBHWD_updateGradInputOnlyGrid", cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid},
    {NULL, NULL}   // sentinel terminating the luaL_Reg table
};
// Register the BilinearSamplerBHWD bindings on the torch.CudaTensor
// metatable (under the "nn" name), then restore the Lua stack.
static void cunn_BilinearSamplerBHWD_init(lua_State *L)
{
    luaT_pushmetatable(L, "torch.CudaTensor");
    luaT_registeratname(L, cunn_BilinearSamplerBHWD__, "nn");
    lua_pop(L,1);   // pop the metatable pushed above
}
|
d707e8edbcf2a9031ed8b7a700ec9c3cca425171.hip | // !!! This is a file automatically generated by hipify!!!
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <hip/hip_runtime.h>
// One circular heat source imprinted on the domain boundary
// (see initialize(): linear falloff from temp to 0 over `range`).
typedef struct {
    float posx;    // x position, normalized to [0,1] across the domain
    float posy;    // y position, normalized to [0,1]
    float range;   // influence radius, in the same normalized units
    float temp;    // peak temperature contributed at the source center
} heatsrc_t;
// Run-time configuration and working buffers of the heat solver.
typedef struct {
    unsigned maxiter;       // maximum number of iterations
    unsigned resolution;    // spatial resolution (inner points per dimension)
    int algorithm;          // 0=>Jacobi, 1=>Gauss (not consulted in this file)

    unsigned visres;        // visualization resolution (set equal to resolution by read_input)

    float *u, *uhelp;       // current and helper grids, (resolution+2)^2 floats each
    float *uvis;            // coarsened copy used for image output, (visres+2)^2 floats

    unsigned numsrcs;       // number of heat sources
    heatsrc_t *heatsrcs;    // array of numsrcs heat source descriptors
} algoparam_t;
// function declarations — host-side helpers defined later in this file
int read_input(FILE *infile, algoparam_t *param);
void print_params(algoparam_t *param);
int initialize(algoparam_t *param);
int finalize(algoparam_t *param);
void write_image(FILE *f, float *u,
                 unsigned sizex, unsigned sizey);
int coarsen(float *uold, unsigned oldx, unsigned oldy,
            float *unew, unsigned newx, unsigned newy);

// GPU kernels, implemented in a separate compilation unit
__global__ void gpu_Heat(float *h, float *g, int N);
__global__ void gpu_Diff_Reduce_Atomic(float *h, float *g, float *s, int N);
__global__ void gpu_Diff_Reduce(float *h, float *g, float *s, int N);
__global__ void gpu_Reduce(float *s, int N, int skip);

#define NB 8   // cache-blocking factor (blocks per dimension) used by cpu_jacobi

// NOTE(review): textbook unsafe min macro — each argument is evaluated
// twice, so never call it with side-effecting expressions.
#define min(a, b) ( ((a) < (b)) ? (a) : (b) )
// Sum of squared differences between the interior points of two
// sizex-by-sizey row-major grids (boundary rows/columns excluded).
float cpu_residual(float *u, float *utmp, unsigned sizex, unsigned sizey) {
    float acc = 0.0f;
    for (int row = 1; row < sizex - 1; ++row) {
        for (int col = 1; col < sizey - 1; ++col) {
            const float d = utmp[row * sizey + col] - u[row * sizey + col];
            acc += d * d;
        }
    }
    return acc;
}
// One Jacobi relaxation sweep over the interior of a sizex-by-sizey
// row-major grid, processed as an 8x8 arrangement of cache blocks
// (8 == NB).  Each interior point of utmp receives the average of its
// four neighbours in u; the squared update is accumulated and returned
// as the residual.
float cpu_jacobi(float *u, float *utmp, unsigned sizex, unsigned sizey) {
    const int nblocks = 8;               // == NB
    const int bx = sizex / nblocks;      // block extent in x
    const int by = sizey / nblocks;      // block extent in y
    float acc = 0.0f;

    for (int bi = 0; bi < nblocks; ++bi) {
        for (int bj = 0; bj < nblocks; ++bj) {
            // Clamp each block to the interior range [1, size-2].
            int iend = (bi + 1) * bx;
            if (iend > (int)(sizex - 2)) iend = sizex - 2;
            int jend = (bj + 1) * by;
            if (jend > (int)(sizey - 2)) jend = sizey - 2;

            for (int i = 1 + bi * bx; i <= iend; ++i) {
                for (int j = 1 + bj * by; j <= jend; ++j) {
                    const float avg = 0.25 * (u[i * sizey + (j - 1)] +  // left
                                              u[i * sizey + (j + 1)] +  // right
                                              u[(i - 1) * sizey + j] +  // top
                                              u[(i + 1) * sizey + j]);  // bottom
                    utmp[i * sizey + j] = avg;
                    const float d = avg - u[i * sizey + j];
                    acc += d * d;
                }
            }
        }
    }
    return acc;
}
// Print the command-line help for this program on stderr.
// `s` is the program name (argv[0]).
void usage(char *s) {
    FILE *out = stderr;
    fprintf(out, "Usage: %s <input file> -t threads -b blocks\n", s);
    fprintf(out, "       -t number of threads per block in each dimension (e.g. 16)\n");
}
/*
 * Program driver: parse the configuration, solve the heat equation with
 * Jacobi relaxation on the CPU, then again on the GPU, reporting the
 * timing/GFlop figures and the convergence residual for both runs, and
 * finally write the result as a PPM image ("heat.ppm").
 *
 * The GPU residual strategy is chosen at compile time with exactly one
 * of RESIDUAL_CPU, RESIDUAL_GPU_DIFF, RESIDUAL_GPU_REDUCE or
 * RESIDUAL_GPU_ATOMIC (BLOCK_SIZE must also be defined for the GPU
 * reduction modes).
 *
 * Fixes vs. the original version:
 *  - the GPU GFlop/s report recomputes `flop` from the GPU iteration
 *    count instead of reusing the CPU figure;
 *  - the RESIDUAL_GPU_ATOMIC accumulator holds a float, so it is
 *    allocated with sizeof(float) rather than sizeof(int);
 *  - dev_sum is released, and infile/resfile are closed.
 */
int main(int argc, char *argv[]) {
    unsigned iter;
    FILE *infile, *resfile;
    char *resfilename;

    // algorithmic parameters
    algoparam_t param;
    int np;

    // check arguments
    if (argc < 4) {
        usage(argv[0]);
        return 1;
    }

    // check input file
    if (!(infile = fopen(argv[1], "r"))) {
        fprintf(stderr,
                "\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
        usage(argv[0]);
        return 1;
    }

    // check result file
    resfilename = "heat.ppm";
    if (!(resfile = fopen(resfilename, "w"))) {
        fprintf(stderr,
                "\nError: Cannot open \"%s\" for writing.\n\n",
                resfilename);
        usage(argv[0]);
        return 1;
    }

    // check input
    if (!read_input(infile, &param)) {
        fprintf(stderr, "\nError: Error parsing input file.\n\n");
        usage(argv[0]);
        return 1;
    }
    fclose(infile);   // configuration fully parsed

    // full size (param.resolution are only the inner points)
    np = param.resolution + 2;

    int Grid_Dim, Block_Dim;   // Grid and Block structure values
    if (strcmp(argv[2], "-t") == 0) {
        Block_Dim = atoi(argv[3]);
        Grid_Dim = np / Block_Dim + ((np % Block_Dim) != 0);   // ceiling division
        if ((Block_Dim * Block_Dim) > 512) {
            printf("Error -- too many threads in block, try again\n");
            return 1;
        }
    } else {
        fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", argv[0]);
        fprintf(stderr, "       -t number of threads per block in each dimension (e.g. 16)\n");
        return 0;
    }

    fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
    fprintf(stderr, "--------------------------------------------\n");
    print_params(&param);

    fprintf(stdout, "\nExecution on CPU (sequential)\n-----------------------------\n");
    if (!initialize(&param)) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    // starting time
    float elapsed_time_ms;    // which is applicable for asynchronous code also
    hipEvent_t start, stop;   // using cuda events to measure time
    hipEventCreate(&start);   // instrument code to measure start time
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipEventSynchronize(start);

    iter = 0;
    float residual;
    while (1) {
        residual = cpu_jacobi(param.u, param.uhelp, np, np);
        float *tmp = param.u;   // ping-pong the two host buffers
        param.u = param.uhelp;
        param.uhelp = tmp;

        iter++;

        // solution good enough ?
        if (residual < 0.00005) break;

        // max. iteration reached ? (no limit with maxiter=0)
        // NOTE(review): with maxiter==0 this actually stops after the
        // first sweep, since iter >= 0 is always true here.
        if (iter >= param.maxiter) break;
    }

    hipEventRecord(stop, 0);   // instrument code to measue end time
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time_ms, start, stop);

    // Flop count after iter iterations
    float flop = iter * 11.0 * param.resolution * param.resolution;

    fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
            flop / 1000000000.0,
            flop / elapsed_time_ms / 1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

    finalize(&param);

    fprintf(stdout, "\nExecution on GPU\n----------------\n");
    fprintf(stderr, "Number of threads per block in each dimension = %d\n", Block_Dim);
    fprintf(stderr, "Number of blocks per grid in each dimension = %d\n", Grid_Dim);

    if (!initialize(&param)) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    dim3 Grid(Grid_Dim, Grid_Dim);
    dim3 Block(Block_Dim, Block_Dim);

    // starting time
    hipEventRecord(start, 0);
    hipEventSynchronize(start);

    float *dev_u, *dev_uhelp;

    // Allocation of the two device grids
    hipMalloc(&dev_u, np * np * sizeof(float));
    hipMalloc(&dev_uhelp, np * np * sizeof(float));

#if defined RESIDUAL_GPU_DIFF
    // Auxiliary buffer: one partial sum per 1-D block, summed on the host
    int Grid1D = ceil(float(np * np) / BLOCK_SIZE);
    float *dev_sum, sum[Grid1D];
    hipMalloc(&dev_sum, Grid1D * sizeof(float));
#elif defined RESIDUAL_GPU_REDUCE
    // Auxiliary buffer: reduced in place on the device
    float *dev_sum;
    int Grid1D = ceil(float(np * np) / BLOCK_SIZE);
    hipMalloc(&dev_sum, Grid1D * sizeof(float));
#elif defined RESIDUAL_GPU_ATOMIC
    // Auxiliary buffer: a single float accumulator
    // (fixed: was allocated with sizeof(int))
    float *dev_sum;
    int Grid1D = ceil(float(np * np) / BLOCK_SIZE);
    hipMalloc(&dev_sum, sizeof(float));
#endif

    // Copy the initial state to the device
    hipMemcpy(dev_u, param.u, np * np * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_uhelp, param.uhelp, np * np * sizeof(float), hipMemcpyHostToDevice);

    iter = 0;
    while (1) {
        hipLaunchKernelGGL(( gpu_Heat) , dim3(Grid), dim3(Block) , 0, 0, dev_u, dev_uhelp, np);
        hipDeviceSynchronize();   // wait for all threads to complete
#if defined RESIDUAL_CPU
        // Copy from device to compute residual on CPU
        hipMemcpy( param.u, dev_u, np*np*sizeof(float), hipMemcpyDeviceToHost);
        hipMemcpy( param.uhelp, dev_uhelp, np*np*sizeof(float), hipMemcpyDeviceToHost);
        residual = cpu_residual (param.u, param.uhelp, np, np);
#elif defined RESIDUAL_GPU_DIFF
        // Per-block partial sums on the GPU, final accumulation on the host
        residual = 0;
        hipLaunchKernelGGL(( gpu_Diff_Reduce) , dim3(Grid1D), dim3(BLOCK_SIZE) , 0, 0, dev_u, dev_uhelp, dev_sum, np);
        hipDeviceSynchronize();
        hipMemcpy(&sum, dev_sum, Grid1D*sizeof(float), hipMemcpyDeviceToHost);
        for (int i =0; i < Grid1D; i++) residual += sum[i];
#elif defined RESIDUAL_GPU_REDUCE
        // Full tree reduction on the GPU
        residual = 0;
        hipLaunchKernelGGL(( gpu_Diff_Reduce) , dim3(Grid1D), dim3(BLOCK_SIZE) , 0, 0, dev_u, dev_uhelp, dev_sum, np);
        hipDeviceSynchronize();
        int grid = Grid1D;
        for (int skip = 1; grid > 1; skip = skip * BLOCK_SIZE) {
            grid = ceil(float(grid) / BLOCK_SIZE);
            // NOTE(review): np is passed as the N argument — verify
            // against gpu_Reduce's contract (element count vs. grid size).
            hipLaunchKernelGGL(( gpu_Reduce) , dim3(grid), dim3(BLOCK_SIZE) , 0, 0, dev_sum, np, skip);
            hipDeviceSynchronize();
        }
        hipMemcpy(&residual, dev_sum, sizeof(float), hipMemcpyDeviceToHost);
#elif defined RESIDUAL_GPU_ATOMIC
        // Single accumulator updated with atomics, cleared every sweep
        hipMemset(dev_sum, 0, sizeof(float));
        hipLaunchKernelGGL(( gpu_Diff_Reduce_Atomic) , dim3(Grid1D), dim3(BLOCK_SIZE) , 0, 0, dev_u, dev_uhelp, dev_sum, np);
        hipDeviceSynchronize();
        hipMemcpy(&residual, dev_sum, sizeof(float), hipMemcpyDeviceToHost);
#else
#error "Residual mode must be defined"
#endif
        float *tmp = dev_u;   // ping-pong the device buffers
        dev_u = dev_uhelp;
        dev_uhelp = tmp;

        iter++;

        // solution good enough ?
        if (residual < 0.00005) break;

        // max. iteration reached ? (no limit with maxiter=0)
        if (iter >= param.maxiter) break;
    }

    // Copy the final state back from the device
    hipMemcpy(param.u, dev_u, np * np * sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(param.uhelp, dev_uhelp, np * np * sizeof(float), hipMemcpyDeviceToHost);

    // Free device memory
    hipFree(dev_u);
    hipFree(dev_uhelp);
#if defined RESIDUAL_GPU_DIFF || defined RESIDUAL_GPU_REDUCE || defined RESIDUAL_GPU_ATOMIC
    hipFree(dev_sum);   // fixed: was leaked
#endif

    hipEventRecord(stop, 0);   // instrument code to measue end time
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time_ms, start, stop);

    // fixed: recompute the flop count from the GPU iteration count
    // (the original reported the CPU figure here)
    flop = iter * 11.0 * param.resolution * param.resolution;

    fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
            flop / 1000000000.0,
            flop / elapsed_time_ms / 1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

    hipEventDestroy(start);
    hipEventDestroy(stop);

    // for plot...
    coarsen(param.u, np, np,
            param.uvis, param.visres + 2, param.visres + 2);

    write_image(resfile, param.uvis,
                param.visres + 2,
                param.visres + 2);
    fclose(resfile);

    finalize(&param);

    return 0;
}
/*
 * Initialize the iterative solver
 * - allocate memory for matrices
 * - set boundary conditions according to configuration
 *
 * The two solution grids and the visualization buffer are calloc'ed, so
 * the interior starts at 0.0.  Each heat source imprints a temperature
 * profile along the four border rows/columns that falls off linearly
 * from `temp` at the source position to 0 at distance `range`
 * (positions are normalized so a border spans [0,1]).  Finally u is
 * copied into uhelp so both buffers start from the same state.
 * Returns 1 on success, 0 if any allocation failed.
 */
int initialize(algoparam_t *param) {
    int i, j;
    float dist;

    // total number of points (including border)
    const int np = param->resolution + 2;

    //
    // allocate memory
    //
    (param->u) = (float *) calloc(sizeof(float), np * np);
    (param->uhelp) = (float *) calloc(sizeof(float), np * np);
    (param->uvis) = (float *) calloc(sizeof(float),
                                     (param->visres + 2) *
                                     (param->visres + 2));

    if (!(param->u) || !(param->uhelp) || !(param->uvis)) {
        fprintf(stderr, "Error: Cannot allocate memory\n");
        return 0;
    }

    for (i = 0; i < param->numsrcs; i++) {
        /* top row */
        for (j = 0; j < np; j++) {
            // Euclidean distance from boundary point (j/(np-1), 0) to the source
            dist = sqrt(pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posx, 2) +
                        pow(param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[j] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }

        /* bottom row */
        for (j = 0; j < np; j++) {
            dist = sqrt(pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posx, 2) +
                        pow(1 - param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[(np - 1) * np + j] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }

        /* leftmost column */
        // corners are skipped here: already handled by the rows above
        for (j = 1; j < np - 1; j++) {
            dist = sqrt(pow(param->heatsrcs[i].posx, 2) +
                        pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[j * np] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }

        /* rightmost column */
        for (j = 1; j < np - 1; j++) {
            dist = sqrt(pow(1 - param->heatsrcs[i].posx, 2) +
                        pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[j * np + (np - 1)] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }
    }

    // Copy u into uhelp
    float *putmp, *pu;
    pu = param->u;
    putmp = param->uhelp;
    for (j = 0; j < np; j++)
        for (i = 0; i < np; i++)
            *putmp++ = *pu++;

    return 1;
}
/*
 * Release every buffer owned by *param and reset the pointers so a
 * second call is harmless.  Always returns 1.
 */
int finalize(algoparam_t *param) {
    float **fields[] = { &param->u, &param->uhelp, &param->uvis };
    for (int k = 0; k < 3; ++k) {
        if (*fields[k]) {
            free(*fields[k]);
            *fields[k] = 0;
        }
    }
    return 1;
}
/*
 * Map the temperatures in u (sizex x sizey, row-major) onto a
 * 1024-entry blue->green->yellow->red palette and write the result to
 * f as a plain-text PPM ("P3") image.
 *
 * Fixes vs. the original:
 *  - the min/max trackers are floats, so they are seeded with
 *    +/-FLT_MAX instead of +/-DBL_MAX (whose conversion to float
 *    overflows the float range);
 *  - a constant-valued field no longer divides by zero when
 *    normalizing (all pixels then map to palette entry 0);
 *  - the palette index is clamped to [0, 1023] to guard against
 *    floating-point rounding at the extremes.
 */
void write_image(FILE *f, float *u,
                 unsigned sizex, unsigned sizey) {
    // RGB table
    unsigned char r[1024], g[1024], b[1024];
    int i, j, k;
    float min, max;

    j = 1023;

    // prepare RGB table: four 256-entry ramps, filled from hot (red,
    // index 1023) down to cold (blue, index 0)
    for (i = 0; i < 256; i++) {
        r[j] = 255;
        g[j] = i;
        b[j] = 0;
        j--;
    }
    for (i = 0; i < 256; i++) {
        r[j] = 255 - i;
        g[j] = 255;
        b[j] = 0;
        j--;
    }
    for (i = 0; i < 256; i++) {
        r[j] = 0;
        g[j] = 255;
        b[j] = i;
        j--;
    }
    for (i = 0; i < 256; i++) {
        r[j] = 0;
        g[j] = 255 - i;
        b[j] = 255;
        j--;
    }

    min = FLT_MAX;
    max = -FLT_MAX;

    // find minimum and maximum
    for (i = 0; i < sizey; i++) {
        for (j = 0; j < sizex; j++) {
            if (u[i * sizex + j] > max)
                max = u[i * sizex + j];
            if (u[i * sizex + j] < min)
                min = u[i * sizex + j];
        }
    }

    // avoid division by zero for a constant field
    float range = (max > min) ? (max - min) : 1.0f;

    fprintf(f, "P3\n");
    fprintf(f, "%u %u\n", sizex, sizey);
    fprintf(f, "%u\n", 255);

    for (i = 0; i < sizey; i++) {
        for (j = 0; j < sizex; j++) {
            k = (int) (1023.0 * (u[i * sizex + j] - min) / range);
            if (k < 0) k = 0;           // clamp against rounding
            if (k > 1023) k = 1023;
            fprintf(f, "%d %d %d ", r[k], g[k], b[k]);
        }
        fprintf(f, "\n");
    }
}
/*
 * Down-sample uold (oldx x oldy) into unew (newx x newy) by picking the
 * top-left sample of each coarsening cell.  When the target is not
 * smaller than the source in a dimension, the stride collapses to 1 and
 * iteration is limited to the source extent.  Always returns 1.
 *
 * NOTE: this only takes the top-left corner, and doesn't do any real
 * coarsening (the last row/column of unew is never written).
 */
int coarsen(float *uold, unsigned oldx, unsigned oldy,
            float *unew, unsigned newx, unsigned newy) {
    int stepx = 1, stepy = 1;
    int stopx = newx, stopy = newy;

    if (oldx > newx)
        stepx = oldx / newx;
    else
        stopx = oldx;

    if (oldy > newy)
        stepy = oldy / newy;
    else
        stopy = oldy;

    for (int i = 0; i < stopy - 1; i++)
        for (int j = 0; j < stopx - 1; j++)
            unew[i * newx + j] = uold[i * oldx * stepy + j * stepx];

    return 1;
}
#define BUFSIZE 100

/*
 * Parse the solver configuration from infile:
 *   line 1: maxiter, line 2: resolution, line 3: number of heat
 *   sources, then one "posx posy range temp" line per source.
 * visres is set equal to resolution.
 * Returns 1 on success, 0 on any read/parse/allocation failure.
 *
 * Fix vs. the original: every fgets() result is checked, so a
 * truncated file fails cleanly instead of re-parsing a stale buffer;
 * the heat-source allocation is checked as well.
 */
int read_input(FILE *infile, algoparam_t *param) {
    int i, n;
    char buf[BUFSIZE];

    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->maxiter));
    if (n != 1)
        return 0;

    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->resolution));
    if (n != 1)
        return 0;

    param->visres = param->resolution;

    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->numsrcs));
    if (n != 1)
        return 0;

    (param->heatsrcs) =
        (heatsrc_t *) malloc(sizeof(heatsrc_t) * (param->numsrcs));
    if (!param->heatsrcs)
        return 0;

    for (i = 0; i < param->numsrcs; i++) {
        if (!fgets(buf, BUFSIZE, infile))
            return 0;
        n = sscanf(buf, "%f %f %f %f",
                   &(param->heatsrcs[i].posx),
                   &(param->heatsrcs[i].posy),
                   &(param->heatsrcs[i].range),
                   &(param->heatsrcs[i].temp));
        if (n != 4)
            return 0;
    }

    return 1;
}
// Dump the parsed configuration (iterations, resolution and every heat
// source) to stdout in a human-readable form.
void print_params(algoparam_t *param) {
    fprintf(stdout, "Iterations : %u\n", param->maxiter);
    fprintf(stdout, "Resolution : %u\n", param->resolution);
    fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);

    for (int i = 0; i < param->numsrcs; i++) {
        const heatsrc_t *src = &param->heatsrcs[i];
        fprintf(stdout, " %2d: (%2.2f, %2.2f) %2.2f %2.2f \n",
                i + 1, src->posx, src->posy, src->range, src->temp);
    }
}
| d707e8edbcf2a9031ed8b7a700ec9c3cca425171.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
// One circular heat source imprinted on the domain boundary
// (see initialize(): linear falloff from temp to 0 over `range`).
typedef struct {
    float posx;    // x position, normalized to [0,1] across the domain
    float posy;    // y position, normalized to [0,1]
    float range;   // influence radius, in the same normalized units
    float temp;    // peak temperature contributed at the source center
} heatsrc_t;
// Run-time configuration and working buffers of the heat solver.
typedef struct {
    unsigned maxiter;       // maximum number of iterations
    unsigned resolution;    // spatial resolution (inner points per dimension)
    int algorithm;          // 0=>Jacobi, 1=>Gauss (not consulted in this file)

    unsigned visres;        // visualization resolution (set equal to resolution by read_input)

    float *u, *uhelp;       // current and helper grids, (resolution+2)^2 floats each
    float *uvis;            // coarsened copy used for image output, (visres+2)^2 floats

    unsigned numsrcs;       // number of heat sources
    heatsrc_t *heatsrcs;    // array of numsrcs heat source descriptors
} algoparam_t;
// function declarations — host-side helpers defined later in this file
int read_input(FILE *infile, algoparam_t *param);
void print_params(algoparam_t *param);
int initialize(algoparam_t *param);
int finalize(algoparam_t *param);
void write_image(FILE *f, float *u,
                 unsigned sizex, unsigned sizey);
int coarsen(float *uold, unsigned oldx, unsigned oldy,
            float *unew, unsigned newx, unsigned newy);

// GPU kernels, implemented in a separate compilation unit
__global__ void gpu_Heat(float *h, float *g, int N);
__global__ void gpu_Diff_Reduce_Atomic(float *h, float *g, float *s, int N);
__global__ void gpu_Diff_Reduce(float *h, float *g, float *s, int N);
__global__ void gpu_Reduce(float *s, int N, int skip);

#define NB 8   // cache-blocking factor (blocks per dimension) used by cpu_jacobi

// NOTE(review): textbook unsafe min macro — each argument is evaluated
// twice, so never call it with side-effecting expressions.
#define min(a, b) ( ((a) < (b)) ? (a) : (b) )
// Sum of squared differences between the interior points of two
// sizex-by-sizey row-major grids (boundary rows/columns excluded).
float cpu_residual(float *u, float *utmp, unsigned sizex, unsigned sizey) {
    float acc = 0.0f;
    for (int row = 1; row < sizex - 1; ++row) {
        for (int col = 1; col < sizey - 1; ++col) {
            const float d = utmp[row * sizey + col] - u[row * sizey + col];
            acc += d * d;
        }
    }
    return acc;
}
// One Jacobi relaxation sweep over the interior of a sizex-by-sizey
// row-major grid, processed as an 8x8 arrangement of cache blocks
// (8 == NB).  Each interior point of utmp receives the average of its
// four neighbours in u; the squared update is accumulated and returned
// as the residual.
float cpu_jacobi(float *u, float *utmp, unsigned sizex, unsigned sizey) {
    const int nblocks = 8;               // == NB
    const int bx = sizex / nblocks;      // block extent in x
    const int by = sizey / nblocks;      // block extent in y
    float acc = 0.0f;

    for (int bi = 0; bi < nblocks; ++bi) {
        for (int bj = 0; bj < nblocks; ++bj) {
            // Clamp each block to the interior range [1, size-2].
            int iend = (bi + 1) * bx;
            if (iend > (int)(sizex - 2)) iend = sizex - 2;
            int jend = (bj + 1) * by;
            if (jend > (int)(sizey - 2)) jend = sizey - 2;

            for (int i = 1 + bi * bx; i <= iend; ++i) {
                for (int j = 1 + bj * by; j <= jend; ++j) {
                    const float avg = 0.25 * (u[i * sizey + (j - 1)] +  // left
                                              u[i * sizey + (j + 1)] +  // right
                                              u[(i - 1) * sizey + j] +  // top
                                              u[(i + 1) * sizey + j]);  // bottom
                    utmp[i * sizey + j] = avg;
                    const float d = avg - u[i * sizey + j];
                    acc += d * d;
                }
            }
        }
    }
    return acc;
}
// Print the command-line help for this program on stderr.
// `s` is the program name (argv[0]).
void usage(char *s) {
    FILE *out = stderr;
    fprintf(out, "Usage: %s <input file> -t threads -b blocks\n", s);
    fprintf(out, "       -t number of threads per block in each dimension (e.g. 16)\n");
}
/*
 * Program driver: parse the configuration, solve the heat equation with
 * Jacobi relaxation on the CPU, then again on the GPU, reporting the
 * timing/GFlop figures and the convergence residual for both runs, and
 * finally write the result as a PPM image ("heat.ppm").
 *
 * The GPU residual strategy is chosen at compile time with exactly one
 * of RESIDUAL_CPU, RESIDUAL_GPU_DIFF, RESIDUAL_GPU_REDUCE or
 * RESIDUAL_GPU_ATOMIC (BLOCK_SIZE must also be defined for the GPU
 * reduction modes).
 *
 * Fixes vs. the original version:
 *  - the GPU GFlop/s report recomputes `flop` from the GPU iteration
 *    count instead of reusing the CPU figure;
 *  - the RESIDUAL_GPU_ATOMIC accumulator holds a float, so it is
 *    allocated with sizeof(float) rather than sizeof(int);
 *  - dev_sum is released, and infile/resfile are closed;
 *  - the deprecated cudaThreadSynchronize() is replaced by its direct
 *    successor cudaDeviceSynchronize().
 */
int main(int argc, char *argv[]) {
    unsigned iter;
    FILE *infile, *resfile;
    char *resfilename;

    // algorithmic parameters
    algoparam_t param;
    int np;

    // check arguments
    if (argc < 4) {
        usage(argv[0]);
        return 1;
    }

    // check input file
    if (!(infile = fopen(argv[1], "r"))) {
        fprintf(stderr,
                "\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
        usage(argv[0]);
        return 1;
    }

    // check result file
    resfilename = "heat.ppm";
    if (!(resfile = fopen(resfilename, "w"))) {
        fprintf(stderr,
                "\nError: Cannot open \"%s\" for writing.\n\n",
                resfilename);
        usage(argv[0]);
        return 1;
    }

    // check input
    if (!read_input(infile, &param)) {
        fprintf(stderr, "\nError: Error parsing input file.\n\n");
        usage(argv[0]);
        return 1;
    }
    fclose(infile);   // configuration fully parsed

    // full size (param.resolution are only the inner points)
    np = param.resolution + 2;

    int Grid_Dim, Block_Dim;   // Grid and Block structure values
    if (strcmp(argv[2], "-t") == 0) {
        Block_Dim = atoi(argv[3]);
        Grid_Dim = np / Block_Dim + ((np % Block_Dim) != 0);   // ceiling division
        if ((Block_Dim * Block_Dim) > 512) {
            printf("Error -- too many threads in block, try again\n");
            return 1;
        }
    } else {
        fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", argv[0]);
        fprintf(stderr, "       -t number of threads per block in each dimension (e.g. 16)\n");
        return 0;
    }

    fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
    fprintf(stderr, "--------------------------------------------\n");
    print_params(&param);

    fprintf(stdout, "\nExecution on CPU (sequential)\n-----------------------------\n");
    if (!initialize(&param)) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    // starting time
    float elapsed_time_ms;     // which is applicable for asynchronous code also
    cudaEvent_t start, stop;   // using cuda events to measure time
    cudaEventCreate(&start);   // instrument code to measure start time
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);

    iter = 0;
    float residual;
    while (1) {
        residual = cpu_jacobi(param.u, param.uhelp, np, np);
        float *tmp = param.u;   // ping-pong the two host buffers
        param.u = param.uhelp;
        param.uhelp = tmp;

        iter++;

        // solution good enough ?
        if (residual < 0.00005) break;

        // max. iteration reached ? (no limit with maxiter=0)
        // NOTE(review): with maxiter==0 this actually stops after the
        // first sweep, since iter >= 0 is always true here.
        if (iter >= param.maxiter) break;
    }

    cudaEventRecord(stop, 0);   // instrument code to measue end time
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);

    // Flop count after iter iterations
    float flop = iter * 11.0 * param.resolution * param.resolution;

    fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
            flop / 1000000000.0,
            flop / elapsed_time_ms / 1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

    finalize(&param);

    fprintf(stdout, "\nExecution on GPU\n----------------\n");
    fprintf(stderr, "Number of threads per block in each dimension = %d\n", Block_Dim);
    fprintf(stderr, "Number of blocks per grid in each dimension = %d\n", Grid_Dim);

    if (!initialize(&param)) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    dim3 Grid(Grid_Dim, Grid_Dim);
    dim3 Block(Block_Dim, Block_Dim);

    // starting time
    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);

    float *dev_u, *dev_uhelp;

    // Allocation of the two device grids
    cudaMalloc(&dev_u, np * np * sizeof(float));
    cudaMalloc(&dev_uhelp, np * np * sizeof(float));

#if defined RESIDUAL_GPU_DIFF
    // Auxiliary buffer: one partial sum per 1-D block, summed on the host
    int Grid1D = ceil(float(np * np) / BLOCK_SIZE);
    float *dev_sum, sum[Grid1D];
    cudaMalloc(&dev_sum, Grid1D * sizeof(float));
#elif defined RESIDUAL_GPU_REDUCE
    // Auxiliary buffer: reduced in place on the device
    float *dev_sum;
    int Grid1D = ceil(float(np * np) / BLOCK_SIZE);
    cudaMalloc(&dev_sum, Grid1D * sizeof(float));
#elif defined RESIDUAL_GPU_ATOMIC
    // Auxiliary buffer: a single float accumulator
    // (fixed: was allocated with sizeof(int))
    float *dev_sum;
    int Grid1D = ceil(float(np * np) / BLOCK_SIZE);
    cudaMalloc(&dev_sum, sizeof(float));
#endif

    // Copy the initial state to the device
    cudaMemcpy(dev_u, param.u, np * np * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_uhelp, param.uhelp, np * np * sizeof(float), cudaMemcpyHostToDevice);

    iter = 0;
    while (1) {
        gpu_Heat <<< Grid, Block >>> (dev_u, dev_uhelp, np);
        cudaDeviceSynchronize();   // wait for all threads to complete
#if defined RESIDUAL_CPU
        // Copy from device to compute residual on CPU
        cudaMemcpy( param.u, dev_u, np*np*sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy( param.uhelp, dev_uhelp, np*np*sizeof(float), cudaMemcpyDeviceToHost);
        residual = cpu_residual (param.u, param.uhelp, np, np);
#elif defined RESIDUAL_GPU_DIFF
        // Per-block partial sums on the GPU, final accumulation on the host
        residual = 0;
        gpu_Diff_Reduce <<< Grid1D, BLOCK_SIZE >>> (dev_u, dev_uhelp, dev_sum, np);
        cudaDeviceSynchronize();
        cudaMemcpy(&sum, dev_sum, Grid1D*sizeof(float), cudaMemcpyDeviceToHost);
        for (int i =0; i < Grid1D; i++) residual += sum[i];
#elif defined RESIDUAL_GPU_REDUCE
        // Full tree reduction on the GPU
        residual = 0;
        gpu_Diff_Reduce <<< Grid1D, BLOCK_SIZE >>> (dev_u, dev_uhelp, dev_sum, np);
        cudaDeviceSynchronize();
        int grid = Grid1D;
        for (int skip = 1; grid > 1; skip = skip * BLOCK_SIZE) {
            grid = ceil(float(grid) / BLOCK_SIZE);
            // NOTE(review): np is passed as the N argument — verify
            // against gpu_Reduce's contract (element count vs. grid size).
            gpu_Reduce <<< grid, BLOCK_SIZE >>> (dev_sum, np, skip);
            cudaDeviceSynchronize();
        }
        cudaMemcpy(&residual, dev_sum, sizeof(float), cudaMemcpyDeviceToHost);
#elif defined RESIDUAL_GPU_ATOMIC
        // Single accumulator updated with atomics, cleared every sweep
        cudaMemset(dev_sum, 0, sizeof(float));
        gpu_Diff_Reduce_Atomic <<< Grid1D, BLOCK_SIZE >>> (dev_u, dev_uhelp, dev_sum, np);
        cudaDeviceSynchronize();
        cudaMemcpy(&residual, dev_sum, sizeof(float), cudaMemcpyDeviceToHost);
#else
#error "Residual mode must be defined"
#endif
        float *tmp = dev_u;   // ping-pong the device buffers
        dev_u = dev_uhelp;
        dev_uhelp = tmp;

        iter++;

        // solution good enough ?
        if (residual < 0.00005) break;

        // max. iteration reached ? (no limit with maxiter=0)
        if (iter >= param.maxiter) break;
    }

    // Copy the final state back from the device
    cudaMemcpy(param.u, dev_u, np * np * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(param.uhelp, dev_uhelp, np * np * sizeof(float), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(dev_u);
    cudaFree(dev_uhelp);
#if defined RESIDUAL_GPU_DIFF || defined RESIDUAL_GPU_REDUCE || defined RESIDUAL_GPU_ATOMIC
    cudaFree(dev_sum);   // fixed: was leaked
#endif

    cudaEventRecord(stop, 0);   // instrument code to measue end time
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);

    // fixed: recompute the flop count from the GPU iteration count
    // (the original reported the CPU figure here)
    flop = iter * 11.0 * param.resolution * param.resolution;

    fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
            flop / 1000000000.0,
            flop / elapsed_time_ms / 1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // for plot...
    coarsen(param.u, np, np,
            param.uvis, param.visres + 2, param.visres + 2);

    write_image(resfile, param.uvis,
                param.visres + 2,
                param.visres + 2);
    fclose(resfile);

    finalize(&param);

    return 0;
}
/*
 * Initialize the iterative solver
 * - allocate memory for matrices
 * - set boundary conditions according to configuration
 *
 * The two solution grids and the visualization buffer are calloc'ed, so
 * the interior starts at 0.0.  Each heat source imprints a temperature
 * profile along the four border rows/columns that falls off linearly
 * from `temp` at the source position to 0 at distance `range`
 * (positions are normalized so a border spans [0,1]).  Finally u is
 * copied into uhelp so both buffers start from the same state.
 * Returns 1 on success, 0 if any allocation failed.
 */
int initialize(algoparam_t *param) {
    int i, j;
    float dist;

    // total number of points (including border)
    const int np = param->resolution + 2;

    //
    // allocate memory
    //
    (param->u) = (float *) calloc(sizeof(float), np * np);
    (param->uhelp) = (float *) calloc(sizeof(float), np * np);
    (param->uvis) = (float *) calloc(sizeof(float),
                                     (param->visres + 2) *
                                     (param->visres + 2));

    if (!(param->u) || !(param->uhelp) || !(param->uvis)) {
        fprintf(stderr, "Error: Cannot allocate memory\n");
        return 0;
    }

    for (i = 0; i < param->numsrcs; i++) {
        /* top row */
        for (j = 0; j < np; j++) {
            // Euclidean distance from boundary point (j/(np-1), 0) to the source
            dist = sqrt(pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posx, 2) +
                        pow(param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[j] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }

        /* bottom row */
        for (j = 0; j < np; j++) {
            dist = sqrt(pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posx, 2) +
                        pow(1 - param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[(np - 1) * np + j] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }

        /* leftmost column */
        // corners are skipped here: already handled by the rows above
        for (j = 1; j < np - 1; j++) {
            dist = sqrt(pow(param->heatsrcs[i].posx, 2) +
                        pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[j * np] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }

        /* rightmost column */
        for (j = 1; j < np - 1; j++) {
            dist = sqrt(pow(1 - param->heatsrcs[i].posx, 2) +
                        pow((float) j / (float) (np - 1) -
                            param->heatsrcs[i].posy, 2));

            if (dist <= param->heatsrcs[i].range) {
                (param->u)[j * np + (np - 1)] +=
                    (param->heatsrcs[i].range - dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }
    }

    // Copy u into uhelp
    float *putmp, *pu;
    pu = param->u;
    putmp = param->uhelp;
    for (j = 0; j < np; j++)
        for (i = 0; i < np; i++)
            *putmp++ = *pu++;

    return 1;
}
/*
* free used memory
*/
/* Release the three solver grids and null their pointers so a second
 * call is a harmless no-op.  Always returns 1. */
int finalize(algoparam_t *param) {
    float **grids[] = { &param->u, &param->uhelp, &param->uvis };
    for (int k = 0; k < 3; k++) {
        if (*grids[k]) {
            free(*grids[k]);
            *grids[k] = 0;
        }
    }
    return 1;
}
/*
* write the given temperature u matrix to rgb values
* and write the resulting image to file f
*/
/*
 * Render the temperature matrix u (sizex * sizey) as an ASCII PPM (P3)
 * heat-map on stream f.  Values are normalized to the observed [min, max]
 * range and mapped through a 1024-entry blue->green->yellow->red ramp.
 *
 * Fixes over the original:
 *  - min/max are floats but were seeded with DBL_MAX; use FLT_MAX.
 *  - a constant matrix (max == min) divided by zero and indexed the
 *    palette out of bounds; the range is now guarded.
 *  - the palette index is clamped to [0, 1023] against rounding overshoot.
 */
void write_image(FILE *f, float *u,
                 unsigned sizex, unsigned sizey) {
    /* RGB palette, built from hot (index 1023) down to cold (index 0) */
    unsigned char r[1024], g[1024], b[1024];
    int i, j, k;
    float min, max;

    j = 1023;
    for (i = 0; i < 256; i++) { r[j] = 255;     g[j] = i;       b[j] = 0;   j--; }
    for (i = 0; i < 256; i++) { r[j] = 255 - i; g[j] = 255;     b[j] = 0;   j--; }
    for (i = 0; i < 256; i++) { r[j] = 0;       g[j] = 255;     b[j] = i;   j--; }
    for (i = 0; i < 256; i++) { r[j] = 0;       g[j] = 255 - i; b[j] = 255; j--; }

    /* find minimum and maximum (FLT_MAX, not DBL_MAX: these are floats) */
    min = FLT_MAX;
    max = -FLT_MAX;
    for (i = 0; i < (int) sizey; i++) {
        for (j = 0; j < (int) sizex; j++) {
            if (u[i * sizex + j] > max)
                max = u[i * sizex + j];
            if (u[i * sizex + j] < min)
                min = u[i * sizex + j];
        }
    }

    /* guard against a constant matrix: avoid 0/0 in the normalization */
    float range = max - min;
    if (range <= 0.0f)
        range = 1.0f;

    fprintf(f, "P3\n");
    fprintf(f, "%u %u\n", sizex, sizey);
    fprintf(f, "%u\n", 255);
    for (i = 0; i < (int) sizey; i++) {
        for (j = 0; j < (int) sizex; j++) {
            k = (int) (1023.0 * (u[i * sizex + j] - min) / range);
            if (k < 0) k = 0;          /* clamp against float rounding */
            if (k > 1023) k = 1023;
            fprintf(f, "%d %d %d ", r[k], g[k], b[k]);
        }
        fprintf(f, "\n");
    }
}
/* Down-sample uold (oldx * oldy) into unew (newx * newy) by plain
 * striding: each output cell copies the top-left sample of its tile.
 * No averaging is performed (see NOTE below).  When the new extent is
 * not smaller, the stride is 1 and the copy is limited by the old size.
 * Always returns 1. */
int coarsen(float *uold, unsigned oldx, unsigned oldy,
            float *unew, unsigned newx, unsigned newy) {
    unsigned xstride = 1, ystride = 1;
    unsigned xlimit = newx, ylimit = newy;

    if (oldx > newx)
        xstride = oldx / newx;
    else
        xlimit = oldx;

    if (oldy > newy)
        ystride = oldy / newy;
    else
        ylimit = oldy;

    // NOTE: this only takes the top-left corner of each tile and stops
    // one short of the limit; it doesn't do any real coarsening.
    for (unsigned row = 0; row + 1 < ylimit; row++) {
        for (unsigned col = 0; col + 1 < xlimit; col++) {
            unew[row * newx + col] = uold[row * ystride * oldx + col * xstride];
        }
    }
    return 1;
}
#define BUFSIZE 100
/*
 * Parse the solver configuration from `infile`:
 *   line 1: maximum iteration count
 *   line 2: grid resolution
 *   line 3: number of heat sources, followed by one "posx posy range temp"
 *           line per source.
 * Returns 1 on success, 0 on any read/parse/allocation failure.
 *
 * Fixes over the original: fgets() results were ignored, so a truncated
 * file made sscanf parse a stale/uninitialized buffer; malloc() was also
 * unchecked.
 */
int read_input(FILE *infile, algoparam_t *param) {
    int i, n;
    char buf[BUFSIZE];

    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->maxiter));
    if (n != 1)
        return 0;

    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->resolution));
    if (n != 1)
        return 0;

    /* visualization grid defaults to the simulation resolution */
    param->visres = param->resolution;

    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->numsrcs));
    if (n != 1)
        return 0;

    (param->heatsrcs) =
        (heatsrc_t *) malloc(sizeof(heatsrc_t) * (param->numsrcs));
    if (!param->heatsrcs)
        return 0;

    for (i = 0; i < param->numsrcs; i++) {
        if (!fgets(buf, BUFSIZE, infile))
            return 0;
        n = sscanf(buf, "%f %f %f %f",
                   &(param->heatsrcs[i].posx),
                   &(param->heatsrcs[i].posy),
                   &(param->heatsrcs[i].range),
                   &(param->heatsrcs[i].temp));
        if (n != 4)
            return 0;
    }
    return 1;
}
/* Dump the parsed configuration (iteration limit, resolution, and one
 * line per heat source) to stdout in a human-readable form. */
void print_params(algoparam_t *param) {
    fprintf(stdout, "Iterations : %u\n", param->maxiter);
    fprintf(stdout, "Resolution : %u\n", param->resolution);
    fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);
    for (int s = 0; s < param->numsrcs; s++) {
        heatsrc_t *src = &param->heatsrcs[s];
        fprintf(stdout, " %2d: (%2.2f, %2.2f) %2.2f %2.2f \n",
                s + 1, src->posx, src->posy, src->range, src->temp);
    }
}
|
64713e53798db4cde30589e592fa831eefdf5df8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
const int N = 16;
const int blocksize = 16;
// Toy kernel: each thread accumulates b[i] into a[i] ten million times.
// The long loop exists only to give the GPU measurable work for the
// host-side wall-clock timing.
// NOTE(review): no bounds check — assumes the launch uses exactly N (=16)
// threads in x, as the host driver below does.
__global__
void hello(int *a, int *b)
{
    for (int i=0; i<10000000; i++){
        a[threadIdx.x] += b[threadIdx.x];
    }
}
/*
 * Host driver: fills two 16-element vectors with ones, times one kernel
 * launch (plus the surrounding allocations and copies) with gettimeofday,
 * prints the accumulated result and the elapsed seconds:microseconds.
 *
 * Fixes over the original:
 *  - when the microsecond field borrowed (end usec < start usec), the
 *    seconds difference was not decremented by one;
 *  - tv_sec/tv_usec differences (long) were printed with %u.
 */
int main()
{
    int a[N];
    int b[N];
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = 1;
    }

    struct timeval start_tv;
    gettimeofday(&start_tv, NULL);

    int *ad;
    int *bd;
    const int csize = N * sizeof(int);
    const int isize = N * sizeof(int);

    hipMalloc((void**)&ad, csize);
    hipMalloc((void**)&bd, isize);
    hipMemcpy(ad, a, csize, hipMemcpyHostToDevice);
    hipMemcpy(bd, b, isize, hipMemcpyHostToDevice);

    dim3 dimBlock(blocksize, 1);
    dim3 dimGrid(1, 1);
    hipLaunchKernelGGL((hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
    /* blocking copy-back: the kernel has completed after this returns */
    hipMemcpy(a, ad, csize, hipMemcpyDeviceToHost);
    hipFree(ad);
    hipFree(bd);
    hipDeviceSynchronize();

    struct timeval end_tv;
    gettimeofday(&end_tv, NULL);

    for (int i = 0; i < N; i++) {
        printf("%d ", a[i]);
    }
    printf("\n");

    /* elapsed = end - start, with a proper microsecond borrow */
    long secs = end_tv.tv_sec - start_tv.tv_sec;
    long usecs = end_tv.tv_usec - start_tv.tv_usec;
    if (usecs < 0) {
        secs -= 1;              /* the original forgot this borrow */
        usecs += 1000000;
    }
    printf("time %ld:%ld\n", secs, usecs);

    return EXIT_SUCCESS;
}
| 64713e53798db4cde30589e592fa831eefdf5df8.cu | #include <stdio.h>
#include <sys/time.h>
#include <time.h>
const int N = 16;
const int blocksize = 16;
// Toy kernel: each thread accumulates b[i] into a[i] ten million times.
// The long loop exists only to give the GPU measurable work for the
// host-side wall-clock timing.
// NOTE(review): no bounds check — assumes the launch uses exactly N (=16)
// threads in x, as the host driver below does.
__global__
void hello(int *a, int *b)
{
    for (int i=0; i<10000000; i++){
        a[threadIdx.x] += b[threadIdx.x];
    }
}
/*
 * Host driver: fills two 16-element vectors with ones, times one kernel
 * launch (plus the surrounding allocations and copies) with gettimeofday,
 * prints the accumulated result and the elapsed seconds:microseconds.
 *
 * Fixes over the original:
 *  - when the microsecond field borrowed (end usec < start usec), the
 *    seconds difference was not decremented by one;
 *  - tv_sec/tv_usec differences (long) were printed with %u.
 */
int main()
{
    int a[N];
    int b[N];
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = 1;
    }

    struct timeval start_tv;
    gettimeofday(&start_tv, NULL);

    int *ad;
    int *bd;
    const int csize = N * sizeof(int);
    const int isize = N * sizeof(int);

    cudaMalloc((void**)&ad, csize);
    cudaMalloc((void**)&bd, isize);
    cudaMemcpy(ad, a, csize, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, isize, cudaMemcpyHostToDevice);

    dim3 dimBlock(blocksize, 1);
    dim3 dimGrid(1, 1);
    hello<<<dimGrid, dimBlock>>>(ad, bd);
    /* blocking copy-back: the kernel has completed after this returns */
    cudaMemcpy(a, ad, csize, cudaMemcpyDeviceToHost);
    cudaFree(ad);
    cudaFree(bd);
    cudaDeviceSynchronize();

    struct timeval end_tv;
    gettimeofday(&end_tv, NULL);

    for (int i = 0; i < N; i++) {
        printf("%d ", a[i]);
    }
    printf("\n");

    /* elapsed = end - start, with a proper microsecond borrow */
    long secs = end_tv.tv_sec - start_tv.tv_sec;
    long usecs = end_tv.tv_usec - start_tv.tv_usec;
    if (usecs < 0) {
        secs -= 1;              /* the original forgot this borrow */
        usecs += 1000000;
    }
    printf("time %ld:%ld\n", secs, usecs);

    return EXIT_SUCCESS;
}
a9351fd0482368706d7761e38d250dda02f5dc58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/FusedRNNKernel.cu"
#else
#include <cstdarg>
#include "../common.h"
#define DATATYPE TensorUtils<THCTensor>::DataType
//factor will be 3 for GRU and 4 for LSTM
/*
 * Sanity-check a variadic list of `count` tensors: the first two (input,
 * hidden) must have equal element counts, and every further tensor must
 * hold exactly 1/factor of that many elements (factor = 3 for GRU, 4 for
 * LSTM).  Every tensor must fit within MAX_CUTORCH_DIMS dimensions.
 *
 * Fix over the original: the THArgCheck format string has a single %u but
 * was passed (arg, factor), so the error message printed the argument
 * index where the factor was intended.  The stray argument is removed.
 */
void THNN_(FusedRNNAssertSizes)(THCState *state, int factor, int count, ...)
{
  va_list list;
  va_start(list, count);
  THCTensor *input = va_arg(list, THCTensor*);
  THCTensor *hidden = va_arg(list, THCTensor*);
  THArgCheck(THCTensor_(nElement)(state, input) ==
             THCTensor_(nElement)(state, hidden),
             3, "Input and Hidden tensor sizes should be the same.");
  THAssertMsg(TensorUtils<THCTensor>::getDims(state, input) <= MAX_CUTORCH_DIMS,
              "Tensor dimension is too large.");
  THAssertMsg(TensorUtils<THCTensor>::getDims(state, hidden) <= MAX_CUTORCH_DIMS,
              "Tensor dimension is too large.");
  for (int arg=2; arg < count; ++arg){
    THCTensor *tens = va_arg(list, THCTensor*);
    /* %u now consumes `factor`; the leading `arg` vararg previously shadowed it */
    THArgCheck(THCTensor_(nElement)(state, input) ==
               THCTensor_(nElement)(state, tens)*factor,
               3, "A pointwise tensor was not the right size, should have 1/%u the elements of input/hidden tensor.", factor);
    THAssertMsg(TensorUtils<THCTensor>::getDims(state, tens) <= MAX_CUTORCH_DIMS,
                "Tensor dimension is too large.");
  }
  va_end(list);
}
/*
 * Inspect `count` tensors and decide a common kernel specialization:
 *   -1  -> dimensionalities disagree (use the generic-dims kernel)
 *   -2  -> all tensors are contiguous (fully collapsible to 1-D)
 *   else -> the shared dimensionality of all tensors.
 */
int THNN_(minIndexType)(THCState *state, int count, ...)
{
  va_list args;
  va_start(args, count);

  THCTensor *first = va_arg(args, THCTensor*);
  const int dims = TensorUtils<THCTensor>::getDims(state, first);
  bool collapsible = THCTensor_(isContiguous)(state, first);

  for (int k = 1; k < count; ++k) {
    THCTensor *t = va_arg(args, THCTensor*);
    collapsible = collapsible && THCTensor_(isContiguous)(state, t);
    if (TensorUtils<THCTensor>::getDims(state, t) != dims) {
      va_end(args);
      return -1;
    }
  }
  va_end(args);

  return collapsible ? -2 : dims;
}
/* True iff every one of the `count` tensors can be indexed with 32-bit
 * arithmetic; a single oversized tensor forces the 64-bit kernel path. */
bool THNN_(canUse32BitIndexMath)(THCState *state, int count, ...)
{
  va_list args;
  va_start(args, count);

  bool all32 = true;
  for (int k = 0; k < count && all32; ++k) {
    THCTensor *t = va_arg(args, THCTensor*);
    all32 = TensorUtils<THCTensor>::canUse32BitIndexMath(state, t);
  }
  va_end(args);
  return all32;
}
#define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \
D_TENSOR.data[IndexToOffset<T, IndexType, Dims>::get(INDEX, D_TENSOR)]
#define H2F(input) __half2float(input)
#define F2H(input) __float2half(input)
// Fused GRU pointwise forward.
// Input and Hidden hold the three pre-activation gate slabs (r, i, n) per
// element, laid out as [..., 3*hsz]; _hx is the previous hidden state and
// _hy receives the new one.  Bias1/Bias2 may be absent (data == NULL).
// Gate activations are written back into Input/Hidden so GRUBackward can
// reuse them.  Grid-stride loop over totalElements = numel(_hx).
// In the half-precision build all math is done in float and rounded back.
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUForward)(TensorInfo<T, IndexType> Input,
                  TensorInfo<T, IndexType> Hidden,
                  TensorInfo<T, IndexType> Bias1,
                  TensorInfo<T, IndexType> Bias2,
                  TensorInfo<T, IndexType> _hx,
                  TensorInfo<T, IndexType> _hy,
                  IndexType hsz,
                  IndexType totalElements)
{
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x)
  {
    // offset of this element's reset-gate slot inside the 3*hsz gate row
    IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;

    // pointers (writable: reused to stash backward inputs)
    T* ir = &DEVICE_LINEAR_GET(Input, offset+0*hsz);
    T* ii = &DEVICE_LINEAR_GET(Input, offset+1*hsz);
    T* in = &DEVICE_LINEAR_GET(Input, offset+2*hsz);
    T* hr = &DEVICE_LINEAR_GET(Hidden,offset+0*hsz);
    T* hi = &DEVICE_LINEAR_GET(Hidden,offset+1*hsz);
    T hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz);

    T hx = DEVICE_LINEAR_GET(_hx, linearIndex);
    T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);

    bool has_bias = (Bias1.data != NULL);

    T b1r, b1i, b1n, b2r, b2i, b2n;

    if(has_bias){
      // biases are indexed by feature only (broadcast over the batch)
      b1r = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+0*hsz);
      b1i = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+1*hsz);
      b1n = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+2*hsz);
      b2r = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+0*hsz);
      b2i = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+1*hsz);
      b2n = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+2*hsz);
    }else{
#ifndef THC_REAL_IS_HALF
      b1r = 0.0; b1i = 0.0; b1n = 0.0;
      b2r = 0.0; b2i = 0.0; b2n = 0.0;
#else
      b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0);
      b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0);
#endif
    }

#ifndef THC_REAL_IS_HALF
    T rg, ig, ng;
    // r = sigmoid(Wr x + Ur h + br), i = sigmoid(Wi x + Ui h + bi)
    rg = *ir + *hr + b1r + b2r;
    ig = *ii + *hi + b1i + b2i;

    TensorSigmoidOp<real>()(&rg, &rg);
    TensorSigmoidOp<real>()(&ig, &ig);
    // n = tanh(Wn x + bn1 + r * (Un h + bn2));  hy = n + i * (hx - n)
    ng = *in + b1n + rg * (hn + b2n);
    ng = THCNumerics<T>::tanh(ng);
    *hy = ng + ig * (hx - ng);

    //SAVE FOR BACKWARDS
    *ir = rg;
    *ii = ig;
    *in = ng;
    *hr = hx;
    *hi = hn + b2n;
#else
    // half build: promote to float, compute, round results back to half
    float rg, ig, ng;
    rg = H2F(*ir) + H2F(*hr) + H2F(b1r) + H2F(b2r);
    ig = H2F(*ii) + H2F(*hi) + H2F(b1i) + H2F(b2i);

    TensorSigmoidOp<float>()(&rg, &rg);
    TensorSigmoidOp<float>()(&ig, &ig);
    ng = H2F(*in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) );
    ng = THCNumerics<float>::tanh(ng);
    *hy = F2H( ng + ig * ( H2F(hx)-ng ) );

    //SAVE FOR BACKWARDS
    *ir = F2H(rg);
    *ii = F2H(ig);
    *in = F2H(ng);
    *hr = hx;
    *hi = F2H( H2F(hn) + H2F(b2n) );
#endif
  }
}
// Fused GRU pointwise backward.
// `input`/`hidden` hold the activations saved by GRUForward (rg, ig, ng
// and hx, hn+b2n, hn); they are overwritten in place with the gate
// gradients.  `gradinput` receives d(loss)/d(hx).
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUBackward)(TensorInfo<T, IndexType> input,
                   TensorInfo<T, IndexType> hidden,
                   TensorInfo<T, IndexType> gradoutput,
                   TensorInfo<T, IndexType> gradinput,
                   IndexType hsz,
                   IndexType totalElements)
{
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;;  // (stray second ';' is a harmless empty statement)

    //will return input grads here
    T* rg = &DEVICE_LINEAR_GET(input, offset+0*hsz);
    T* ig = &DEVICE_LINEAR_GET(input, offset+1*hsz);
    T* ng = &DEVICE_LINEAR_GET(input, offset+2*hsz);

    //will return hidden grads here
    T* hx = &DEVICE_LINEAR_GET(hidden, offset+0*hsz);
    T* hn = &DEVICE_LINEAR_GET(hidden, offset+1*hsz);
    T* oghn=&DEVICE_LINEAR_GET(hidden, offset+2*hsz);

    T* gi = &DEVICE_LINEAR_GET(gradinput, linearIndex);
    T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);

#ifndef THC_REAL_IS_HALF
    // hy = ng + ig*(hx - ng): chain back through the gate nonlinearities
    T gig = (go)*(*hx-*ng)*( 1-(*ig) )*(*ig);      // sigmoid' on input gate
    T ghx = (go)*(*ig);                            // direct path to hx
    T gin = (go)*(1-*ig)*( 1-(*ng)*(*ng) );        // tanh' on candidate
    T ghn = (gin) * (*rg);
    T grg = (gin)*(*hn)*( 1-(*rg) )*(*rg);         // sigmoid' on reset gate

    *gi = ghx;
    *rg = grg;
    *ig = gig;
    *ng = gin;
    *hx = grg;
    *hn = gig;
    *oghn = ghn;
#else
    // half build: promote to float, compute, round results back to half
    float gig = H2F(go)*( H2F(*hx)-H2F(*ng) )*( 1-H2F(*ig) )*H2F(*ig);
    float ghx = H2F(go)*H2F(*ig);
    float gin = H2F(go)*( 1-H2F(*ig) )*( 1-H2F(*ng)*H2F(*ng) );
    float ghn = H2F(gin) * H2F(*rg);
    float grg = H2F(gin)*H2F(*hn)*( 1-H2F(*rg) )*H2F(*rg);

    *gi = F2H(ghx);
    *rg = F2H(grg);
    *ig = F2H(gig);
    *ng = F2H(gin);
    *hx = F2H(grg);
    *hn = F2H(gig);
    *oghn = F2H(ghn);
#endif
  }
}
// Fused LSTM pointwise forward.
// `input`/`hidden` hold the four pre-activation gate slabs (i, f, c, o)
// per element, laid out as [..., 4*hsz]; _cx is the previous cell state,
// _hy/_cy receive the new hidden and cell states.  bias1/bias2 may be
// absent (data == NULL).  Gate activations are written back into `input`
// so LSTMBackward can reuse them (cy and cx are saved by the caller).
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMForward)(TensorInfo<T, IndexType> input,
                   TensorInfo<T, IndexType> hidden,
                   TensorInfo<T, IndexType> bias1,
                   TensorInfo<T, IndexType> bias2,
                   TensorInfo<T, IndexType> _cx,
                   TensorInfo<T, IndexType> _hy,
                   TensorInfo<T, IndexType> _cy,
                   IndexType hsz,
                   IndexType totalElements)
{
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x)
  {
    // offset of this element's input-gate slot inside the 4*hsz gate row
    IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;

    T* iig = &DEVICE_LINEAR_GET(input, offset+0*hsz);
    T* ifg = &DEVICE_LINEAR_GET(input, offset+1*hsz);
    T* icg = &DEVICE_LINEAR_GET(input, offset+2*hsz);
    T* iog = &DEVICE_LINEAR_GET(input, offset+3*hsz);

    T hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz);
    T hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz);
    T hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz);
    T hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz);

    T cx = DEVICE_LINEAR_GET(_cx, linearIndex);

    T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
    T* cy = &DEVICE_LINEAR_GET(_cy, linearIndex);

    bool has_bias = (bias1.data != NULL);

    T b1i, b1f, b1c, b1o;
    T b2i, b2f, b2c, b2o;

    if(has_bias){
      // biases are indexed by feature only (broadcast over the batch)
      b1i = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+0*hsz);
      b1f = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+1*hsz);
      b1c = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+2*hsz);
      b1o = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+3*hsz);

      b2i = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+0*hsz);
      b2f = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+1*hsz);
      b2c = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+2*hsz);
      b2o = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+3*hsz);
    }else{
#ifndef THC_REAL_IS_HALF
      b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0;
      b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0;
#else
      b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0);
      b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0);
#endif
    }

#ifndef THC_REAL_IS_HALF
    T ig, fg, cg, og;
    ig = *iig + hig + b1i + b2i;
    fg = *ifg + hfg + b1f + b2f;
    cg = *icg + hcg + b1c + b2c;
    og = *iog + hog + b1o + b2o;

    TensorSigmoidOp<real>()(&ig, &ig);
    TensorSigmoidOp<real>()(&fg, &fg);
    cg = THCNumerics<T>::tanh(cg);
    TensorSigmoidOp<real>()(&og, &og);

    // cy = f*cx + i*c;  hy = o*tanh(cy)
    *cy = (fg * cx) + (ig * cg);
    *hy = og * THCNumerics<T>::tanh(*cy);

    // SAVE FOR BACKWARDS
    *iig = ig;
    *ifg = fg;
    *icg = cg;
    *iog = og;
#else
    // half build: promote to float, compute, round results back to half
    float ig, fg, cg, og;
    float f_hy, f_cy;

    ig = H2F(*iig) + H2F(hig) + H2F(b1i) + H2F(b2i);
    fg = H2F(*ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f);
    cg = H2F(*icg) + H2F(hcg) + H2F(b1c) + H2F(b2c);
    og = H2F(*iog) + H2F(hog) + H2F(b1o) + H2F(b2o);

    TensorSigmoidOp<float>()(&ig, &ig);
    TensorSigmoidOp<float>()(&fg, &fg);
    cg = THCNumerics<float>::tanh(cg);
    TensorSigmoidOp<float>()(&og, &og);

    f_cy = (fg * H2F(cx) ) + (ig * cg);
    f_hy = og * THCNumerics<float>::tanh(f_cy);

    *hy = F2H(f_hy);
    *cy = F2H(f_cy);

    //SAVE FOR BACKWARDS
    //Also need cy and cx but can be saved easily in python
    *iig = F2H(ig);
    *ifg = F2H(fg);
    *icg = F2H(cg);
    *iog = F2H(og);
#endif
  }
}
// Fused LSTM pointwise backward.
// `input` holds the gate activations saved by LSTMForward (ig, fg, cg, og);
// `hidden` receives the pre-activation gate gradients; `gradinput`
// receives d(loss)/d(cx).  go = d(loss)/d(hy), goc = d(loss)/d(cy).
//
// Fix over the original: the THC_REAL_IS_HALF branch disagreed with the
// float branch — gig/gfg/gcg were paired with the wrong forward values
// (cx/cg/fg instead of cg/cx/ig), and the cell-state gradient was never
// scaled by the forget gate before being stored in *gi.  The half branch
// now mirrors the float branch exactly.
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMBackward)(TensorInfo<T, IndexType> input,
                    TensorInfo<T, IndexType> hidden,
                    TensorInfo<T, IndexType> _cx,
                    TensorInfo<T, IndexType> _cy,
                    TensorInfo<T, IndexType> gradoutput,
                    TensorInfo<T, IndexType> gradoutputcell,
                    TensorInfo<T, IndexType> gradinput,
                    IndexType hsz,
                    IndexType totalElements)
{
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;

    // forward-pass gate activations
    T ig = DEVICE_LINEAR_GET(input, offset+0*hsz);
    T fg = DEVICE_LINEAR_GET(input, offset+1*hsz);
    T cg = DEVICE_LINEAR_GET(input, offset+2*hsz);
    T og = DEVICE_LINEAR_GET(input, offset+3*hsz);

    // gate gradients are written here
    T* ih = &DEVICE_LINEAR_GET(hidden, offset+0*hsz);
    T* fh = &DEVICE_LINEAR_GET(hidden, offset+1*hsz);
    T* ch = &DEVICE_LINEAR_GET(hidden, offset+2*hsz);
    T* oh = &DEVICE_LINEAR_GET(hidden, offset+3*hsz);

    //will return hidden grads here
    T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
    T cy = DEVICE_LINEAR_GET(_cy, linearIndex);

    T* gi = &DEVICE_LINEAR_GET(gradinput, linearIndex);
    T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
    T goc= DEVICE_LINEAR_GET(gradoutputcell, linearIndex);

#ifndef THC_REAL_IS_HALF
    T gcx = THCNumerics<T>::tanh(cy);

    T gog = go * gcx;
    gcx = go * og * ( 1 - gcx*gcx) + goc;   // total cell-state gradient

    // cy = ig*cg + fg*cx  =>  d/d ig = cg, d/d fg = cx, d/d cg = ig, d/d cx = fg
    T gig = gcx * cg;
    T gfg = gcx * cx;
    T gcg = gcx * ig;

    gcx = gcx * fg;

    gig = gig * (1-ig) * ig;      // sigmoid'
    gfg = gfg * (1-fg) * fg;      // sigmoid'
    gcg = gcg * (1-cg*cg);        // tanh'
    gog = gog * (1-og) * og;      // sigmoid'

    *ih = gig;
    *fh = gfg;
    *ch = gcg;
    *oh = gog;
    *gi = gcx;
#else
    // half build: promote to float and mirror the branch above exactly
    float gcx = THCNumerics<float>::tanh(H2F(cy));

    float gog = H2F(go) * gcx;
    gcx = H2F(go) * H2F(og) * ( 1 - gcx*gcx) + H2F(goc);

    float gig = gcx * H2F(cg);
    float gfg = gcx * H2F(cx);
    float gcg = gcx * H2F(ig);

    gcx = gcx * H2F(fg);

    gig = gig * ( (1-H2F(ig))*H2F(ig) );
    gfg = gfg * ( (1-H2F(fg))*H2F(fg) );
    gcg = gcg * (1-H2F(cg)*H2F(cg));
    gog = gog * ( (1-H2F(og))*H2F(og) );

    *ih = F2H(gig);
    *fh = F2H(gfg);
    *ch = F2H(gcg);
    *oh = F2H(gog);
    *gi = F2H(gcx);
#endif
  }
}
// ************ START Create function calls ********** //
// Expands a launch macro for a concrete (index type, dims) pair.
#define FILL_FUNCTION(ITYPE, DIM, FUNCTION) FUNCTION(ITYPE, DIM)
// Dispatches on the dim code from minIndexType(): -2 = collapsed
// contiguous, 1/2 = specialized ranks, anything else = generic (-1).
#define FILL_DIM(ITYPE, DIM, FUNCTION) \
  switch (DIM) { \
  case -2: \
    FILL_FUNCTION(ITYPE, -2, FUNCTION); \
    break; \
  case 1: \
    FILL_FUNCTION(ITYPE, 1, FUNCTION); \
    break; \
  case 2: \
    FILL_FUNCTION(ITYPE, 2, FUNCTION); \
    break; \
  default: \
    FILL_FUNCTION(ITYPE, -1, FUNCTION); \
    break; \
  }
// Kernel-launch bodies; they capture grid/block/TensorInfo locals from the
// enclosing *_ind_wrap function that expands them via FILL_DIM.
#define LSTM_FORWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(LSTMForward) \
  <DATATYPE, ITYPE, DIM>) \
  , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
  inputI, hiddenI, \
  bias1I, bias2I, cxI, hyI, cyI, \
  hid_size, totalElements);
#define LSTM_BACKWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(LSTMBackward) \
  <DATATYPE, ITYPE, DIM>) \
  , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
  inputI, hiddenI, cxI, cyI, \
  gradoutI, gradoutcI, gradinI, \
  hid_size, totalElements);
#define GRU_FORWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(GRUForward)<DATATYPE, ITYPE, DIM>) \
  , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
  inputI, hiddenI, bias1I, bias2I, hxI, hyI, \
  hid_size, totalElements);
#define GRU_BACKWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(GRUBackward) \
  <DATATYPE, ITYPE, DIM>) \
  , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
  inputI, hiddenI, gradoutI, gradinI, hid_size, totalElements);
// ************ END Create actual function calls ************ //
// Host wrapper (templated on index type) that prepares TensorInfo views,
// validates bias sizes (4 * feature size), collapses dims for the fully
// contiguous case, and launches LSTMForward via FILL_DIM.
// bias1/bias2 may be NULL; a null TensorInfo is substituted so the kernel
// can test `bias1.data != NULL`.
template<typename INDTYPE>
void THNN_(LSTM_forw_ind_wrap)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *bias1,
   THCTensor *bias2,
   THCTensor *cx,
   THCTensor *hy,
   THCTensor *cy)
{
  bool has_bias = (bias1!=NULL);
  int maxDim;
  if(has_bias){
    THCUNN_assertSameGPU(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
    maxDim = THNN_(minIndexType)
      (state, 7, input, hidden, bias1, bias2, hy, cy, cx);
  }else{
    THCUNN_assertSameGPU(state, 5, input, hidden, hy, cy, cx);
    maxDim = THNN_(minIndexType)
      (state, 5, input, hidden, hy, cy, cx);
  }

  ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, cx);

  const dim3 block = getApplyBlock();
  dim3 grid;
  THAssertMsg(getApplyGrid(state, totalElements, grid),
              "Could not get grid size for pointwise apply.");

  TensorInfo<DATATYPE, INDTYPE> inputI =
    getTensorInfo<THCTensor, INDTYPE>(state, input);
  TensorInfo<DATATYPE, INDTYPE> hiddenI =
    getTensorInfo<THCTensor, INDTYPE>(state, hidden);
  TensorInfo<DATATYPE, INDTYPE> cxI =
    getTensorInfo<THCTensor, INDTYPE>(state, cx);
  TensorInfo<DATATYPE, INDTYPE> hyI =
    getTensorInfo<THCTensor, INDTYPE>(state, hy);
  TensorInfo<DATATYPE, INDTYPE> cyI =
    getTensorInfo<THCTensor, INDTYPE>(state, cy);

  // feature size = last dimension of the cell state
  INDTYPE hid_size = cxI.sizes[cxI.dims-1];
  if(has_bias){
    THAssertMsg( hid_size*4 == THCTensor_(nElement)(state, bias1) &&
                 hid_size*4 == THCTensor_(nElement)(state, bias2),
                 "Bias in pointwise operation is an incorrect size, must be 4 x feature size.");
  }

  if(maxDim == -2){
    inputI.collapseDims();
    hiddenI.collapseDims();
    cxI.collapseDims();
    hyI.collapseDims();
    cyI.collapseDims();
  }

  INDTYPE zero[1] = {0};
  TensorInfo<DATATYPE, INDTYPE> nullinfo =
    TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
  TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
  TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;

  if(has_bias){
    bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
    bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
    if(maxDim == -2){
      bias1I.collapseDims();
      bias2I.collapseDims();
    }
  }

  FILL_DIM(INDTYPE, maxDim, LSTM_FORWARD);
}
// Public entry point for the fused LSTM forward: resizes the outputs,
// validates tensor sizes, then dispatches to the 32- or 64-bit index
// wrapper depending on whether every tensor fits 32-bit indexing.
void THNN_(LSTMFused_updateOutput)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *bias1,
   THCTensor *bias2,
   THCTensor *cx,
   THCTensor *hy,
   THCTensor *cy)
{
  THCTensor_(resizeAs)(state, hy, cx);
  THCTensor_(resizeAs)(state, cy, cx);
  THNN_(FusedRNNAssertSizes)(state, 4, 5, input, hidden, hy, cy, cx);

  bool has_bias = (bias1!=NULL);
  bool canUse32bi;
  if(has_bias){
    canUse32bi = THNN_(canUse32BitIndexMath)
      (state, 7, input, hidden, bias1, bias2, hy, cy, cx);
  }else{
    canUse32bi = THNN_(canUse32BitIndexMath)
      (state, 5, input, hidden, hy, cy, cx);
  }

  if(canUse32bi){
    THNN_(LSTM_forw_ind_wrap)<unsigned int>
      (state, input, hidden, bias1, bias2, cx, hy, cy);
  }else{
    THNN_(LSTM_forw_ind_wrap)<unsigned long>
      (state, input, hidden, bias1, bias2, cx, hy, cy);
  }
  THCudaCheck(hipGetLastError());
}
// Host wrapper (templated on index type) that prepares TensorInfo views,
// collapses dims for the fully contiguous case, and launches LSTMBackward
// via FILL_DIM.
template<typename INDTYPE>
void THNN_(LSTM_back_ind_wrap)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *cx,
   THCTensor *cy,
   THCTensor *gradOutput,
   THCTensor *gradOutputCell,
   THCTensor *gradInput)
{
  int maxDim = THNN_(minIndexType)
    (state, 7, input, hidden, cx, cy,
     gradOutput, gradOutputCell, gradInput);
  ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);

  const dim3 block = getApplyBlock();
  dim3 grid;
  THAssertMsg(getApplyGrid(state, totalElements, grid),
              "Could not get grid size for pointwise apply");

  TensorInfo<DATATYPE, INDTYPE> inputI =
    getTensorInfo<THCTensor, INDTYPE>(state, input);
  TensorInfo<DATATYPE, INDTYPE> hiddenI =
    getTensorInfo<THCTensor, INDTYPE>(state, hidden);
  TensorInfo<DATATYPE, INDTYPE> cxI =
    getTensorInfo<THCTensor, INDTYPE>(state, cx);
  TensorInfo<DATATYPE, INDTYPE> cyI =
    getTensorInfo<THCTensor, INDTYPE>(state, cy);
  TensorInfo<DATATYPE, INDTYPE> gradoutI =
    getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
  TensorInfo<DATATYPE, INDTYPE> gradoutcI =
    getTensorInfo<THCTensor, INDTYPE>(state, gradOutputCell);
  TensorInfo<DATATYPE, INDTYPE> gradinI =
    getTensorInfo<THCTensor, INDTYPE>(state, gradInput);

  // feature size = last dimension of the output gradient
  INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];

  if(maxDim == -2){
    inputI.collapseDims();
    hiddenI.collapseDims();
    cxI.collapseDims();
    cyI.collapseDims();
    gradoutI.collapseDims();
    gradoutcI.collapseDims();
    gradinI.collapseDims();
  }
  FILL_DIM(INDTYPE, maxDim, LSTM_BACKWARD);
}
// Public entry point for the fused LSTM backward: resizes gradInput,
// validates sizes/devices, then dispatches to the 32- or 64-bit index
// wrapper.
void THNN_(LSTMFused_updateGradInput)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *cx,
   THCTensor *cy,
   THCTensor *gradOutput,
   THCTensor *gradOutputCell,
   THCTensor *gradInput)
{
  THCTensor_(resizeAs)(state, gradInput, gradOutput);
  THCUNN_assertSameGPU(state, 7, input, hidden, cx, cy,
                       gradOutput, gradOutputCell, gradInput);
  THNN_(FusedRNNAssertSizes)
    (state, 4, 7, input, hidden, cx, cy,
     gradOutput, gradOutputCell, gradInput);

  bool canUse32bi = THNN_(canUse32BitIndexMath)
    (state, 7, input, hidden, cx, cy,
     gradOutput, gradOutputCell, gradInput);

  if(canUse32bi){
    THNN_(LSTM_back_ind_wrap)<unsigned int>
      (state, input, hidden, cx, cy,
       gradOutput, gradOutputCell, gradInput);
  }else{
    THNN_(LSTM_back_ind_wrap)<unsigned long>
      (state, input, hidden, cx, cy,
       gradOutput, gradOutputCell, gradInput);
  }
  THCudaCheck(hipGetLastError());
}
// Host wrapper (templated on index type) that prepares TensorInfo views,
// validates bias sizes (3 * feature size), collapses dims for the fully
// contiguous case, and launches GRUForward via FILL_DIM.
// bias1/bias2 may be NULL; a null TensorInfo is substituted so the kernel
// can test `Bias1.data != NULL`.
template<typename INDTYPE>
void THNN_(GRU_forw_ind_wrap)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *bias1,
   THCTensor *bias2,
   THCTensor *hx,
   THCTensor *hy)
{
  bool has_bias = (bias1!=NULL);
  int maxDim;

  if(has_bias){
    THCUNN_assertSameGPU
      (state, 6, input, hidden, hx, hy, bias1, bias2);
    maxDim = THNN_(minIndexType)
      (state, 6, input, hidden, hx, hy, bias1, bias2);
  }else{
    THCUNN_assertSameGPU
      (state, 4, input, hidden, hx, hy);
    maxDim = THNN_(minIndexType)
      (state, 4, input, hidden, hx, hy);
  }

  ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, hx);

  const dim3 block = getApplyBlock();
  dim3 grid;
  THAssertMsg(getApplyGrid(state, totalElements, grid),
              "Could not get grid size for pointwise apply.");

  TensorInfo<DATATYPE, INDTYPE> inputI =
    getTensorInfo<THCTensor, INDTYPE>(state, input);
  TensorInfo<DATATYPE, INDTYPE> hiddenI =
    getTensorInfo<THCTensor, INDTYPE>(state, hidden);
  TensorInfo<DATATYPE, INDTYPE> hxI =
    getTensorInfo<THCTensor, INDTYPE>(state, hx);
  TensorInfo<DATATYPE, INDTYPE> hyI =
    getTensorInfo<THCTensor, INDTYPE>(state, hy);

  // feature size = last dimension of the hidden state
  INDTYPE hid_size = hxI.sizes[hxI.dims-1];
  if(has_bias){
    THAssertMsg( hid_size*3 == THCTensor_(nElement)(state, bias1) &&
                 hid_size*3 == THCTensor_(nElement)(state, bias2),
                 "Bias in pointwise operation is an incorrect size, must be 3 x feature size.");
  }

  if(maxDim == -2){
    inputI.collapseDims();
    hiddenI.collapseDims();
    hyI.collapseDims();
    hxI.collapseDims();
  }

  INDTYPE zero[1] = {0};
  TensorInfo<DATATYPE, INDTYPE> nullinfo =
    TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
  TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
  TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;

  if(has_bias){
    bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
    bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
    if(maxDim == -2){
      bias1I.collapseDims();
      bias2I.collapseDims();
    }
  }

  FILL_DIM(INDTYPE, maxDim, GRU_FORWARD);
}
// Public entry point for the fused GRU forward: resizes hy, validates
// sizes, then dispatches to the 32- or 64-bit index wrapper depending on
// whether every tensor fits 32-bit indexing.
void THNN_(GRUFused_updateOutput)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *bias1,
   THCTensor *bias2,
   THCTensor *hx,
   THCTensor *hy)
{
  THCTensor_(resizeAs)(state, hy, hx);
  THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, hx, hy);

  bool has_bias = (bias1!=NULL);
  bool canUse32bi;

  if(has_bias){
    canUse32bi = THNN_(canUse32BitIndexMath)
      (state, 6, input, hidden, hx, hy, bias1, bias2);
  }else{
    canUse32bi = THNN_(canUse32BitIndexMath)
      (state, 4, input, hidden, hx, hy);
  }

  if(canUse32bi){
    THNN_(GRU_forw_ind_wrap)<unsigned int>
      (state, input, hidden, bias1, bias2, hx, hy);
  }else{
    THNN_(GRU_forw_ind_wrap)<unsigned long>
      (state, input, hidden, bias1, bias2, hx, hy);
  }

  THCudaCheck(hipGetLastError());
}
// Host wrapper (templated on index type) that prepares TensorInfo views,
// collapses dims for the fully contiguous case, and launches GRUBackward
// via FILL_DIM.
template<typename INDTYPE>
void THNN_(GRU_back_ind_wrap)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *gradOutput,
   THCTensor *gradInput)
{
  int maxDim = THNN_(minIndexType)(state, 4, input, hidden, gradOutput, gradInput);
  ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);

  const dim3 block = getApplyBlock();
  dim3 grid;
  THAssertMsg(getApplyGrid(state, totalElements, grid),
              "Could not get grid size for pointwise apply");

  TensorInfo<DATATYPE, INDTYPE> inputI =
    getTensorInfo<THCTensor, INDTYPE>(state, input);
  TensorInfo<DATATYPE, INDTYPE> hiddenI =
    getTensorInfo<THCTensor, INDTYPE>(state, hidden);
  TensorInfo<DATATYPE, INDTYPE> gradoutI =
    getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
  TensorInfo<DATATYPE, INDTYPE> gradinI =
    getTensorInfo<THCTensor, INDTYPE>(state, gradInput);

  // feature size = last dimension of the output gradient
  INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];

  if(maxDim == -2){
    inputI.collapseDims();
    hiddenI.collapseDims();
    gradoutI.collapseDims();
    gradinI.collapseDims();
  }
  FILL_DIM(INDTYPE, maxDim, GRU_BACKWARD);
}
// Public entry point for the fused GRU backward: resizes gradInput,
// validates sizes/devices, then dispatches to the 32- or 64-bit index
// wrapper.
void THNN_(GRUFused_updateGradInput)(
   THCState *state,
   THCTensor *input,
   THCTensor *hidden,
   THCTensor *gradOutput,
   THCTensor *gradInput)
{
  THCTensor_(resizeAs)(state, gradInput, gradOutput);
  THCUNN_assertSameGPU(state, 4, input, hidden, gradOutput, gradInput);
  THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, gradOutput, gradInput);
  bool canUse32bi = THNN_(canUse32BitIndexMath)(state, 4, input, hidden, gradOutput, gradInput);

  if(canUse32bi){
    THNN_(GRU_back_ind_wrap)<unsigned int>
      (state, input, hidden, gradOutput, gradInput);
  }else{
    THNN_(GRU_back_ind_wrap)<unsigned long>
      (state, input, hidden, gradOutput, gradInput);
  }

  THCudaCheck(hipGetLastError());
}
//Clean up compiler namespace
#undef DEVICE_LINEAR_GET
#undef H2F
#undef F2H
#undef EXPAND_FUNCTION
#undef EXPAND_DIM
#undef EXPAND_TYPE
#undef FILL_TYPES_FORWARD
#undef FILL_FORWARD
#undef FILL_TYPES_BACKWARD
#undef FILL_BACKWARD
#endif
| a9351fd0482368706d7761e38d250dda02f5dc58.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/FusedRNNKernel.cu"
#else
#include <cstdarg>
#include "../common.h"
#define DATATYPE TensorUtils<THCTensor>::DataType
//factor will be 3 for GRU and 4 for LSTM
/*
 * Sanity-check a variadic list of `count` tensors: the first two (input,
 * hidden) must have equal element counts, and every further tensor must
 * hold exactly 1/factor of that many elements (factor = 3 for GRU, 4 for
 * LSTM).  Every tensor must fit within MAX_CUTORCH_DIMS dimensions.
 *
 * Fix over the original: the THArgCheck format string has a single %u but
 * was passed (arg, factor), so the error message printed the argument
 * index where the factor was intended.  The stray argument is removed.
 */
void THNN_(FusedRNNAssertSizes)(THCState *state, int factor, int count, ...)
{
  va_list list;
  va_start(list, count);
  THCTensor *input = va_arg(list, THCTensor*);
  THCTensor *hidden = va_arg(list, THCTensor*);
  THArgCheck(THCTensor_(nElement)(state, input) ==
             THCTensor_(nElement)(state, hidden),
             3, "Input and Hidden tensor sizes should be the same.");
  THAssertMsg(TensorUtils<THCTensor>::getDims(state, input) <= MAX_CUTORCH_DIMS,
              "Tensor dimension is too large.");
  THAssertMsg(TensorUtils<THCTensor>::getDims(state, hidden) <= MAX_CUTORCH_DIMS,
              "Tensor dimension is too large.");
  for (int arg=2; arg < count; ++arg){
    THCTensor *tens = va_arg(list, THCTensor*);
    /* %u now consumes `factor`; the leading `arg` vararg previously shadowed it */
    THArgCheck(THCTensor_(nElement)(state, input) ==
               THCTensor_(nElement)(state, tens)*factor,
               3, "A pointwise tensor was not the right size, should have 1/%u the elements of input/hidden tensor.", factor);
    THAssertMsg(TensorUtils<THCTensor>::getDims(state, tens) <= MAX_CUTORCH_DIMS,
                "Tensor dimension is too large.");
  }
  va_end(list);
}
int THNN_(minIndexType)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor* tens = va_arg(list, THCTensor*);
int startDim = TensorUtils<THCTensor>::getDims(state, tens);
bool canCollapse = THCTensor_(isContiguous)(state,tens);
for (int arg=1; arg < count; ++arg){
tens = va_arg(list, THCTensor*);
canCollapse = canCollapse && THCTensor_(isContiguous)(state, tens);
if(TensorUtils<THCTensor>::getDims(state, tens) != startDim){
va_end(list);
return -1;
}
}
va_end(list);
if(canCollapse) return -2;
return startDim;
}
bool THNN_(canUse32BitIndexMath)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
for (int arg=0; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
if (!TensorUtils<THCTensor>::canUse32BitIndexMath(state, tens)){
va_end(list);
return false;
}
}
va_end(list);
return true;
}
#define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \
D_TENSOR.data[IndexToOffset<T, IndexType, Dims>::get(INDEX, D_TENSOR)]
#define H2F(input) __half2float(input)
#define F2H(input) __float2half(input)
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUForward)(TensorInfo<T, IndexType> Input,
TensorInfo<T, IndexType> Hidden,
TensorInfo<T, IndexType> Bias1,
TensorInfo<T, IndexType> Bias2,
TensorInfo<T, IndexType> _hx,
TensorInfo<T, IndexType> _hy,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
T* ir = &DEVICE_LINEAR_GET(Input, offset+0*hsz);
T* ii = &DEVICE_LINEAR_GET(Input, offset+1*hsz);
T* in = &DEVICE_LINEAR_GET(Input, offset+2*hsz);
T* hr = &DEVICE_LINEAR_GET(Hidden,offset+0*hsz);
T* hi = &DEVICE_LINEAR_GET(Hidden,offset+1*hsz);
T hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(_hx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
bool has_bias = (Bias1.data != NULL);
T b1r, b1i, b1n, b2r, b2i, b2n;
if(has_bias){
b1r = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+0*hsz);
b1i = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+1*hsz);
b1n = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+2*hsz);
b2r = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+0*hsz);
b2i = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+1*hsz);
b2n = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+2*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1r = 0.0; b1i = 0.0; b1n = 0.0;
b2r = 0.0; b2i = 0.0; b2n = 0.0;
#else
b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0);
b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0);
#endif
}
#ifndef THC_REAL_IS_HALF
T rg, ig, ng;
rg = *ir + *hr + b1r + b2r;
ig = *ii + *hi + b1i + b2i;
TensorSigmoidOp<real>()(&rg, &rg);
TensorSigmoidOp<real>()(&ig, &ig);
ng = *in + b1n + rg * (hn + b2n);
ng = THCNumerics<T>::tanh(ng);
*hy = ng + ig * (hx - ng);
//SAVE FOR BACKWARDS
*ir = rg;
*ii = ig;
*in = ng;
*hr = hx;
*hi = hn + b2n;
#else
float rg, ig, ng;
rg = H2F(*ir) + H2F(*hr) + H2F(b1r) + H2F(b2r);
ig = H2F(*ii) + H2F(*hi) + H2F(b1i) + H2F(b2i);
TensorSigmoidOp<float>()(&rg, &rg);
TensorSigmoidOp<float>()(&ig, &ig);
ng = H2F(*in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) );
ng = THCNumerics<float>::tanh(ng);
*hy = F2H( ng + ig * ( H2F(hx)-ng ) );
//SAVE FOR BACKWARDS
*ir = F2H(rg);
*ii = F2H(ig);
*in = F2H(ng);
*hr = hx;
*hi = F2H( H2F(hn) + H2F(b2n) );
#endif
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUBackward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> gradoutput,
TensorInfo<T, IndexType> gradinput,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;;
//will return input grads here
T* rg = &DEVICE_LINEAR_GET(input, offset+0*hsz);
T* ig = &DEVICE_LINEAR_GET(input, offset+1*hsz);
T* ng = &DEVICE_LINEAR_GET(input, offset+2*hsz);
//will return hidden grads here
T* hx = &DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T* hn = &DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T* oghn=&DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T* gi = &DEVICE_LINEAR_GET(gradinput, linearIndex);
T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
#ifndef THC_REAL_IS_HALF
T gig = (go)*(*hx-*ng)*( 1-(*ig) )*(*ig);
T ghx = (go)*(*ig);
T gin = (go)*(1-*ig)*( 1-(*ng)*(*ng) );
T ghn = (gin) * (*rg);
T grg = (gin)*(*hn)*( 1-(*rg) )*(*rg);
*gi = ghx;
*rg = grg;
*ig = gig;
*ng = gin;
*hx = grg;
*hn = gig;
*oghn = ghn;
#else
float gig = H2F(go)*( H2F(*hx)-H2F(*ng) )*( 1-H2F(*ig) )*H2F(*ig);
float ghx = H2F(go)*H2F(*ig);
float gin = H2F(go)*( 1-H2F(*ig) )*( 1-H2F(*ng)*H2F(*ng) );
float ghn = H2F(gin) * H2F(*rg);
float grg = H2F(gin)*H2F(*hn)*( 1-H2F(*rg) )*H2F(*rg);
*gi = F2H(ghx);
*rg = F2H(grg);
*ig = F2H(gig);
*ng = F2H(gin);
*hx = F2H(grg);
*hn = F2H(gig);
*oghn = F2H(ghn);
#endif
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMForward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> bias1,
TensorInfo<T, IndexType> bias2,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> _cy,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T* iig = &DEVICE_LINEAR_GET(input, offset+0*hsz);
T* ifg = &DEVICE_LINEAR_GET(input, offset+1*hsz);
T* icg = &DEVICE_LINEAR_GET(input, offset+2*hsz);
T* iog = &DEVICE_LINEAR_GET(input, offset+3*hsz);
T hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz);
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
T* cy = &DEVICE_LINEAR_GET(_cy, linearIndex);
bool has_bias = (bias1.data != NULL);
T b1i, b1f, b1c, b1o;
T b2i, b2f, b2c, b2o;
if(has_bias){
b1i = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+0*hsz);
b1f = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+1*hsz);
b1c = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+2*hsz);
b1o = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+3*hsz);
b2i = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+0*hsz);
b2f = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+1*hsz);
b2c = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+2*hsz);
b2o = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+3*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0;
b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0;
#else
b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0);
b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0);
#endif
}
#ifndef THC_REAL_IS_HALF
T ig, fg, cg, og;
ig = *iig + hig + b1i + b2i;
fg = *ifg + hfg + b1f + b2f;
cg = *icg + hcg + b1c + b2c;
og = *iog + hog + b1o + b2o;
TensorSigmoidOp<real>()(&ig, &ig);
TensorSigmoidOp<real>()(&fg, &fg);
cg = THCNumerics<T>::tanh(cg);
TensorSigmoidOp<real>()(&og, &og);
*cy = (fg * cx) + (ig * cg);
*hy = og * THCNumerics<T>::tanh(*cy);
*iig = ig;
*ifg = fg;
*icg = cg;
*iog = og;
#else
float ig, fg, cg, og;
float f_hy, f_cy;
ig = H2F(*iig) + H2F(hig) + H2F(b1i) + H2F(b2i);
fg = H2F(*ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f);
cg = H2F(*icg) + H2F(hcg) + H2F(b1c) + H2F(b2c);
og = H2F(*iog) + H2F(hog) + H2F(b1o) + H2F(b2o);
TensorSigmoidOp<float>()(&ig, &ig);
TensorSigmoidOp<float>()(&fg, &fg);
cg = THCNumerics<float>::tanh(cg);
TensorSigmoidOp<float>()(&og, &og);
f_cy = (fg * H2F(cx) ) + (ig * cg);
f_hy = og * THCNumerics<float>::tanh(f_cy);
*hy = F2H(f_hy);
*cy = F2H(f_cy);
//SAVE FOR BACKWARDS
//Also need cy and cx but can be saved easily in python
*iig = F2H(ig);
*ifg = F2H(fg);
*icg = F2H(cg);
*iog = F2H(og);
#endif
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMBackward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _cy,
TensorInfo<T, IndexType> gradoutput,
TensorInfo<T, IndexType> gradoutputcell,
TensorInfo<T, IndexType> gradinput,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T ig = DEVICE_LINEAR_GET(input, offset+0*hsz);
T fg = DEVICE_LINEAR_GET(input, offset+1*hsz);
T cg = DEVICE_LINEAR_GET(input, offset+2*hsz);
T og = DEVICE_LINEAR_GET(input, offset+3*hsz);
T* ih = &DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T* fh = &DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T* ch = &DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T* oh = &DEVICE_LINEAR_GET(hidden, offset+3*hsz);
//will return hidden grads here
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T cy = DEVICE_LINEAR_GET(_cy, linearIndex);
T* gi = &DEVICE_LINEAR_GET(gradinput, linearIndex);
T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
T goc= DEVICE_LINEAR_GET(gradoutputcell, linearIndex);
#ifndef THC_REAL_IS_HALF
T gcx = THCNumerics<T>::tanh(cy);
T gog = go * gcx;
gcx = go * og * ( 1 - gcx*gcx) + goc;
T gig = gcx * cg;
T gfg = gcx * cx;
T gcg = gcx * ig;
gcx = gcx * fg;
gig = gig * (1-ig) * ig;
gfg = gfg * (1-fg) * fg;
gcg = gcg * (1-cg*cg);
gog = gog * (1-og) * og;
*ih = gig;
*fh = gfg;
*ch = gcg;
*oh = gog;
*gi = gcx;
#else
float gcx = THCNumerics<float>::tanh(H2F(cy));
float gog = H2F(go) * gcx;
gcx = H2F(go) * H2F(og) * ( 1 - gcx*gcx) + H2F(goc);
float gcg = gcx * H2F(fg);
float gfg = gcx * H2F(cg);
float gig = gcx * H2F(cx);
gog = gog * ( (1-H2F(og))*H2F(og) );
gcg = gcg * (1-H2F(cg)*H2F(cg));
gfg = gfg * ( (1-H2F(fg))*H2F(fg) );
gig = gig * ( (1-H2F(ig))*H2F(ig) );
*ih = F2H(gig);
*fh = F2H(gfg);
*ch = F2H(gcg);
*oh = F2H(gog);
*gi = F2H(gcx);
#endif
}
}
// ************ START Create function calls ********** //
#define FILL_FUNCTION(ITYPE, DIM, FUNCTION) FUNCTION(ITYPE, DIM)
#define FILL_DIM(ITYPE, DIM, FUNCTION) \
switch (DIM) { \
case -2: \
FILL_FUNCTION(ITYPE, -2, FUNCTION); \
break; \
case 1: \
FILL_FUNCTION(ITYPE, 1, FUNCTION); \
break; \
case 2: \
FILL_FUNCTION(ITYPE, 2, FUNCTION); \
break; \
default: \
FILL_FUNCTION(ITYPE, -1, FUNCTION); \
break; \
}
#define LSTM_FORWARD(ITYPE, DIM) THNN_(LSTMForward) \
<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(inputI, hiddenI, \
bias1I, bias2I, cxI, hyI, cyI, \
hid_size, totalElements);
#define LSTM_BACKWARD(ITYPE, DIM) THNN_(LSTMBackward) \
<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(inputI, hiddenI, cxI, cyI, \
gradoutI, gradoutcI, gradinI, \
hid_size, totalElements);
#define GRU_FORWARD(ITYPE, DIM) THNN_(GRUForward)<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(inputI, hiddenI, bias1I, bias2I, hxI, hyI, \
hid_size, totalElements);
#define GRU_BACKWARD(ITYPE, DIM) THNN_(GRUBackward) \
<DATATYPE, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>> \
(inputI, hiddenI, gradoutI, gradinI, hid_size, totalElements);
// ************ END Create actual function calls ************ //
template<typename INDTYPE>
void THNN_(LSTM_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
THCUNN_assertSameGPU(state, 5, input, hidden, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hy, cy, cx);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, cx);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply.");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> cxI =
getTensorInfo<THCTensor, INDTYPE>(state, cx);
TensorInfo<DATATYPE, INDTYPE> hyI =
getTensorInfo<THCTensor, INDTYPE>(state, hy);
TensorInfo<DATATYPE, INDTYPE> cyI =
getTensorInfo<THCTensor, INDTYPE>(state, cy);
INDTYPE hid_size = cxI.sizes[cxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*4 == THCTensor_(nElement)(state, bias1) &&
hid_size*4 == THCTensor_(nElement)(state, bias2),
"Bias in pointwise operation is an incorrect size, must be 4 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
cxI.collapseDims();
hyI.collapseDims();
cyI.collapseDims();
}
INDTYPE zero[1] = {0};
TensorInfo<DATATYPE, INDTYPE> nullinfo =
TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, LSTM_FORWARD);
}
void THNN_(LSTMFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
THCTensor_(resizeAs)(state, hy, cx);
THCTensor_(resizeAs)(state, cy, cx);
THNN_(FusedRNNAssertSizes)(state, 4, 5, input, hidden, hy, cy, cx);
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hy, cy, cx);
}
if(canUse32bi){
THNN_(LSTM_forw_ind_wrap)<unsigned int>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}else{
THNN_(LSTM_forw_ind_wrap)<unsigned long>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(LSTM_back_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInput)
{
int maxDim = THNN_(minIndexType)
(state, 7, input, hidden, cx, cy,
gradOutput, gradOutputCell, gradInput);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> cxI =
getTensorInfo<THCTensor, INDTYPE>(state, cx);
TensorInfo<DATATYPE, INDTYPE> cyI =
getTensorInfo<THCTensor, INDTYPE>(state, cy);
TensorInfo<DATATYPE, INDTYPE> gradoutI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
TensorInfo<DATATYPE, INDTYPE> gradoutcI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutputCell);
TensorInfo<DATATYPE, INDTYPE> gradinI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInput);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
cxI.collapseDims();
cyI.collapseDims();
gradoutI.collapseDims();
gradoutcI.collapseDims();
gradinI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, LSTM_BACKWARD);
}
void THNN_(LSTMFused_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInput)
{
THCTensor_(resizeAs)(state, gradInput, gradOutput);
THCUNN_assertSameGPU(state, 7, input, hidden, cx, cy,
gradOutput, gradOutputCell, gradInput);
THNN_(FusedRNNAssertSizes)
(state, 4, 7, input, hidden, cx, cy,
gradOutput, gradOutputCell, gradInput);
bool canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, cx, cy,
gradOutput, gradOutputCell, gradInput);
if(canUse32bi){
THNN_(LSTM_back_ind_wrap)<unsigned int>
(state, input, hidden, cx, cy,
gradOutput, gradOutputCell, gradInput);
}else{
THNN_(LSTM_back_ind_wrap)<unsigned long>
(state, input, hidden, cx, cy,
gradOutput, gradOutputCell, gradInput);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU
(state, 6, input, hidden, hx, hy, bias1, bias2);
maxDim = THNN_(minIndexType)
(state, 6, input, hidden, hx, hy, bias1, bias2);
}else{
THCUNN_assertSameGPU
(state, 4, input, hidden, hx, hy);
maxDim = THNN_(minIndexType)
(state, 4, input, hidden, hx, hy);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, hx);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply.");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> hxI =
getTensorInfo<THCTensor, INDTYPE>(state, hx);
TensorInfo<DATATYPE, INDTYPE> hyI =
getTensorInfo<THCTensor, INDTYPE>(state, hy);
INDTYPE hid_size = hxI.sizes[hxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*3 == THCTensor_(nElement)(state, bias1) &&
hid_size*3 == THCTensor_(nElement)(state, bias2),
"Bias in pointwise operation is an incorrect size, must be 3 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
hyI.collapseDims();
hxI.collapseDims();
}
INDTYPE zero[1] = {0};
TensorInfo<DATATYPE, INDTYPE> nullinfo =
TensorInfo<DATATYPE, INDTYPE>(NULL, 1, zero, zero);
TensorInfo<DATATYPE, INDTYPE> bias1I = nullinfo;
TensorInfo<DATATYPE, INDTYPE> bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, GRU_FORWARD);
}
void THNN_(GRUFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy)
{
THCTensor_(resizeAs)(state, hy, hx);
THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, hx, hy);
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 6, input, hidden, hx, hy, bias1, bias2);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 4, input, hidden, hx, hy);
}
if(canUse32bi){
THNN_(GRU_forw_ind_wrap)<unsigned int>
(state, input, hidden, bias1, bias2, hx, hy);
}else{
THNN_(GRU_forw_ind_wrap)<unsigned long>
(state, input, hidden, bias1, bias2, hx, hy);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_back_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *gradOutput,
THCTensor *gradInput)
{
int maxDim = THNN_(minIndexType)(state, 4, input, hidden, gradOutput, gradInput);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
THAssertMsg(getApplyGrid(state, totalElements, grid),
"Could not get grid size for pointwise apply");
TensorInfo<DATATYPE, INDTYPE> inputI =
getTensorInfo<THCTensor, INDTYPE>(state, input);
TensorInfo<DATATYPE, INDTYPE> hiddenI =
getTensorInfo<THCTensor, INDTYPE>(state, hidden);
TensorInfo<DATATYPE, INDTYPE> gradoutI =
getTensorInfo<THCTensor, INDTYPE>(state, gradOutput);
TensorInfo<DATATYPE, INDTYPE> gradinI =
getTensorInfo<THCTensor, INDTYPE>(state, gradInput);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
gradoutI.collapseDims();
gradinI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, GRU_BACKWARD);
}
void THNN_(GRUFused_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *gradOutput,
THCTensor *gradInput)
{
THCTensor_(resizeAs)(state, gradInput, gradOutput);
THCUNN_assertSameGPU(state, 4, input, hidden, gradOutput, gradInput);
THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, gradOutput, gradInput);
bool canUse32bi = THNN_(canUse32BitIndexMath)(state, 4, input, hidden, gradOutput, gradInput);
if(canUse32bi){
THNN_(GRU_back_ind_wrap)<unsigned int>
(state, input, hidden, gradOutput, gradInput);
}else{
THNN_(GRU_back_ind_wrap)<unsigned long>
(state, input, hidden, gradOutput, gradInput);
}
THCudaCheck(cudaGetLastError());
}
//Clean up compiler namespace
#undef DEVICE_LINEAR_GET
#undef H2F
#undef F2H
#undef EXPAND_FUNCTION
#undef EXPAND_DIM
#undef EXPAND_TYPE
#undef FILL_TYPES_FORWARD
#undef FILL_FORWARD
#undef FILL_TYPES_BACKWARD
#undef FILL_BACKWARD
#endif
|
d0c963ddf03e64cdc170ca9791975946568b26ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel.h"
#include <hip/hip_runtime.h>
#include <list>
#include <string>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>
using namespace::std;
typedef struct KernelMatMultFastParams
{
public:
dim3 m_bs;
dim3 m_gs;
int m_NumberOfElements;
KernelMatMultFastParams(int bsx, int bsy, int bsz, int gsx, int gsy, int gsz, int numele) :
m_bs(bsx, bsy, bsz),
m_gs(gsx, gsy, gsz),
m_NumberOfElements(numele)
{
if (bsx != 8 && bsx != 16 && bsx != 32) { printf("\n***Error bsx !=8,16,32\n"); exit(EXIT_FAILURE); }
if (bsy != 8 && bsy != 16 && bsy != 32) { printf("\n***Error bsy !=8,16,32\n"); exit(EXIT_FAILURE); }
if (bsz != 1) { printf("\n***Error bsz != 1\n"); exit(EXIT_FAILURE); }
if (gsx < 1) { printf("\n***Error gsx < 1\n"); exit(EXIT_FAILURE); }
if (gsy < 1) { printf("\n***Error gsy < 1\n"); exit(EXIT_FAILURE); }
if (gsz != 1) { printf("\n***Error gsz != 1\n"); exit(EXIT_FAILURE); }
if (numele < 1) { printf("\n***Error numele < 1\n"); exit(EXIT_FAILURE); }
if (bsx*gsx*bsy*gsy != numele) { printf("\n***Error bsx*gsx*bsy*gsy != numele (%d,%d,%d,%d,%d)\n", bsx, gsx, bsy, gsy, numele); exit(EXIT_FAILURE); }
}
} KernelMatMultFastParams_t;
void QueryKernelMatMultFast(char *KernelName)
{
int bsize[] = { 8, 16, 32, 0 };
int gsize[] = { 16, 32, 64, 128, 256, 0 };
list<KernelMatMultFastParams_t*> params;
for (int *gs = &gsize[0]; *gs != 0; gs++)
for (int *bs = &bsize[0]; *bs != 0; bs++)
{
int ms = (*gs)*(*bs);
params.push_back(new KernelMatMultFastParams_t(*bs, *bs, 1, *gs, *gs, 1, ms));
}
printf("\n#\n# %s\n#", KernelName);
list<KernelMatMultFastParams_t*>::iterator i = params.begin();
printf("\n%s: compile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d",
KernelName,
(*i)->m_bs.x,
(*i)->m_bs.y,
(*i)->m_bs.z,
(*i)->m_gs.x,
(*i)->m_gs.y,
(*i)->m_gs.z,
(*i)->m_NumberOfElements);
for (i++; i != params.end(); ++i)
{
printf("\n%s: nocompile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d",
KernelName,
(*i)->m_bs.x,
(*i)->m_bs.y,
(*i)->m_bs.z,
(*i)->m_gs.x,
(*i)->m_gs.y,
(*i)->m_gs.z,
(*i)->m_NumberOfElements);
}
printf("\n");
}
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
template <int BLOCK_SIZE> __global__ void
kernelMatMultFast(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
static inline float _mm256_reduce_add_ps(__m256 x) {
/* ( x3+x7, x2+x6, x1+x5, x0+x4 ) */
const __m128 x128 = _mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x));
/* ( -, -, x1+x3+x5+x7, x0+x2+x4+x6 ) */
const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
/* ( -, -, -, x0+x1+x2+x3+x4+x5+x6+x7 ) */
const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
/* Conversion to float is a no-op on x86-64 */
return _mm_cvtss_f32(x32);
}
void LaunchKernelMatMultFast(dim3& gs, dim3& bs, char **argv, int argc, int nextarg)
{
printf("\nPreparing %s", KernelMatMultFastName);
if (strcmp(argv[nextarg], "-numele") == 0)
{
printf("\nAllocating RAM");
hipError_t err = hipSuccess;
int numElements = stoi(argv[nextarg + 1], nullptr);
const int numElementsSq = numElements*numElements;
KernelMatMultFastParams_t Verify(bs.x, bs.y, bs.z, gs.x, gs.y, gs.z, numElementsSq);
Matrix d_A;
d_A.width = numElements; d_A.height = numElements;
size_t size_A = d_A.width * d_A.height * sizeof(float);
err = hipMalloc(&d_A.elements, size_A);
if (err != hipSuccess)
{
printf("Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Matrix d_B;
d_B.width = numElements; d_B.height = numElements;
size_t size_B = d_B.width * d_B.height * sizeof(float);
err = hipMalloc(&d_B.elements, size_B);
if (err != hipSuccess)
{
printf("Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Matrix d_C;
d_C.width = numElements; d_C.height = numElements;
size_t size_C = d_C.width * d_C.height * sizeof(float);
err = hipMalloc(&d_C.elements, size_C);
if (err != hipSuccess)
{
printf("Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *h_A = new float[numElementsSq];
float *h_B = new float[numElementsSq];
float *h_C = new float[numElementsSq];
float *h_AB = new float[numElementsSq];
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL || h_AB == NULL)
{
printf("Failed to allocate host vectors in LaunchKernelMatMultFast\n");
exit(EXIT_FAILURE);
}
printf("\nInitializing GPU RAM");
InitRandomSequence(d_A.elements, numElementsSq);
InitRandomSequence(d_B.elements, numElementsSq);
printf("\nLaunching kernel: kernelMatMultFast");
printf("\n\tgridsize (%d,%d,%d)", gs.x, gs.y, gs.z);
printf("\n\tblocksize (%d,%d,%d)", bs.x, bs.y, bs.z);
printf("\n\tNumElements %d", numElementsSq);
if ((bs.x != 32 || bs.y != 32) && (bs.x != 16 || bs.y != 16) && (bs.x != 8 || bs.y != 8))
{
printf("\nBlock size must be 8x8 or 16x16 or 32x32 because of template for MatMultFast");
exit(EXIT_FAILURE);
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
if (bs.x == 32)
{
kernelMatMultFast<32> << <gs, bs >> > (d_C.elements, d_A.elements, d_B.elements, d_A.width, d_B.height);
// CHECK_LAUNCH_ERROR();
}
else
if (bs.x == 16)
{
kernelMatMultFast<16> << <gs, bs >> > (d_C.elements, d_A.elements, d_B.elements, d_A.width, d_B.height);
// CHECK_LAUNCH_ERROR();
}
else
if (bs.x == 8)
{
kernelMatMultFast<8> << <gs, bs >> > (d_C.elements, d_A.elements, d_B.elements, d_A.width, d_B.height);
// CHECK_LAUNCH_ERROR();
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
err = hipMemcpy(h_A, d_A.elements, size_A, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
printf("Failed to copy matrix A from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_B, d_B.elements, size_B, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
printf("Failed to copy matrix B from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_C, d_C.elements, size_C, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
printf("Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//
// Now compute AB= A*B on the host so that we can compare it with the GPU.
//
// First transpose h_B matrix into h_T using SSE.
// Then use OpenMP and AVX to perform the matrix multiplication 8 floats at a time.
//
printf("\nGPU finished %f milliseconds.\nComputing host solution ...", milliseconds);
{
float *h_T = new float[numElementsSq];
for (int i = 0; i < numElements; i += 4)
{
for (int j = 0; j < numElements; j += 4)
{
__m128 B[4];
for (int k = 0; k < 4; k++)
{
B[k] = _mm_load_ps(&h_B[(i + k)*numElements + j]);
}
_MM_TRANSPOSE4_PS(B[0], B[1], B[2], B[3]);
for (int k = 0; k < 4; k++)
_mm_store_ps(&h_T[(j + k)*numElements + i], B[k]);
}
}
#pragma omp parallel for
for (int i = 0; i < numElements; i++)
{
for (int j = 0; j < numElements; j++)
{
__m256 T = _mm256_setzero_ps();
for (int k = 0; k < numElements; k += 8)
{
__m256 A1 = _mm256_load_ps(&h_A[i*numElements + k]);
__m256 T1 = _mm256_load_ps(&h_T[j*numElements + k]);
__m256 C = _mm256_mul_ps(A1, T1);
T = _mm256_add_ps(C, T);
}
float Q = _mm256_reduce_add_ps(T);
h_AB[i*numElements + j] = Q;
}
}
delete[]h_T;
}
// Verify that the result vector is correct
printf("\nValidating results ...");
#pragma omp parallel for
for (int i = 0; i < numElementsSq; ++i)
{
float T1 = h_AB[i];
float T2 = h_C[i];
if (fabs(T1 - T2) > 0.009f)
{
printf("Result verification failed at element %d!\n", i);
printf("h_AB[%d] = %f, h_C[%d]=%f\n", i, h_AB[i], i, h_C[i]);
exit(EXIT_FAILURE);
}
}
printf(" success!\n");
err = hipFree(d_A.elements);
if (err != hipSuccess)
{
printf("Failed to free device matrix A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B.elements);
if (err != hipSuccess)
{
printf("Failed to free device matrix B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C.elements);
if (err != hipSuccess)
{
printf("Failed to free device matrix C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
delete[]h_A;
delete[]h_B;
delete[]h_C;
delete[]h_AB;
}
else
{
printf("\nExpecting -numele, but saw %s", argv[nextarg]);
exit(EXIT_FAILURE);
}
}
| d0c963ddf03e64cdc170ca9791975946568b26ed.cu |
#include "kernel.h"
#include <cuda_runtime.h>
#include <list>
#include <string>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>
using namespace::std;
// Validated launch-configuration bundle for kernelMatMultFast.
// Stores the CUDA block size, grid size and total element count, and aborts
// the process at construction time if the combination is unsupported.
typedef struct KernelMatMultFastParams
{
public:
    dim3 m_bs;               // block dimensions (must be 8x8, 16x16 or 32x32)
    dim3 m_gs;               // grid dimensions
    int m_NumberOfElements;  // total number of matrix elements covered by the launch
    KernelMatMultFastParams(int bsx, int bsy, int bsz, int gsx, int gsy, int gsz, int numele) :
        m_bs(bsx, bsy, bsz),
        m_gs(gsx, gsy, gsz),
        m_NumberOfElements(numele)
    {
        // Block edge sizes must match one of the kernel template instantiations.
        const bool bsxOk = (bsx == 8) || (bsx == 16) || (bsx == 32);
        const bool bsyOk = (bsy == 8) || (bsy == 16) || (bsy == 32);
        if (!bsxOk) { printf("\n***Error bsx !=8,16,32\n"); exit(EXIT_FAILURE); }
        if (!bsyOk) { printf("\n***Error bsy !=8,16,32\n"); exit(EXIT_FAILURE); }
        // Only 2-D launches are supported.
        if (bsz != 1) { printf("\n***Error bsz != 1\n"); exit(EXIT_FAILURE); }
        if (gsx < 1) { printf("\n***Error gsx < 1\n"); exit(EXIT_FAILURE); }
        if (gsy < 1) { printf("\n***Error gsy < 1\n"); exit(EXIT_FAILURE); }
        if (gsz != 1) { printf("\n***Error gsz != 1\n"); exit(EXIT_FAILURE); }
        if (numele < 1) { printf("\n***Error numele < 1\n"); exit(EXIT_FAILURE); }
        // One thread per output element: the launch must exactly cover numele.
        const long long totalThreads = (long long)bsx * gsx * (long long)bsy * gsy;
        if (totalThreads != numele) { printf("\n***Error bsx*gsx*bsy*gsy != numele (%d,%d,%d,%d,%d)\n", bsx, gsx, bsy, gsy, numele); exit(EXIT_FAILURE); }
    }
} KernelMatMultFastParams_t;
/**
 * Print every launch configuration supported by kernelMatMultFast, one per
 * line in "-bs x,y,z -gs x,y,z -numele n" form (the first tagged "compile:",
 * the rest "nocompile:").  -numele is the matrix EDGE length, matching what
 * LaunchKernelMatMultFast parses.
 *
 * Fixes vs. the original:
 *  - configurations are held by value (the original new'd every element into
 *    a list<T*> and never freed them);
 *  - the validating constructor is given the squared element count
 *    (edge * edge).  The original passed the edge itself, which always
 *    failed the constructor's bsx*gsx*bsy*gsy == numele check and aborted
 *    the program on the first candidate.
 *
 * @param KernelName  name printed in the banner and on every line
 */
void QueryKernelMatMultFast(char *KernelName)
{
    int bsize[] = { 8, 16, 32, 0 };              // square block edge sizes (0-terminated)
    int gsize[] = { 16, 32, 64, 128, 256, 0 };   // square grid edge sizes (0-terminated)
    list<KernelMatMultFastParams_t> params;      // by value: no manual cleanup needed
    for (int *gs = &gsize[0]; *gs != 0; gs++)
        for (int *bs = &bsize[0]; *bs != 0; bs++)
        {
            int ms = (*gs)*(*bs);                // matrix edge length for this config
            // validate with the total element count (edge squared)
            params.push_back(KernelMatMultFastParams_t(*bs, *bs, 1, *gs, *gs, 1, ms*ms));
        }
    printf("\n#\n# %s\n#", KernelName);
    list<KernelMatMultFastParams_t>::iterator i = params.begin();
    // -numele must be the edge length (bs.x * gs.x), not the stored total count
    printf("\n%s: compile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d",
        KernelName,
        i->m_bs.x,
        i->m_bs.y,
        i->m_bs.z,
        i->m_gs.x,
        i->m_gs.y,
        i->m_gs.z,
        i->m_bs.x * i->m_gs.x);
    for (i++; i != params.end(); ++i)
    {
        printf("\n%s: nocompile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d",
            KernelName,
            i->m_bs.x,
            i->m_bs.y,
            i->m_bs.z,
            i->m_gs.x,
            i->m_gs.y,
            i->m_gs.z,
            i->m_bs.x * i->m_gs.x);
    }
    printf("\n");
}
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
    int width;        // number of columns
    int height;       // number of rows
    float* elements;  // row-major storage; used for both host and device buffers
} Matrix;
/**
 * Tiled matrix multiplication C = A * B (classic shared-memory algorithm).
 *
 * Launch configuration: blockDim must be (BLOCK_SIZE, BLOCK_SIZE, 1); each
 * block computes one BLOCK_SIZE x BLOCK_SIZE tile of C, so the grid must
 * tile the whole output matrix.  There are no bounds checks — the caller
 * must ensure wA and wB are exact multiples of BLOCK_SIZE.
 * Shared memory: two static BLOCK_SIZE x BLOCK_SIZE float tiles per block.
 *
 * @param C   output matrix, row-major
 * @param A   left operand, row-major (width wA)
 * @param B   right operand, row-major (width wB)
 * @param wA  number of columns of A (= rows of B)
 * @param wB  number of columns of B (= columns of C)
 */
template <int BLOCK_SIZE> __global__ void
kernelMatMultFast(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
        a <= aEnd;
        a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix (coalesced: tx varies fastest)
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
static inline float _mm256_reduce_add_ps(__m256 x) {
/* ( x3+x7, x2+x6, x1+x5, x0+x4 ) */
const __m128 x128 = _mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x));
/* ( -, -, x1+x3+x5+x7, x0+x2+x4+x6 ) */
const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
/* ( -, -, -, x0+x1+x2+x3+x4+x5+x6+x7 ) */
const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
/* Conversion to float is a no-op on x86-64 */
return _mm_cvtss_f32(x32);
}
/**
 * Host driver for kernelMatMultFast: parse "-numele N" from argv, allocate
 * three N x N device matrices, fill A and B with random data, time the tiled
 * GPU multiply C = A * B with CUDA events, recompute the product on the host
 * (SSE transpose of B + OpenMP/AVX dot products) and compare element-wise.
 *
 * Fixes vs. the original:
 *  - unaligned SSE/AVX loads/stores: new[] storage is only guaranteed
 *    alignof(max_align_t) (typically 16 bytes), so _mm256_load_ps (32-byte
 *    aligned) was undefined behavior;
 *  - kernel launch errors are now checked (CHECK_LAUNCH_ERROR was commented
 *    out);
 *  - CUDA events are destroyed after use (they leaked);
 *  - argv[nextarg + 1] is bounds-checked against argc;
 *  - N is required to be a positive multiple of 8, which the SSE (4-wide)
 *    and AVX (8-wide) host loops silently assumed.
 *
 * @param gs      grid dimensions for the launch (validated against numele)
 * @param bs      block dimensions; must be 8x8, 16x16 or 32x32 to match the
 *                kernel template instantiations
 * @param argv    command-line arguments; argv[nextarg] must be "-numele"
 * @param argc    number of command-line arguments
 * @param nextarg index of the first argument owned by this driver
 *
 * Exits the process on any allocation, launch, copy or validation failure.
 */
void LaunchKernelMatMultFast(dim3& gs, dim3& bs, char **argv, int argc, int nextarg)
{
    printf("\nPreparing %s", KernelMatMultFastName);
    if (strcmp(argv[nextarg], "-numele") == 0)
    {
        // make sure the value after -numele actually exists before reading it
        if (nextarg + 1 >= argc)
        {
            printf("\nExpecting a value after -numele");
            exit(EXIT_FAILURE);
        }
        printf("\nAllocating RAM");
        cudaError_t err = cudaSuccess;
        int numElements = stoi(argv[nextarg + 1], nullptr);
        // the host verification processes 4 floats (SSE) and 8 floats (AVX)
        // per step, so the edge length must be a positive multiple of 8
        if (numElements <= 0 || numElements % 8 != 0)
        {
            printf("\nnumElements must be a positive multiple of 8");
            exit(EXIT_FAILURE);
        }
        const int numElementsSq = numElements*numElements;
        // constructor validates the block/grid/element combination (exits on error)
        KernelMatMultFastParams_t Verify(bs.x, bs.y, bs.z, gs.x, gs.y, gs.z, numElementsSq);
        Matrix d_A;
        d_A.width = numElements; d_A.height = numElements;
        size_t size_A = d_A.width * d_A.height * sizeof(float);
        err = cudaMalloc(&d_A.elements, size_A);
        if (err != cudaSuccess)
        {
            printf("Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        Matrix d_B;
        d_B.width = numElements; d_B.height = numElements;
        size_t size_B = d_B.width * d_B.height * sizeof(float);
        err = cudaMalloc(&d_B.elements, size_B);
        if (err != cudaSuccess)
        {
            printf("Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        Matrix d_C;
        d_C.width = numElements; d_C.height = numElements;
        size_t size_C = d_C.width * d_C.height * sizeof(float);
        err = cudaMalloc(&d_C.elements, size_C);
        if (err != cudaSuccess)
        {
            printf("Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        float *h_A = new float[numElementsSq];
        float *h_B = new float[numElementsSq];
        float *h_C = new float[numElementsSq];
        float *h_AB = new float[numElementsSq];
        // Verify that allocations succeeded
        if (h_A == NULL || h_B == NULL || h_C == NULL || h_AB == NULL)
        {
            printf("Failed to allocate host vectors in LaunchKernelMatMultFast\n");
            exit(EXIT_FAILURE);
        }
        printf("\nInitializing GPU RAM");
        InitRandomSequence(d_A.elements, numElementsSq);
        InitRandomSequence(d_B.elements, numElementsSq);
        printf("\nLaunching kernel: kernelMatMultFast");
        printf("\n\tgridsize (%d,%d,%d)", gs.x, gs.y, gs.z);
        printf("\n\tblocksize (%d,%d,%d)", bs.x, bs.y, bs.z);
        printf("\n\tNumElements %d", numElementsSq);
        if ((bs.x != 32 || bs.y != 32) && (bs.x != 16 || bs.y != 16) && (bs.x != 8 || bs.y != 8))
        {
            printf("\nBlock size must be 8x8 or 16x16 or 32x32 because of template for MatMultFast");
            exit(EXIT_FAILURE);
        }
        // time the kernel with CUDA events
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start);
        // dispatch to the template instantiation matching the block edge
        if (bs.x == 32)
        {
            kernelMatMultFast<32><<<gs, bs>>>(d_C.elements, d_A.elements, d_B.elements, d_A.width, d_B.height);
        }
        else
        if (bs.x == 16)
        {
            kernelMatMultFast<16><<<gs, bs>>>(d_C.elements, d_A.elements, d_B.elements, d_A.width, d_B.height);
        }
        else
        if (bs.x == 8)
        {
            kernelMatMultFast<8><<<gs, bs>>>(d_C.elements, d_A.elements, d_B.elements, d_A.width, d_B.height);
        }
        // catch bad launch configurations immediately
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            printf("Failed to launch kernelMatMultFast (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        // events are no longer needed
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        err = cudaMemcpy(h_A, d_A.elements, size_A, cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            printf("Failed to copy matrix A from device to host (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaMemcpy(h_B, d_B.elements, size_B, cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            printf("Failed to copy matrix B from device to host (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaMemcpy(h_C, d_C.elements, size_C, cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            printf("Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        //
        // Now compute AB = A*B on the host so that we can compare it with the GPU.
        //
        // First transpose h_B matrix into h_T using SSE.
        // Then use OpenMP and AVX to perform the matrix multiplication 8 floats at a time.
        //
        printf("\nGPU finished %f milliseconds.\nComputing host solution ...", milliseconds);
        {
            float *h_T = new float[numElementsSq];
            for (int i = 0; i < numElements; i += 4)
            {
                for (int j = 0; j < numElements; j += 4)
                {
                    __m128 B[4];
                    for (int k = 0; k < 4; k++)
                    {
                        // unaligned load: new[] does not guarantee 16-byte
                        // alignment of every row start
                        B[k] = _mm_loadu_ps(&h_B[(i + k)*numElements + j]);
                    }
                    _MM_TRANSPOSE4_PS(B[0], B[1], B[2], B[3]);
                    for (int k = 0; k < 4; k++)
                        _mm_storeu_ps(&h_T[(j + k)*numElements + i], B[k]);
                }
            }
#pragma omp parallel for
            for (int i = 0; i < numElements; i++)
            {
                for (int j = 0; j < numElements; j++)
                {
                    __m256 T = _mm256_setzero_ps();
                    for (int k = 0; k < numElements; k += 8)
                    {
                        // unaligned loads: new[] storage is not 32-byte
                        // aligned, so _mm256_load_ps would fault
                        __m256 A1 = _mm256_loadu_ps(&h_A[i*numElements + k]);
                        __m256 T1 = _mm256_loadu_ps(&h_T[j*numElements + k]);
                        __m256 C = _mm256_mul_ps(A1, T1);
                        T = _mm256_add_ps(C, T);
                    }
                    float Q = _mm256_reduce_add_ps(T);
                    h_AB[i*numElements + j] = Q;
                }
            }
            delete[]h_T;
        }
        // Verify that the result vector is correct (tolerance: the GPU and
        // host reductions sum in different orders)
        printf("\nValidating results ...");
#pragma omp parallel for
        for (int i = 0; i < numElementsSq; ++i)
        {
            float T1 = h_AB[i];
            float T2 = h_C[i];
            if (fabs(T1 - T2) > 0.009f)
            {
                printf("Result verification failed at element %d!\n", i);
                printf("h_AB[%d] = %f, h_C[%d]=%f\n", i, h_AB[i], i, h_C[i]);
                exit(EXIT_FAILURE);
            }
        }
        printf(" success!\n");
        err = cudaFree(d_A.elements);
        if (err != cudaSuccess)
        {
            printf("Failed to free device matrix A (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaFree(d_B.elements);
        if (err != cudaSuccess)
        {
            printf("Failed to free device matrix B (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaFree(d_C.elements);
        if (err != cudaSuccess)
        {
            printf("Failed to free device matrix C (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        delete[]h_A;
        delete[]h_B;
        delete[]h_C;
        delete[]h_AB;
    }
    else
    {
        printf("\nExpecting -numele, but saw %s", argv[nextarg]);
        exit(EXIT_FAILURE);
    }
}
|
7416e995bfe8631098918924ff0d969a332e7c7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "Particles.h"
#include "Alloc.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/** Allocate and initialise the particle arrays for one species.
 *  Copies the per-species scalars (counts, charge-to-mass ratio, drift and
 *  thermal velocities) from the global parameter block into `part`, then
 *  allocates npmax-sized position/velocity/charge arrays with new[].
 *  Release them with particle_deallocate().
 *  @param param  global simulation parameters (read only)
 *  @param part   species particle container to fill
 *  @param is     species index into the per-species parameter arrays
 */
void particle_allocate(struct parameters* param, struct particles* part, int is)
{
    // set species ID
    part->species_ID = is;
    // number of particles
    part->nop = param->np[is];
    // maximum number of particles
    part->npmax = param->npMax[is];
    // choose a different number of mover iterations for ions and electrons
    // (a negative charge-to-mass ratio identifies electrons)
    if (param->qom[is] < 0){ //electrons
        part->NiterMover = param->NiterMover;
        part->n_sub_cycles = param->n_sub_cycles;
    } else { // ions: only one iteration
        part->NiterMover = 1;
        part->n_sub_cycles = 1;
    }
    // particles per cell
    part->npcelx = param->npcelx[is];
    part->npcely = param->npcely[is];
    part->npcelz = param->npcelz[is];
    part->npcel = part->npcelx*part->npcely*part->npcelz;
    // cast it to required precision
    part->qom = (FPpart) param->qom[is];
    long npmax = part->npmax;
    // initialize drift and thermal velocities
    // drift
    part->u0 = (FPpart) param->u0[is];
    part->v0 = (FPpart) param->v0[is];
    part->w0 = (FPpart) param->w0[is];
    // thermal
    part->uth = (FPpart) param->uth[is];
    part->vth = (FPpart) param->vth[is];
    part->wth = (FPpart) param->wth[is];
    //////////////////////////////
    /// ALLOCATION PARTICLE ARRAYS
    //////////////////////////////
    // allocate positions
    part->x = new FPpart[npmax];
    part->y = new FPpart[npmax];
    part->z = new FPpart[npmax];
    // allocate velocity
    part->u = new FPpart[npmax];
    part->v = new FPpart[npmax];
    part->w = new FPpart[npmax];
    // allocate charge = q * statistical weight
    part->q = new FPinterp[npmax];
}
/** Free every array previously allocated by particle_allocate(). */
void particle_deallocate(struct particles* part)
{
    // release in reverse order of allocation: charge, velocity, position
    delete[] part->q;
    delete[] part->w;
    delete[] part->v;
    delete[] part->u;
    delete[] part->z;
    delete[] part->y;
    delete[] part->x;
}
/** Particle mover with sub-cycling (predictor/corrector scheme).
 *  For every particle of the species, per sub-cycle:
 *    1. iterate NiterMover times: interpolate E and B from the 8 grid nodes
 *       surrounding the particle, solve the implicit velocity rotation, and
 *       move the particle to the half-step (mid-point) position;
 *    2. corrector: final velocity = 2*average - old velocity; final position
 *       advanced a full sub-cycle step from the saved start position;
 *    3. apply per-direction periodic or reflecting boundary conditions.
 *  @return 0 on success
 *  NOTE(review): uptilde/vptilde/wptilde are read after the inner loop, so
 *  part->NiterMover must be >= 1 (particle_allocate guarantees this today).
 */
int mover_PC(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
    // print species and subcycling
    std::cout << "*** MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
    // auxiliary variables: sub-cycle step, half step, and q/m * dt/2 / c
    FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
    FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
    FPpart omdtsq, denom, ut, vt, wt, udotb;
    // local (to the particle) electric and magnetic field
    FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
    // interpolation densities
    int ix,iy,iz;
    FPfield weight[2][2][2];
    FPfield xi[2], eta[2], zeta[2];
    // intermediate particle position and velocity
    FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
    // start subcycling
    for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
        // move each particle with new fields
        for (int i=0; i < part->nop; i++){
            // save the start-of-step position for the corrector below
            xptilde = part->x[i];
            yptilde = part->y[i];
            zptilde = part->z[i];
            // calculate the average velocity iteratively
            for(int innter=0; innter < part->NiterMover; innter++){
                // interpolation G-->P: locate the cell containing the particle
                ix = 2 + int((part->x[i] - grd->xStart)*grd->invdx);
                iy = 2 + int((part->y[i] - grd->yStart)*grd->invdy);
                iz = 2 + int((part->z[i] - grd->zStart)*grd->invdz);
                // calculate weights (distances to the surrounding nodes)
                xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
                eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
                zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
                xi[1] = grd->XN[ix][iy][iz] - part->x[i];
                eta[1] = grd->YN[ix][iy][iz] - part->y[i];
                zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
                for (int ii = 0; ii < 2; ii++)
                    for (int jj = 0; jj < 2; jj++)
                        for (int kk = 0; kk < 2; kk++)
                            weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
                // set to zero local electric and magnetic field
                Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
                // trilinear interpolation of E and B from the 8 nodes
                for (int ii=0; ii < 2; ii++)
                    for (int jj=0; jj < 2; jj++)
                        for(int kk=0; kk < 2; kk++){
                            Exl += weight[ii][jj][kk]*field->Ex[ix- ii][iy -jj][iz- kk ];
                            Eyl += weight[ii][jj][kk]*field->Ey[ix- ii][iy -jj][iz- kk ];
                            Ezl += weight[ii][jj][kk]*field->Ez[ix- ii][iy -jj][iz -kk ];
                            Bxl += weight[ii][jj][kk]*field->Bxn[ix- ii][iy -jj][iz -kk ];
                            Byl += weight[ii][jj][kk]*field->Byn[ix- ii][iy -jj][iz -kk ];
                            Bzl += weight[ii][jj][kk]*field->Bzn[ix- ii][iy -jj][iz -kk ];
                        }
                // end interpolation
                omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
                denom = 1.0/(1.0 + omdtsq);
                // solve the position equation
                ut= part->u[i] + qomdt2*Exl;
                vt= part->v[i] + qomdt2*Eyl;
                wt= part->w[i] + qomdt2*Ezl;
                udotb = ut*Bxl + vt*Byl + wt*Bzl;
                // solve the velocity equation (rotation around B)
                uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
                vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
                wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
                // update position to the mid-point (half step) for the next iteration
                part->x[i] = xptilde + uptilde*dto2;
                part->y[i] = yptilde + vptilde*dto2;
                part->z[i] = zptilde + wptilde*dto2;
            } // end of iteration
            // update the final position and velocity (corrector step)
            part->u[i]= 2.0*uptilde - part->u[i];
            part->v[i]= 2.0*vptilde - part->v[i];
            part->w[i]= 2.0*wptilde - part->w[i];
            part->x[i] = xptilde + uptilde*dt_sub_cycling;
            part->y[i] = yptilde + vptilde*dt_sub_cycling;
            part->z[i] = zptilde + wptilde*dt_sub_cycling;
            //////////
            //////////
            ////////// BC
            // X-DIRECTION: BC particles
            if (part->x[i] > grd->Lx){
                if (param->PERIODICX==true){ // PERIODIC
                    part->x[i] = part->x[i] - grd->Lx;
                } else { // REFLECTING BC
                    part->u[i] = -part->u[i];
                    part->x[i] = 2*grd->Lx - part->x[i];
                }
            }
            if (part->x[i] < 0){
                if (param->PERIODICX==true){ // PERIODIC
                    part->x[i] = part->x[i] + grd->Lx;
                } else { // REFLECTING BC
                    part->u[i] = -part->u[i];
                    part->x[i] = -part->x[i];
                }
            }
            // Y-DIRECTION: BC particles
            if (part->y[i] > grd->Ly){
                if (param->PERIODICY==true){ // PERIODIC
                    part->y[i] = part->y[i] - grd->Ly;
                } else { // REFLECTING BC
                    part->v[i] = -part->v[i];
                    part->y[i] = 2*grd->Ly - part->y[i];
                }
            }
            if (part->y[i] < 0){
                if (param->PERIODICY==true){ // PERIODIC
                    part->y[i] = part->y[i] + grd->Ly;
                } else { // REFLECTING BC
                    part->v[i] = -part->v[i];
                    part->y[i] = -part->y[i];
                }
            }
            // Z-DIRECTION: BC particles
            if (part->z[i] > grd->Lz){
                if (param->PERIODICZ==true){ // PERIODIC
                    part->z[i] = part->z[i] - grd->Lz;
                } else { // REFLECTING BC
                    part->w[i] = -part->w[i];
                    part->z[i] = 2*grd->Lz - part->z[i];
                }
            }
            if (part->z[i] < 0){
                if (param->PERIODICZ==true){ // PERIODIC
                    part->z[i] = part->z[i] + grd->Lz;
                } else { // REFLECTING BC
                    part->w[i] = -part->w[i];
                    part->z[i] = -part->z[i];
                }
            }
        } // end of subcycling
    } // end of one particle
    return(0); // exit succcesfully
} // end of the mover
/** GPU kernel: deposit one particle's moments (charge density, current
 *  density Jx/Jy/Jz and the six pressure-tensor components) onto the 8
 *  grid nodes surrounding the particle, with trilinear weights.
 *  One thread handles one particle; launch with at least `particles`
 *  threads in total.
 *  Fix vs. the original: the distance computations referenced the undefined
 *  name `part_index` (a compile error); this thread's particle index is `i`.
 *  NOTE(review): several threads can deposit into the same node, so the
 *  `+=` updates below race unless they are made atomic — confirm before
 *  relying on the accumulated moments.
 *  NOTE(review): node coordinates are read through the flattened
 *  XN_flat/YN_flat/ZN_flat arrays while the moment arrays are indexed as
 *  3-D pointer tables; verify both layouts are valid device-side. */
__global__ void INTERPP2G_GPU(struct particles* part, struct interpDensSpecies* ids, struct grid* grd, long particles){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // one thread per particle; excess threads exit immediately
    if(i >= particles){
        return;
    }
    FPpart weight[2][2][2];
    FPpart temp[2][2][2];
    FPpart xi[2], eta[2], zeta[2];
    // index of the cell
    int ix, iy, iz;
    // determine cell: can we change to int()? is it faster?
    ix = 2 + int (floor((part->x[i] - grd->xStart) * grd->invdx));
    iy = 2 + int (floor((part->y[i] - grd->yStart) * grd->invdy));
    iz = 2 + int (floor((part->z[i] - grd->zStart) * grd->invdz));
    // distances from node (bug fix: use the thread's particle index `i`)
    xi[0] = part->x[i] - grd->XN_flat[get_idx(ix-1, iy, iz, grd->nyn, grd->nzn)];
    eta[0] = part->y[i] - grd->YN_flat[get_idx(ix, iy-1, iz, grd->nyn, grd->nzn)];
    zeta[0] = part->z[i] - grd->ZN_flat[get_idx(ix, iy, iz-1, grd->nyn, grd->nzn)];
    xi[1] = grd->XN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part->x[i];
    eta[1] = grd->YN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part->y[i];
    zeta[1] = grd->ZN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part->z[i];
    // calculate the weights for different nodes (charge-weighted)
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                weight[ii][jj][kk] = part->q[i] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
    //////////////////////////
    // add charge density
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->rhon[ix - ii][iy - jj][iz - kk] += weight[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add current density - Jx
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->Jx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add current density - Jy
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->v[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->Jy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add current density - Jz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->w[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->Jz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add pressure pxx
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * part->u[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pxx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add pressure pxy
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * part->v[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pxy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pxz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * part->w[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pxz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pyy
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->v[i] * part->v[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pyy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pyz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->v[i] * part->w[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pyz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pzz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->w[i] * part->w[i] * weight[ii][jj][kk];
    for (int ii=0; ii < 2; ii++)
        for (int jj=0; jj < 2; jj++)
            for(int kk=0; kk < 2; kk++)
                ids->pzz[ix -ii][iy -jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
}
/** Interpolation Particle --> Grid: This is for species */
// NOTE(review): this routine looks unfinished — it allocates device memory
// but never copies data to it, never launches INTERPP2G_GPU, and never frees
// the allocation (so gpu_particles leaks); gpu_interp is declared but unused.
// Also confirm the intended size: `size * sizeof(particles)` allocates
// `size` copies of the whole particles struct, not `size` particles.
void interpP2G(struct particles* part, struct interpDensSpecies* ids, struct grid* grd, long size)
{
    particles* gpu_particles;
    interpDensSpecies* gpu_interp;   // unused — presumably a future device copy of ids
    hipMalloc((void**)&gpu_particles,size * sizeof(particles));
}
| 7416e995bfe8631098918924ff0d969a332e7c7a.cu | #include "Particles.h"
#include "Alloc.h"
#include <cuda.h>
#include <cuda_runtime.h>
/** Allocate and initialise the particle arrays for one species.
 *  Copies the per-species scalars (counts, charge-to-mass ratio, drift and
 *  thermal velocities) from the global parameter block into `part`, then
 *  allocates npmax-sized position/velocity/charge arrays with new[].
 *  Release them with particle_deallocate().
 *  @param param  global simulation parameters (read only)
 *  @param part   species particle container to fill
 *  @param is     species index into the per-species parameter arrays
 */
void particle_allocate(struct parameters* param, struct particles* part, int is)
{
    // set species ID
    part->species_ID = is;
    // number of particles
    part->nop = param->np[is];
    // maximum number of particles
    part->npmax = param->npMax[is];
    // choose a different number of mover iterations for ions and electrons
    // (a negative charge-to-mass ratio identifies electrons)
    if (param->qom[is] < 0){ //electrons
        part->NiterMover = param->NiterMover;
        part->n_sub_cycles = param->n_sub_cycles;
    } else { // ions: only one iteration
        part->NiterMover = 1;
        part->n_sub_cycles = 1;
    }
    // particles per cell
    part->npcelx = param->npcelx[is];
    part->npcely = param->npcely[is];
    part->npcelz = param->npcelz[is];
    part->npcel = part->npcelx*part->npcely*part->npcelz;
    // cast it to required precision
    part->qom = (FPpart) param->qom[is];
    long npmax = part->npmax;
    // initialize drift and thermal velocities
    // drift
    part->u0 = (FPpart) param->u0[is];
    part->v0 = (FPpart) param->v0[is];
    part->w0 = (FPpart) param->w0[is];
    // thermal
    part->uth = (FPpart) param->uth[is];
    part->vth = (FPpart) param->vth[is];
    part->wth = (FPpart) param->wth[is];
    //////////////////////////////
    /// ALLOCATION PARTICLE ARRAYS
    //////////////////////////////
    // allocate positions
    part->x = new FPpart[npmax];
    part->y = new FPpart[npmax];
    part->z = new FPpart[npmax];
    // allocate velocity
    part->u = new FPpart[npmax];
    part->v = new FPpart[npmax];
    part->w = new FPpart[npmax];
    // allocate charge = q * statistical weight
    part->q = new FPinterp[npmax];
}
/** Free every array previously allocated by particle_allocate(). */
void particle_deallocate(struct particles* part)
{
    // release in reverse order of allocation: charge, velocity, position
    delete[] part->q;
    delete[] part->w;
    delete[] part->v;
    delete[] part->u;
    delete[] part->z;
    delete[] part->y;
    delete[] part->x;
}
/** Particle mover with sub-cycling (predictor/corrector scheme).
 *  For every particle of the species, per sub-cycle:
 *    1. iterate NiterMover times: interpolate E and B from the 8 grid nodes
 *       surrounding the particle, solve the implicit velocity rotation, and
 *       move the particle to the half-step (mid-point) position;
 *    2. corrector: final velocity = 2*average - old velocity; final position
 *       advanced a full sub-cycle step from the saved start position;
 *    3. apply per-direction periodic or reflecting boundary conditions.
 *  @return 0 on success
 *  NOTE(review): uptilde/vptilde/wptilde are read after the inner loop, so
 *  part->NiterMover must be >= 1 (particle_allocate guarantees this today).
 */
int mover_PC(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
    // print species and subcycling
    std::cout << "*** MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
    // auxiliary variables: sub-cycle step, half step, and q/m * dt/2 / c
    FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
    FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
    FPpart omdtsq, denom, ut, vt, wt, udotb;
    // local (to the particle) electric and magnetic field
    FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
    // interpolation densities
    int ix,iy,iz;
    FPfield weight[2][2][2];
    FPfield xi[2], eta[2], zeta[2];
    // intermediate particle position and velocity
    FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
    // start subcycling
    for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
        // move each particle with new fields
        for (int i=0; i < part->nop; i++){
            // save the start-of-step position for the corrector below
            xptilde = part->x[i];
            yptilde = part->y[i];
            zptilde = part->z[i];
            // calculate the average velocity iteratively
            for(int innter=0; innter < part->NiterMover; innter++){
                // interpolation G-->P: locate the cell containing the particle
                ix = 2 + int((part->x[i] - grd->xStart)*grd->invdx);
                iy = 2 + int((part->y[i] - grd->yStart)*grd->invdy);
                iz = 2 + int((part->z[i] - grd->zStart)*grd->invdz);
                // calculate weights (distances to the surrounding nodes)
                xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
                eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
                zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
                xi[1] = grd->XN[ix][iy][iz] - part->x[i];
                eta[1] = grd->YN[ix][iy][iz] - part->y[i];
                zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
                for (int ii = 0; ii < 2; ii++)
                    for (int jj = 0; jj < 2; jj++)
                        for (int kk = 0; kk < 2; kk++)
                            weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
                // set to zero local electric and magnetic field
                Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
                // trilinear interpolation of E and B from the 8 nodes
                for (int ii=0; ii < 2; ii++)
                    for (int jj=0; jj < 2; jj++)
                        for(int kk=0; kk < 2; kk++){
                            Exl += weight[ii][jj][kk]*field->Ex[ix- ii][iy -jj][iz- kk ];
                            Eyl += weight[ii][jj][kk]*field->Ey[ix- ii][iy -jj][iz- kk ];
                            Ezl += weight[ii][jj][kk]*field->Ez[ix- ii][iy -jj][iz -kk ];
                            Bxl += weight[ii][jj][kk]*field->Bxn[ix- ii][iy -jj][iz -kk ];
                            Byl += weight[ii][jj][kk]*field->Byn[ix- ii][iy -jj][iz -kk ];
                            Bzl += weight[ii][jj][kk]*field->Bzn[ix- ii][iy -jj][iz -kk ];
                        }
                // end interpolation
                omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
                denom = 1.0/(1.0 + omdtsq);
                // solve the position equation
                ut= part->u[i] + qomdt2*Exl;
                vt= part->v[i] + qomdt2*Eyl;
                wt= part->w[i] + qomdt2*Ezl;
                udotb = ut*Bxl + vt*Byl + wt*Bzl;
                // solve the velocity equation (rotation around B)
                uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
                vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
                wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
                // update position to the mid-point (half step) for the next iteration
                part->x[i] = xptilde + uptilde*dto2;
                part->y[i] = yptilde + vptilde*dto2;
                part->z[i] = zptilde + wptilde*dto2;
            } // end of iteration
            // update the final position and velocity (corrector step)
            part->u[i]= 2.0*uptilde - part->u[i];
            part->v[i]= 2.0*vptilde - part->v[i];
            part->w[i]= 2.0*wptilde - part->w[i];
            part->x[i] = xptilde + uptilde*dt_sub_cycling;
            part->y[i] = yptilde + vptilde*dt_sub_cycling;
            part->z[i] = zptilde + wptilde*dt_sub_cycling;
            //////////
            //////////
            ////////// BC
            // X-DIRECTION: BC particles
            if (part->x[i] > grd->Lx){
                if (param->PERIODICX==true){ // PERIODIC
                    part->x[i] = part->x[i] - grd->Lx;
                } else { // REFLECTING BC
                    part->u[i] = -part->u[i];
                    part->x[i] = 2*grd->Lx - part->x[i];
                }
            }
            if (part->x[i] < 0){
                if (param->PERIODICX==true){ // PERIODIC
                    part->x[i] = part->x[i] + grd->Lx;
                } else { // REFLECTING BC
                    part->u[i] = -part->u[i];
                    part->x[i] = -part->x[i];
                }
            }
            // Y-DIRECTION: BC particles
            if (part->y[i] > grd->Ly){
                if (param->PERIODICY==true){ // PERIODIC
                    part->y[i] = part->y[i] - grd->Ly;
                } else { // REFLECTING BC
                    part->v[i] = -part->v[i];
                    part->y[i] = 2*grd->Ly - part->y[i];
                }
            }
            if (part->y[i] < 0){
                if (param->PERIODICY==true){ // PERIODIC
                    part->y[i] = part->y[i] + grd->Ly;
                } else { // REFLECTING BC
                    part->v[i] = -part->v[i];
                    part->y[i] = -part->y[i];
                }
            }
            // Z-DIRECTION: BC particles
            if (part->z[i] > grd->Lz){
                if (param->PERIODICZ==true){ // PERIODIC
                    part->z[i] = part->z[i] - grd->Lz;
                } else { // REFLECTING BC
                    part->w[i] = -part->w[i];
                    part->z[i] = 2*grd->Lz - part->z[i];
                }
            }
            if (part->z[i] < 0){
                if (param->PERIODICZ==true){ // PERIODIC
                    part->z[i] = part->z[i] + grd->Lz;
                } else { // REFLECTING BC
                    part->w[i] = -part->w[i];
                    part->z[i] = -part->z[i];
                }
            }
        } // end of subcycling
    } // end of one particle
    return(0); // exit succcesfully
} // end of the mover
/** GPU kernel: deposit one particle's moments (charge density, current
 *  density Jx/Jy/Jz and the six pressure-tensor components) onto the 8
 *  grid nodes surrounding the particle, with trilinear weights.
 *  One thread handles one particle; launch with at least `particles`
 *  threads in total.
 *  Fix vs. the original: the distance computations referenced the undefined
 *  name `part_index` (a compile error); this thread's particle index is `i`.
 *  NOTE(review): several threads can deposit into the same node, so the
 *  `+=` updates below race unless they are made atomic — confirm before
 *  relying on the accumulated moments.
 *  NOTE(review): node coordinates are read through the flattened
 *  XN_flat/YN_flat/ZN_flat arrays while the moment arrays are indexed as
 *  3-D pointer tables; verify both layouts are valid device-side. */
__global__ void INTERPP2G_GPU(struct particles* part, struct interpDensSpecies* ids, struct grid* grd, long particles){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // one thread per particle; excess threads exit immediately
    if(i >= particles){
        return;
    }
    FPpart weight[2][2][2];
    FPpart temp[2][2][2];
    FPpart xi[2], eta[2], zeta[2];
    // index of the cell
    int ix, iy, iz;
    // determine cell: can we change to int()? is it faster?
    ix = 2 + int (floor((part->x[i] - grd->xStart) * grd->invdx));
    iy = 2 + int (floor((part->y[i] - grd->yStart) * grd->invdy));
    iz = 2 + int (floor((part->z[i] - grd->zStart) * grd->invdz));
    // distances from node (bug fix: use the thread's particle index `i`)
    xi[0] = part->x[i] - grd->XN_flat[get_idx(ix-1, iy, iz, grd->nyn, grd->nzn)];
    eta[0] = part->y[i] - grd->YN_flat[get_idx(ix, iy-1, iz, grd->nyn, grd->nzn)];
    zeta[0] = part->z[i] - grd->ZN_flat[get_idx(ix, iy, iz-1, grd->nyn, grd->nzn)];
    xi[1] = grd->XN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part->x[i];
    eta[1] = grd->YN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part->y[i];
    zeta[1] = grd->ZN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part->z[i];
    // calculate the weights for different nodes (charge-weighted)
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                weight[ii][jj][kk] = part->q[i] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
    //////////////////////////
    // add charge density
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->rhon[ix - ii][iy - jj][iz - kk] += weight[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add current density - Jx
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->Jx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add current density - Jy
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->v[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->Jy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add current density - Jz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->w[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->Jz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add pressure pxx
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * part->u[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pxx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    ////////////////////////////
    // add pressure pxy
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * part->v[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pxy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pxz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->u[i] * part->w[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pxz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pyy
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->v[i] * part->v[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pyy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pyz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->v[i] * part->w[i] * weight[ii][jj][kk];
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                ids->pyz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
    /////////////////////////////
    // add pressure pzz
    for (int ii = 0; ii < 2; ii++)
        for (int jj = 0; jj < 2; jj++)
            for (int kk = 0; kk < 2; kk++)
                temp[ii][jj][kk] = part->w[i] * part->w[i] * weight[ii][jj][kk];
    for (int ii=0; ii < 2; ii++)
        for (int jj=0; jj < 2; jj++)
            for(int kk=0; kk < 2; kk++)
                ids->pzz[ix -ii][iy -jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
}
/** Interpolation Particle --> Grid: This is for species */
void interpP2G(struct particles* part, struct interpDensSpecies* ids, struct grid* grd, long size)
{
particles* gpu_particles;
interpDensSpecies* gpu_interp;
cudaMalloc((void**)&gpu_particles,size * sizeof(particles));
}
|
9fec1e13b85e1da6bceb44b5553b306ea4226f7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BilateralCuda_naive.cuh"
using namespace cv;
using namespace cv::cuda;
using namespace std;
#define TILE_X 32
#define TILE_Y 32
//constant memmory
__constant__ float cGaussian[64];
//texture memory
texture<uchar, 2, hipReadModeElementType> tex;
__host__ void updateGaussian(const int r, const double sd)
{
float fGaussian[64];
for (int i = 0; i < 2 * r + 1; i++)
{
float x = i - r;
fGaussian[i] = expf(-(x * x) / (2 * sd * sd));
}
//
hipMemcpyToSymbol(cGaussian, fGaussian, sizeof(float) * (2 * r + 1));
}
//
__device__ inline float gaussian(const float x, const float sigma)
{
return __expf(-(powf(x, 2)) / (2 * powf(sigma, 2)));
}
__global__ void gpuCalculation_texture(const uchar* src, uchar* dest, const int width, const int height, const int r, const float sigma_r, const float sigma_s)
{
int idx = __mul24(blockIdx.x, TILE_X) + threadIdx.x;
int idy = __mul24(blockIdx.y, TILE_Y) + threadIdx.y;
if ((idx < width) && (idy < height))
{
double sum = 0;
double wsum = 0;
//
uchar tgt = tex2D(tex, idx + r, idy + r);
for (int dy = -r; dy <= r; dy++) {
for (int dx = -r; dx <= r; dx++) {
//
uchar ref = tex2D(tex, idx + dx + r, idy + dy + r);
double w = (cGaussian[dy + r] * cGaussian[dx + r]) * gaussian(tgt - ref, sigma_r);
sum += w * ref;
wsum += w;
}
}
dest[(idy)*width + idx] = sum / wsum;
}
}
__global__ void gpuCalculation_global(const uchar* src, uchar* dest, const int width, const int height, const int r, const float sigma_r, const float sigma_s)
{
int idx = __mul24(blockIdx.x, TILE_X) + threadIdx.x;
int idy = __mul24(blockIdx.y, TILE_Y) + threadIdx.y;
if ((idx < width) && (idy < height))
{
double sum = 0;
double wsum = 0;
//
uchar tgt = src[(idy+r)*(width+2*r)+idx+r];
for (int dy = -r; dy <= r; dy++) {
for (int dx = -r; dx <= r; dx++) {
//
uchar ref = src[(idy + dy + r) * (width + 2 * r) + idx + dx + r];
double w = (cGaussian[dy + r] * cGaussian[dx + r]) * gaussian(tgt - ref, sigma_r);
sum += w * ref;
wsum += w;
}
}
dest[(idy)*width + idx] = sum / wsum;
}
}
vector<double> bilateralFilterCuda_naive_texture(const Mat& src, Mat& dest, const int r, const float sigma_r, const float sigma_s, const int loop)
{
// GPU
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
//int gray_size = src.cols * src.rows;
int gray_size_dest = dest.cols * dest.rows;
//
//
size_t pitch;
uchar* DevSrc = NULL;
uchar* DevDest;
//
updateGaussian(r, sigma_s);
// pitchcpy2DGPU
hipMallocPitch(&DevSrc, &pitch, sizeof(uchar) * src.step, src.rows);
hipMemcpy2D(DevSrc, pitch, src.ptr(), sizeof(uchar) * src.step, sizeof(uchar) * src.step, src.rows, hipMemcpyHostToDevice);
hipBindTexture2D(0, tex, DevSrc, src.step, src.rows, pitch);
//
hipMalloc<uchar>(&DevDest, gray_size_dest);
dim3 block(TILE_X, TILE_Y);
//
dim3 grid((dest.cols + block.x - 1) / block.x, (dest.rows + block.y - 1) / block.y);
//
int loop_ = loop;
vector<double> calcTimes(loop);
while (loop_--) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( gpuCalculation_texture), dim3(grid), dim3(block), 0, 0, DevSrc, DevDest, dest.cols, dest.rows, r, sigma_r, sigma_s);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
calcTimes.push_back(time);
}
// DtoH
hipMemcpy(dest.ptr(), DevDest, gray_size_dest, hipMemcpyDeviceToHost);
//
hipFree(DevSrc);
hipFree(DevDest);
//
hipUnbindTexture(tex);
hipDeviceReset();
return calcTimes;
}
vector<double> bilateralFilterCuda_naive_global(const Mat& src, Mat& dest, const int r, const float sigma_r, const float sigma_s, const int loop)
{
// GPU
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
int gray_size = src.cols * src.rows;
int gray_size_dest = dest.cols * dest.rows;
size_t pitch;
uchar* DevSrc = NULL;
uchar* DevDest;
updateGaussian(r, sigma_s);
hipMalloc<uchar>(&DevSrc, gray_size);
hipMemcpy(DevSrc, src.ptr(), gray_size, hipMemcpyHostToDevice);
//
hipMalloc<uchar>(&DevDest, gray_size_dest);
dim3 block(TILE_X, TILE_Y);
dim3 grid((dest.cols + block.x - 1) / block.x, (dest.rows + block.y - 1) / block.y);
//
int loop_ = loop;
vector<double> calcTimes(loop);
while (loop_--) {
hipEventRecord(start, 0);
gpuCalculation_global<< <grid, block >> > (DevSrc, DevDest, dest.cols, dest.rows, r, sigma_r, sigma_s);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
calcTimes.push_back(time);
}
// DtoH
hipMemcpy(dest.ptr(), DevDest, gray_size_dest, hipMemcpyDeviceToHost);
//
hipFree(DevSrc);
hipFree(DevDest);
//
hipDeviceReset();
return calcTimes;
} | 9fec1e13b85e1da6bceb44b5553b306ea4226f7e.cu | #include "BilateralCuda_naive.cuh"
using namespace cv;
using namespace cv::cuda;
using namespace std;
#define TILE_X 32
#define TILE_Y 32
//constant memmory
__constant__ float cGaussian[64];
//texture memory
texture<uchar, 2, cudaReadModeElementType> tex;
__host__ void updateGaussian(const int r, const double sd)
{
float fGaussian[64];
for (int i = 0; i < 2 * r + 1; i++)
{
float x = i - r;
fGaussian[i] = expf(-(x * x) / (2 * sd * sd));
}
// 外で宣言してあるコンスタントメモリに、空間的距離のガウシアンを用意
cudaMemcpyToSymbol(cGaussian, fGaussian, sizeof(float) * (2 * r + 1));
}
// 輝度差の方のガウシアン
__device__ inline float gaussian(const float x, const float sigma)
{
return __expf(-(powf(x, 2)) / (2 * powf(sigma, 2)));
}
__global__ void gpuCalculation_texture(const uchar* src, uchar* dest, const int width, const int height, const int r, const float sigma_r, const float sigma_s)
{
int idx = __mul24(blockIdx.x, TILE_X) + threadIdx.x;
int idy = __mul24(blockIdx.y, TILE_Y) + threadIdx.y;
if ((idx < width) && (idy < height))
{
double sum = 0;
double wsum = 0;
// 注目画素
uchar tgt = tex2D(tex, idx + r, idy + r);
for (int dy = -r; dy <= r; dy++) {
for (int dx = -r; dx <= r; dx++) {
// テクスチャ
uchar ref = tex2D(tex, idx + dx + r, idy + dy + r);
double w = (cGaussian[dy + r] * cGaussian[dx + r]) * gaussian(tgt - ref, sigma_r);
sum += w * ref;
wsum += w;
}
}
dest[(idy)*width + idx] = sum / wsum;
}
}
__global__ void gpuCalculation_global(const uchar* src, uchar* dest, const int width, const int height, const int r, const float sigma_r, const float sigma_s)
{
int idx = __mul24(blockIdx.x, TILE_X) + threadIdx.x;
int idy = __mul24(blockIdx.y, TILE_Y) + threadIdx.y;
if ((idx < width) && (idy < height))
{
double sum = 0;
double wsum = 0;
// グローバルメモリの実装
uchar tgt = src[(idy+r)*(width+2*r)+idx+r];
for (int dy = -r; dy <= r; dy++) {
for (int dx = -r; dx <= r; dx++) {
// グローバル
uchar ref = src[(idy + dy + r) * (width + 2 * r) + idx + dx + r];
double w = (cGaussian[dy + r] * cGaussian[dx + r]) * gaussian(tgt - ref, sigma_r);
sum += w * ref;
wsum += w;
}
}
dest[(idy)*width + idx] = sum / wsum;
}
}
vector<double> bilateralFilterCuda_naive_texture(const Mat& src, Mat& dest, const int r, const float sigma_r, const float sigma_s, const int loop)
{
// GPUの時間計測
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//int gray_size = src.cols * src.rows;
int gray_size_dest = dest.cols * dest.rows;
// メモリ確保
// バンクコンフリクト防止
size_t pitch;
uchar* DevSrc = NULL;
uchar* DevDest;
// 空間のガウシアンを作成
updateGaussian(r, sigma_s);
// pitchとcpy2Dで確保とGPUへのコピーをして、バインドも張る
cudaMallocPitch(&DevSrc, &pitch, sizeof(uchar) * src.step, src.rows);
cudaMemcpy2D(DevSrc, pitch, src.ptr(), sizeof(uchar) * src.step, sizeof(uchar) * src.step, src.rows, cudaMemcpyHostToDevice);
cudaBindTexture2D(0, tex, DevSrc, src.step, src.rows, pitch);
// 出力用
cudaMalloc<uchar>(&DevDest, gray_size_dest);
dim3 block(TILE_X, TILE_Y);
// 画像全体を覆うようなグリッドサイズ計算
dim3 grid((dest.cols + block.x - 1) / block.x, (dest.rows + block.y - 1) / block.y);
// 計測
int loop_ = loop;
vector<double> calcTimes(loop);
while (loop_--) {
cudaEventRecord(start, 0);
gpuCalculation_texture<<<grid, block>>>(DevSrc, DevDest, dest.cols, dest.rows, r, sigma_r, sigma_s);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
calcTimes.push_back(time);
}
// DtoH
cudaMemcpy(dest.ptr(), DevDest, gray_size_dest, cudaMemcpyDeviceToHost);
// 解放
cudaFree(DevSrc);
cudaFree(DevDest);
// バインドの解除
cudaUnbindTexture(tex);
cudaDeviceReset();
return calcTimes;
}
vector<double> bilateralFilterCuda_naive_global(const Mat& src, Mat& dest, const int r, const float sigma_r, const float sigma_s, const int loop)
{
// GPUの時間計測
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int gray_size = src.cols * src.rows;
int gray_size_dest = dest.cols * dest.rows;
size_t pitch;
uchar* DevSrc = NULL;
uchar* DevDest;
updateGaussian(r, sigma_s);
cudaMalloc<uchar>(&DevSrc, gray_size);
cudaMemcpy(DevSrc, src.ptr(), gray_size, cudaMemcpyHostToDevice);
// 出力用
cudaMalloc<uchar>(&DevDest, gray_size_dest);
dim3 block(TILE_X, TILE_Y);
dim3 grid((dest.cols + block.x - 1) / block.x, (dest.rows + block.y - 1) / block.y);
// 計測
int loop_ = loop;
vector<double> calcTimes(loop);
while (loop_--) {
cudaEventRecord(start, 0);
gpuCalculation_global<< <grid, block >> > (DevSrc, DevDest, dest.cols, dest.rows, r, sigma_r, sigma_s);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
calcTimes.push_back(time);
}
// DtoH
cudaMemcpy(dest.ptr(), DevDest, gray_size_dest, cudaMemcpyDeviceToHost);
// 解放
cudaFree(DevSrc);
cudaFree(DevDest);
// バインドの解除
cudaDeviceReset();
return calcTimes;
} |
34498a4e35e5b0a7f26c3978e563dd2f40831d98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <hjmccoy@lbl.gov
*
* ============================================================================
*/
#include <poggers/allocators/veb.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace poggers::allocators;
// __global__ void test_kernel(veb_tree * tree, uint64_t num_removes, int num_iterations){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid >= num_removes)return;
// //printf("Tid %lu\n", tid);
// for (int i=0; i< num_iterations; i++){
// if (!tree->remove(tid)){
// printf("BUG\n");
// }
// tree->insert(tid);
// }
__global__ void view_bits(layer * dev_layer){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("First set of bits: %lu\n", dev_layer->bits[0]);
}
__global__ void view_tree_bits(veb_tree * dev_tree){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
for (int i = 0; i< dev_tree->num_layers; i++){
printf("First bits of layer %d: %lu\n", i, dev_tree->layers[i]->bits[0]);
}
printf("End of tree\n");
}
__global__ void remove_insert_kernel(veb_tree * dev_tree, uint64_t num_removes, int num_rounds, uint64_t * misses){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= num_removes) return;
for (int i = 0; i < num_rounds; i++){
uint64_t remove = dev_tree->malloc();
if (remove != veb_tree::fail()){
bool dev_removes = dev_tree->insert(remove);
if (!dev_removes){ printf("Fail!\n"); }
} else {
atomicAdd((unsigned long long int *) misses, 1ULL);
}
}
}
__global__ void multi_run_remove_insert_kernel(veb_tree * dev_tree, uint64_t num_removes, int num_rounds, uint64_t * misses){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= num_removes) return;
uint64_t addresses[10];
for (int i = 0; i < num_rounds; i++){
uint64_t remove = dev_tree->malloc();
addresses[i] = remove;
}
for (int i = 0; i < num_rounds; i++){
if (addresses[i] == veb_tree::fail()){
atomicAdd((unsigned long long int *)misses, 1ULL);
} else {
bool remove = dev_tree->insert(addresses[i]);
if (!remove) printf("Failed to re-insert %lu\n", addresses[i]);
}
}
}
__global__ void check_insert_kernel(veb_tree * dev_tree, uint64_t * items, uint64_t * misses, uint64_t num_items){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= num_items) return;
uint64_t offset_at_lowest = dev_tree->malloc();
if (offset_at_lowest == veb_tree::fail()){
atomicAdd((unsigned long long int *)misses, 1ULL);
} else {
items[tid] = offset_at_lowest;
}
}
__global__ void free_insert_kernel(veb_tree * dev_tree, uint64_t * items, uint64_t num_items){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= num_items) return;
dev_tree->insert(items[tid]);
}
__global__ void assert_unique(uint64_t * items, uint64_t num_items){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= num_items) return;
uint64_t item = items[tid];
//0's are misses
if (item == 0) return;
for (uint64_t i = 0; i < tid; i++){
if (i == tid) continue;
if (item == items[i]){
printf("Conflict betwen %lu and %lu: %lu\n", tid, i, item);
}
}
}
__global__ void remove_insert_kernel_single_thread(veb_tree * dev_tree, uint64_t * items, uint64_t num_removes, int num_rounds){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid != 0) return;
for (int i = 0; i < num_rounds; i++){
for (uint64_t item_counter = 0; item_counter < num_removes; item_counter++){
items[item_counter] = dev_tree->malloc();
assert(items[item_counter] != veb_tree::fail());
}
for (uint64_t item_counter = 0; item_counter < num_removes; item_counter++){
bool dev_removes = dev_tree->insert(items[item_counter]);
assert(dev_removes);
}
}
}
// }
// __global__ void view_kernel(veb_tree * tree){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid != 0)return;
// }
//using allocator_type = buddy_allocator<0,0>;
int main(int argc, char** argv) {
// if (!test_one_thread()){
// printf("Test one thread: [FAIL]\n");
// } else {
// printf("Test one thread: [PASS]\n");
// }
// uint64_t num_removes = 64;
// veb_tree * test_allocator = veb_tree::generate_on_device(num_removes);
// hipDeviceSynchronize();
// view_kernel<<<1,1>>>(test_allocator);
// hipDeviceSynchronize();
// auto insert_start = std::chrono::high_resolution_clock::now();
// test_kernel<<<(num_removes-1)/512+1,512>>>(test_allocator, num_removes,1);
// hipDeviceSynchronize();
// auto insert_end= std::chrono::high_resolution_clock::now();
// printf("Space usage: %lu\n", test_allocator->space_in_bytes());
// std::chrono::duration<double> elapsed_seconds = insert_end - insert_start;
// std::cout << "Inserted " << num_removes << " in " << elapsed_seconds.count() << "seconds, throughput: " << std::fixed << 1.0*num_removes/elapsed_seconds.count() << std::endl;
//veb_tree::free_on_device(test_allocator);
// for (int i = 0; i < 32; i++){
// layer * try_layer = layer::generate_on_device((1ULL << i));
// hipDeviceSynchronize();
// view_bits<<<1,1>>>(try_layer);
// hipDeviceSynchronize();
// layer::free_on_device(try_layer);
// hipDeviceSynchronize();
// }
// for (int i = 28; i < 29; i++){
// uint64_t num_items = (1ULL << i);
// printf("%d shifted is %lu\n", i, num_items);
// uint64_t * items;
// CHECK_CUDA_ERROR(hipMalloc((void **)&items, num_items*sizeof(uint64_t)));
// hipMemset(items, 0, num_items*sizeof(uint64_t));
// uint64_t * misses;
// hipMallocManaged((void **)&misses, sizeof(uint64_t));
// hipDeviceSynchronize();
// misses[0] = 0;
// hipDeviceSynchronize();
// veb_tree * tree = veb_tree::generate_on_device(num_items, i);
// hipDeviceSynchronize();
// check_insert_kernel<<<(num_items-1)/512+1,512>>>(tree, items, misses, num_items);
// hipDeviceSynchronize();
// std::cout << "Missed " << misses[0] << "/" << (uint64_t) (num_items) << " items, fraction: " << 1.0*misses[0]/(num_items) << "\n";
// assert_unique<<<(num_items-1)/512+1,512>>>(items, num_items);
// hipDeviceSynchronize();
// free_insert_kernel<<<(num_items-1)/512+1,512>>>(tree, items, num_items);
// hipDeviceSynchronize();
// hipFree(items);
// hipFree(misses);
// veb_tree::free_on_device(tree);
// hipDeviceSynchronize();
// }
for (int i = 15; i < 32; i++){
uint64_t num_items = (1ULL << i);
uint64_t * items;
//hipMalloc((void **)&items, num_items*sizeof(uint64_t));
uint64_t * misses;
hipMallocManaged((void **)&misses, sizeof(uint64_t));
hipDeviceSynchronize();
misses[0] = 0;
hipDeviceSynchronize();
int num_rounds = 10;
printf("Starting tree %d with %lu items\n", i, num_items);
veb_tree * tree = veb_tree::generate_on_device(num_items, 15);
hipDeviceSynchronize();
auto insert_start = std::chrono::high_resolution_clock::now();
//peek
//view_tree_bits<<<1,1>>>(tree);
//remove_insert_kernel_single_thread<<<1,1>>>(tree, items, num_items, num_rounds);
hipLaunchKernelGGL(( remove_insert_kernel), dim3((num_items-1)/512+1),dim3(512), 0, 0, tree, num_items,num_rounds, misses);
//view_tree_bits<<<1,1>>>(tree);
hipDeviceSynchronize();
//multi_run_remove_insert_kernel<<<(num_items/10-1)/512+1, 512>>>(tree, num_items/10, 10, misses);
hipDeviceSynchronize();
auto insert_end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_seconds = insert_end - insert_start;
std::cout << "Inserted " << num_items*num_rounds << " in " << elapsed_seconds.count() << " seconds, throughput: " << std::fixed << 1.0*(num_items*num_rounds)/elapsed_seconds.count() << std::endl;
std::cout << "Missed " << misses[0] << "/" << (uint64_t) (num_items*num_rounds) << " items, fraction: " << 1.0*misses[0]/(num_items*num_rounds) << "\n";
//hipFree(items);
hipFree(misses);
veb_tree::free_on_device(tree);
}
// hipDeviceSynchronize();
// num_removes = (1ULL << 32);
// veb_tree * test_allocator_2 = veb_tree::generate_on_device(num_removes);
// hipDeviceSynchronize();
// view_kernel<<<1,1>>>(test_allocator_2);
// hipDeviceSynchronize();
// insert_start = std::chrono::high_resolution_clock::now();
// test_kernel<<<(num_removes-1)/512+1,512>>>(test_allocator_2, num_removes,1);
// hipDeviceSynchronize();
// insert_end = std::chrono::high_resolution_clock::now();
// printf("Space usage: %lu\n", test_allocator_2->space_in_bytes());
// elapsed_seconds = insert_end - insert_start;
// std::cout << "Inserted " << num_removes << " in " << elapsed_seconds.count() << "seconds, throughput: " << std::fixed << 1.0*num_removes/elapsed_seconds.count() << std::endl;
// //printf("Aggregate inserts: %lu in %lu: %f\n", num_removes, time, 1.0*num_removes/time);
// hipDeviceSynchronize();
// view_kernel<<<1,1>>>(test_allocator_2);
// hipDeviceSynchronize();
// veb_tree::free_on_device(test_allocator_2);
// hipDeviceSynchronize();
// num_removes = (1ULL << 34)/128;
// veb_tree * test_allocator_3 = veb_tree::generate_on_device(num_removes);
// hipDeviceSynchronize();
// insert_start = std::chrono::high_resolution_clock::now();
// test_kernel<<<(num_removes-1)/512+1,512>>>(test_allocator_3, num_removes,1);
// hipDeviceSynchronize();
// insert_end= std::chrono::high_resolution_clock::now();
// printf("Space usage: %lu\n", test_allocator_3->space_in_bytes());
// elapsed_seconds = insert_end - insert_start;
// std::cout << "Inserted " << num_removes << " in " << elapsed_seconds.count() << "seconds, throughput: " << std::fixed << 1.0*num_removes/elapsed_seconds.count() << std::endl;
// veb_tree::free_on_device(test_allocator_3);
// hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| 34498a4e35e5b0a7f26c3978e563dd2f40831d98.cu | /*
* ============================================================================
*
* Authors:
* Hunter McCoy <hjmccoy@lbl.gov
*
* ============================================================================
*/
#include <poggers/allocators/veb.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace poggers::allocators;
// __global__ void test_kernel(veb_tree * tree, uint64_t num_removes, int num_iterations){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid >= num_removes)return;
// //printf("Tid %lu\n", tid);
// for (int i=0; i< num_iterations; i++){
// if (!tree->remove(tid)){
// printf("BUG\n");
// }
// tree->insert(tid);
// }
__global__ void view_bits(layer * dev_layer){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("First set of bits: %lu\n", dev_layer->bits[0]);
}
__global__ void view_tree_bits(veb_tree * dev_tree){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
for (int i = 0; i< dev_tree->num_layers; i++){
printf("First bits of layer %d: %lu\n", i, dev_tree->layers[i]->bits[0]);
}
printf("End of tree\n");
}
__global__ void remove_insert_kernel(veb_tree * dev_tree, uint64_t num_removes, int num_rounds, uint64_t * misses){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= num_removes) return;
for (int i = 0; i < num_rounds; i++){
uint64_t remove = dev_tree->malloc();
if (remove != veb_tree::fail()){
bool dev_removes = dev_tree->insert(remove);
if (!dev_removes){ printf("Fail!\n"); }
} else {
atomicAdd((unsigned long long int *) misses, 1ULL);
}
}
}
__global__ void multi_run_remove_insert_kernel(veb_tree * dev_tree, uint64_t num_removes, int num_rounds, uint64_t * misses){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= num_removes) return;
uint64_t addresses[10];
for (int i = 0; i < num_rounds; i++){
uint64_t remove = dev_tree->malloc();
addresses[i] = remove;
}
for (int i = 0; i < num_rounds; i++){
if (addresses[i] == veb_tree::fail()){
atomicAdd((unsigned long long int *)misses, 1ULL);
} else {
bool remove = dev_tree->insert(addresses[i]);
if (!remove) printf("Failed to re-insert %lu\n", addresses[i]);
}
}
}
__global__ void check_insert_kernel(veb_tree * dev_tree, uint64_t * items, uint64_t * misses, uint64_t num_items){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= num_items) return;
uint64_t offset_at_lowest = dev_tree->malloc();
if (offset_at_lowest == veb_tree::fail()){
atomicAdd((unsigned long long int *)misses, 1ULL);
} else {
items[tid] = offset_at_lowest;
}
}
__global__ void free_insert_kernel(veb_tree * dev_tree, uint64_t * items, uint64_t num_items){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= num_items) return;
dev_tree->insert(items[tid]);
}
__global__ void assert_unique(uint64_t * items, uint64_t num_items){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= num_items) return;
uint64_t item = items[tid];
//0's are misses
if (item == 0) return;
for (uint64_t i = 0; i < tid; i++){
if (i == tid) continue;
if (item == items[i]){
printf("Conflict betwen %lu and %lu: %lu\n", tid, i, item);
}
}
}
__global__ void remove_insert_kernel_single_thread(veb_tree * dev_tree, uint64_t * items, uint64_t num_removes, int num_rounds){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid != 0) return;
for (int i = 0; i < num_rounds; i++){
for (uint64_t item_counter = 0; item_counter < num_removes; item_counter++){
items[item_counter] = dev_tree->malloc();
assert(items[item_counter] != veb_tree::fail());
}
for (uint64_t item_counter = 0; item_counter < num_removes; item_counter++){
bool dev_removes = dev_tree->insert(items[item_counter]);
assert(dev_removes);
}
}
}
// }
// __global__ void view_kernel(veb_tree * tree){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid != 0)return;
// }
//using allocator_type = buddy_allocator<0,0>;
int main(int argc, char** argv) {
// if (!test_one_thread()){
// printf("Test one thread: [FAIL]\n");
// } else {
// printf("Test one thread: [PASS]\n");
// }
// uint64_t num_removes = 64;
// veb_tree * test_allocator = veb_tree::generate_on_device(num_removes);
// cudaDeviceSynchronize();
// view_kernel<<<1,1>>>(test_allocator);
// cudaDeviceSynchronize();
// auto insert_start = std::chrono::high_resolution_clock::now();
// test_kernel<<<(num_removes-1)/512+1,512>>>(test_allocator, num_removes,1);
// cudaDeviceSynchronize();
// auto insert_end= std::chrono::high_resolution_clock::now();
// printf("Space usage: %lu\n", test_allocator->space_in_bytes());
// std::chrono::duration<double> elapsed_seconds = insert_end - insert_start;
// std::cout << "Inserted " << num_removes << " in " << elapsed_seconds.count() << "seconds, throughput: " << std::fixed << 1.0*num_removes/elapsed_seconds.count() << std::endl;
//veb_tree::free_on_device(test_allocator);
// for (int i = 0; i < 32; i++){
// layer * try_layer = layer::generate_on_device((1ULL << i));
// cudaDeviceSynchronize();
// view_bits<<<1,1>>>(try_layer);
// cudaDeviceSynchronize();
// layer::free_on_device(try_layer);
// cudaDeviceSynchronize();
// }
// for (int i = 28; i < 29; i++){
// uint64_t num_items = (1ULL << i);
// printf("%d shifted is %lu\n", i, num_items);
// uint64_t * items;
// CHECK_CUDA_ERROR(cudaMalloc((void **)&items, num_items*sizeof(uint64_t)));
// cudaMemset(items, 0, num_items*sizeof(uint64_t));
// uint64_t * misses;
// cudaMallocManaged((void **)&misses, sizeof(uint64_t));
// cudaDeviceSynchronize();
// misses[0] = 0;
// cudaDeviceSynchronize();
// veb_tree * tree = veb_tree::generate_on_device(num_items, i);
// cudaDeviceSynchronize();
// check_insert_kernel<<<(num_items-1)/512+1,512>>>(tree, items, misses, num_items);
// cudaDeviceSynchronize();
// std::cout << "Missed " << misses[0] << "/" << (uint64_t) (num_items) << " items, fraction: " << 1.0*misses[0]/(num_items) << "\n";
// assert_unique<<<(num_items-1)/512+1,512>>>(items, num_items);
// cudaDeviceSynchronize();
// free_insert_kernel<<<(num_items-1)/512+1,512>>>(tree, items, num_items);
// cudaDeviceSynchronize();
// cudaFree(items);
// cudaFree(misses);
// veb_tree::free_on_device(tree);
// cudaDeviceSynchronize();
// }
for (int i = 15; i < 32; i++){
uint64_t num_items = (1ULL << i);
uint64_t * items;
//cudaMalloc((void **)&items, num_items*sizeof(uint64_t));
uint64_t * misses;
cudaMallocManaged((void **)&misses, sizeof(uint64_t));
cudaDeviceSynchronize();
misses[0] = 0;
cudaDeviceSynchronize();
int num_rounds = 10;
printf("Starting tree %d with %lu items\n", i, num_items);
veb_tree * tree = veb_tree::generate_on_device(num_items, 15);
cudaDeviceSynchronize();
auto insert_start = std::chrono::high_resolution_clock::now();
//peek
//view_tree_bits<<<1,1>>>(tree);
//remove_insert_kernel_single_thread<<<1,1>>>(tree, items, num_items, num_rounds);
remove_insert_kernel<<<(num_items-1)/512+1,512>>>(tree, num_items,num_rounds, misses);
//view_tree_bits<<<1,1>>>(tree);
cudaDeviceSynchronize();
//multi_run_remove_insert_kernel<<<(num_items/10-1)/512+1, 512>>>(tree, num_items/10, 10, misses);
cudaDeviceSynchronize();
auto insert_end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_seconds = insert_end - insert_start;
std::cout << "Inserted " << num_items*num_rounds << " in " << elapsed_seconds.count() << " seconds, throughput: " << std::fixed << 1.0*(num_items*num_rounds)/elapsed_seconds.count() << std::endl;
std::cout << "Missed " << misses[0] << "/" << (uint64_t) (num_items*num_rounds) << " items, fraction: " << 1.0*misses[0]/(num_items*num_rounds) << "\n";
//cudaFree(items);
cudaFree(misses);
veb_tree::free_on_device(tree);
}
// cudaDeviceSynchronize();
// num_removes = (1ULL << 32);
// veb_tree * test_allocator_2 = veb_tree::generate_on_device(num_removes);
// cudaDeviceSynchronize();
// view_kernel<<<1,1>>>(test_allocator_2);
// cudaDeviceSynchronize();
// insert_start = std::chrono::high_resolution_clock::now();
// test_kernel<<<(num_removes-1)/512+1,512>>>(test_allocator_2, num_removes,1);
// cudaDeviceSynchronize();
// insert_end = std::chrono::high_resolution_clock::now();
// printf("Space usage: %lu\n", test_allocator_2->space_in_bytes());
// elapsed_seconds = insert_end - insert_start;
// std::cout << "Inserted " << num_removes << " in " << elapsed_seconds.count() << "seconds, throughput: " << std::fixed << 1.0*num_removes/elapsed_seconds.count() << std::endl;
// //printf("Aggregate inserts: %lu in %lu: %f\n", num_removes, time, 1.0*num_removes/time);
// cudaDeviceSynchronize();
// view_kernel<<<1,1>>>(test_allocator_2);
// cudaDeviceSynchronize();
// veb_tree::free_on_device(test_allocator_2);
// cudaDeviceSynchronize();
// num_removes = (1ULL << 34)/128;
// veb_tree * test_allocator_3 = veb_tree::generate_on_device(num_removes);
// cudaDeviceSynchronize();
// insert_start = std::chrono::high_resolution_clock::now();
// test_kernel<<<(num_removes-1)/512+1,512>>>(test_allocator_3, num_removes,1);
// cudaDeviceSynchronize();
// insert_end= std::chrono::high_resolution_clock::now();
// printf("Space usage: %lu\n", test_allocator_3->space_in_bytes());
// elapsed_seconds = insert_end - insert_start;
// std::cout << "Inserted " << num_removes << " in " << elapsed_seconds.count() << "seconds, throughput: " << std::fixed << 1.0*num_removes/elapsed_seconds.count() << std::endl;
// veb_tree::free_on_device(test_allocator_3);
// cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
46c371ea7c936689d0ee68fc72b38ec301e9a83b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ double efficientLocalMean_dev (const long x,const long y,const long k, double * input_img, int rowsize, int colsize) {
long k2 = k/2;
long dimx = rowsize;
long dimy = colsize;
//wanting average over area: (y-k2,x-k2) ... (y+k2-1, x+k2-1)
long starty = y-k2;
long startx = x-k2;
long stopy = y+k2-1;
long stopx = x+k2-1;
if (starty < 0) starty = 0;
if (startx < 0) startx = 0;
if (stopx > dimx-1) stopx = dimx-1;
if (stopy > dimy-1) stopy = dimy-1;
double unten, links, oben, obenlinks;
if (startx-1 < 0) links = 0;
else links = *(input_img+(stopy * dimx + startx-1));
if (starty-1 < 0) oben = 0;
else oben = *(input_img+((stopy-1) * dimx + startx));
if ((starty-1 < 0) || (startx-1 <0)) obenlinks = 0;
else obenlinks = *(input_img+((stopy-1) * dimx + startx-1));
unten = *(input_img+(stopy * dimx + startx));
long counter = (stopy-starty+1)*(stopx-startx+1);
return (unten-links-oben+obenlinks)/counter;
}
__global__ void process_coarseness_ak_pix(double * output_ak,double * input_img,int colsize, int rowsize,long lenOf_ak)
{
int index;
int y = threadIdx.x + blockIdx.x * blockDim.x;
int x = threadIdx.y + blockIdx.y * blockDim.y;
if(y < (colsize) && x < (rowsize))
{
index = y * rowsize + x ;
output_ak[index] = efficientLocalMean_dev(x,y,lenOf_ak,input_img,rowsize,colsize);
}
} | 46c371ea7c936689d0ee68fc72b38ec301e9a83b.cu | #include "includes.h"
__device__ double efficientLocalMean_dev (const long x,const long y,const long k, double * input_img, int rowsize, int colsize) {
long k2 = k/2;
long dimx = rowsize;
long dimy = colsize;
//wanting average over area: (y-k2,x-k2) ... (y+k2-1, x+k2-1)
long starty = y-k2;
long startx = x-k2;
long stopy = y+k2-1;
long stopx = x+k2-1;
if (starty < 0) starty = 0;
if (startx < 0) startx = 0;
if (stopx > dimx-1) stopx = dimx-1;
if (stopy > dimy-1) stopy = dimy-1;
double unten, links, oben, obenlinks;
if (startx-1 < 0) links = 0;
else links = *(input_img+(stopy * dimx + startx-1));
if (starty-1 < 0) oben = 0;
else oben = *(input_img+((stopy-1) * dimx + startx));
if ((starty-1 < 0) || (startx-1 <0)) obenlinks = 0;
else obenlinks = *(input_img+((stopy-1) * dimx + startx-1));
unten = *(input_img+(stopy * dimx + startx));
long counter = (stopy-starty+1)*(stopx-startx+1);
return (unten-links-oben+obenlinks)/counter;
}
__global__ void process_coarseness_ak_pix(double * output_ak,double * input_img,int colsize, int rowsize,long lenOf_ak)
{
int index;
int y = threadIdx.x + blockIdx.x * blockDim.x;
int x = threadIdx.y + blockIdx.y * blockDim.y;
if(y < (colsize) && x < (rowsize))
{
index = y * rowsize + x ;
output_ak[index] = efficientLocalMean_dev(x,y,lenOf_ak,input_img,rowsize,colsize);
}
} |
712a3307581e51ea3b45bd75334ad14f8d4adef9.hip | // !!! This is a file automatically generated by hipify!!!
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. avzis <michael.aivazis@para-sim.com>
// parasim
// (c) 1998-2019 all rights reserved
//
// configuration
#include <portinfo>
// cuda
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
// pyre
#include <pyre/journal.h>
// pull the declarations
#include "kernels.h"
// the SAT generation kernel
template <typename value_t = float>
__global__
static void
_nudge(std::size_t pairs, // the total number of tiles
std::size_t oldDim, // the old shape of the target tiles
std::size_t newDim, // the new shape of the target tiles
std::size_t margin, // the new margin of the search window
int * loc);
// run through the correlation matrix for each, find its maximum value and record its location
void
ampcor::cuda::kernels::
nudge(std::size_t pairs, // the total number of tiles
std::size_t refDim, // the shape of the reference tiles
std::size_t tgtDim, // the shape of the target tiles
std::size_t margin, // the new margin around the reference tile
int * loc)
{
// make a channel
pyre::journal::debug_t channel("ampcor.cuda");
// launch blocks of T threads
auto T = 128;
// in as many blocks as it takes to handle all pairs
auto B = pairs / T + (pairs % T ? 1 : 0);
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T
<< " threads each to nudge the " << pairs
<< " maxima locations"
<< pyre::journal::endl;
// launch the kernels
hipLaunchKernelGGL(( _nudge) , dim3(B),dim3(T), 0, 0, pairs, tgtDim, refDim+2*margin, margin, loc);
// wait for the kernels to finish
hipError_t status = hipDeviceSynchronize();
// check
if (status != hipSuccess) {
// get the description of the error
std::string description = hipGetErrorName(status);
// make a channel
pyre::journal::error_t channel("ampcor.cuda");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while nudging the new target tile locations: "
<< description << " (" << status << ")"
<< pyre::journal::endl;
// and bail
throw std::runtime_error(description);
}
// all done
return;
}
// the SAT generation kernel
template <typename value_t>
__global__
void
_nudge(std::size_t pairs, // the total number of tiles
std::size_t oldDim, // the shape of the target tiles
std::size_t newDim, // the shape of the target tiles
std::size_t margin, // the new margin of the search window
int * loc)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
std::size_t w = b*T + t; // my worker id
// if my worker id exceeds the number of cells that require update
if (w >= pairs) {
// nothing for me to do
return;
}
// locate the beginning of my stats table
auto myloc = loc + 2*w;
// read my position
int row = myloc[0];
int col = myloc[1];
// let's do LR nudging first
int left = col - margin;
// if it sticks out in the left
if (left < 0) {
// move it to the far left
left = 0;
}
// if it sticks out on the right
if (left + newDim > oldDim) {
// move so that it fits
left = oldDim - newDim;
}
// repeat for TB
int top = row - margin;
// if it sticks outside the top of the tile
if (top < 0) {
// move it to the top row
top = 0;
}
// if it sticks out below the bottom row
if (top + newDim > oldDim) {
// move it up so it fits
top = oldDim - newDim;
}
// write the new locations
myloc[0] = top;
myloc[1] = left;
// all done
return;
}
// end of file
| 712a3307581e51ea3b45bd75334ad14f8d4adef9.cu | // -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <michael.aivazis@para-sim.com>
// parasim
// (c) 1998-2019 all rights reserved
//
// configuration
#include <portinfo>
// cuda
#include <cuda_runtime.h>
#include <cooperative_groups.h>
// pyre
#include <pyre/journal.h>
// pull the declarations
#include "kernels.h"
// the SAT generation kernel
template <typename value_t = float>
__global__
static void
_nudge(std::size_t pairs, // the total number of tiles
std::size_t oldDim, // the old shape of the target tiles
std::size_t newDim, // the new shape of the target tiles
std::size_t margin, // the new margin of the search window
int * loc);
// run through the correlation matrix for each, find its maximum value and record its location
void
ampcor::cuda::kernels::
nudge(std::size_t pairs, // the total number of tiles
std::size_t refDim, // the shape of the reference tiles
std::size_t tgtDim, // the shape of the target tiles
std::size_t margin, // the new margin around the reference tile
int * loc)
{
// make a channel
pyre::journal::debug_t channel("ampcor.cuda");
// launch blocks of T threads
auto T = 128;
// in as many blocks as it takes to handle all pairs
auto B = pairs / T + (pairs % T ? 1 : 0);
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T
<< " threads each to nudge the " << pairs
<< " maxima locations"
<< pyre::journal::endl;
// launch the kernels
_nudge <<<B,T>>> (pairs, tgtDim, refDim+2*margin, margin, loc);
// wait for the kernels to finish
cudaError_t status = cudaDeviceSynchronize();
// check
if (status != cudaSuccess) {
// get the description of the error
std::string description = cudaGetErrorName(status);
// make a channel
pyre::journal::error_t channel("ampcor.cuda");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while nudging the new target tile locations: "
<< description << " (" << status << ")"
<< pyre::journal::endl;
// and bail
throw std::runtime_error(description);
}
// all done
return;
}
// the SAT generation kernel
template <typename value_t>
__global__
void
_nudge(std::size_t pairs, // the total number of tiles
std::size_t oldDim, // the shape of the target tiles
std::size_t newDim, // the shape of the target tiles
std::size_t margin, // the new margin of the search window
int * loc)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
std::size_t w = b*T + t; // my worker id
// if my worker id exceeds the number of cells that require update
if (w >= pairs) {
// nothing for me to do
return;
}
// locate the beginning of my stats table
auto myloc = loc + 2*w;
// read my position
int row = myloc[0];
int col = myloc[1];
// let's do LR nudging first
int left = col - margin;
// if it sticks out in the left
if (left < 0) {
// move it to the far left
left = 0;
}
// if it sticks out on the right
if (left + newDim > oldDim) {
// move so that it fits
left = oldDim - newDim;
}
// repeat for TB
int top = row - margin;
// if it sticks outside the top of the tile
if (top < 0) {
// move it to the top row
top = 0;
}
// if it sticks out below the bottom row
if (top + newDim > oldDim) {
// move it up so it fits
top = oldDim - newDim;
}
// write the new locations
myloc[0] = top;
myloc[1] = left;
// all done
return;
}
// end of file
|
e00efa6a1e86369263c7690489e424b77f922c48.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __MCMC_GPU_SP_CU__
#define __MCMC_GPU_SP_CU__
#include "mcmc_gpu_sp.h"
const int PRIOR_SD = 10;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void sp_sampler(data_str data, gsl_rng *r, mcmc_str mcin, mcmc_tune_str mct,
mcmc_v_str mcdata, gpu_v_str gpu, out_str *res)
{
int accepted_samples;
clock_t startBurn, stopBurn;
clock_t startMcmc, stopMcmc;
// print_gpu_info();
hipSetDevice(0);
hipblasHandle_t handle;
hipblasCreate(&handle);
mcmc_int_v mclocv;
mcmc_int mcloc;
mcloc.cposteriorf = 0;
mcloc.pposteriorf = 0;
mcloc.acceptancef = 0;
mcloc.uf = 0;
malloc_mcmc_vectors_sp(&mclocv, mcin);
// set up the gpu vectors
sz_str sz;
dev_v_str d;
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
sz.samples = mcin.ddata * sizeof(float);
sz.data = mcin.ddata * mcin.Nd * sizeof(float);
sz.cuLhood = mcin.Nd * sizeof(float);
sz.lhood = mcin.Nd * sizeof(float);
float *host_lhoodf = (float*) malloc(mcin.Nd*sizeof(float));
hipMalloc(&d.samplesf, sz.samples);
hipMalloc(&d.dataf, sz.data);
hipMalloc(&d.cuLhoodf, sz.cuLhood); // kernel will return a vector of likelihoods
hipMalloc(&d.lhoodf, sz.lhood);
hipMemcpy(d.dataf, data.dataf, sz.data, hipMemcpyHostToDevice);
startBurn = clock();
if(mcin.burnin != 0)
burn_in_metropolis_sp(handle, r, mcin, mct, mcdata, mclocv, &mcloc, sz, gpu, d, host_lhoodf);
stopBurn = clock() - startBurn;
accepted_samples = 0;
startMcmc = clock();
metropolis_sp(handle, r, mcin, mct, mcdata, mclocv, &mcloc, &accepted_samples, sz, gpu, d, host_lhoodf, res);
stopMcmc = clock() - startMcmc;
res->burnTime = stopBurn * 1000 / CLOCKS_PER_SEC; // burn in time in ms
res->mcmcTime = stopMcmc * 1000 / CLOCKS_PER_SEC; // mcmc time in ms
res->acceptance = (float)accepted_samples / mcin.Ns;
hipblasDestroy(handle);
hipFree(d.samples);
hipFree(d.data);
hipFree(d.cuLhood);
hipFree(d.lhood);
free(host_lhoodf);
free_mcmc_vectors_sp(mclocv, mcin);
}
void metropolis_sp(hipblasHandle_t handle, gsl_rng *r, mcmc_str mcin, mcmc_tune_str mct, mcmc_v_str mcdata,
mcmc_int_v mclocv, mcmc_int *mcloc, int *accepted_samples, sz_str sz,
gpu_v_str gpu, dev_v_str d, float *host_lhoodf, out_str *res)
{
int i, dim_idx;
float plhood;
res->cuTime = 0;
res->cuBandwidth = 0;
res->kernelTime = 0;
res->kernelBandwidth = 0;
res->gpuTime = 0;
res->gpuBandwidth = 0;
fprintf(stdout, "Starting metropolis algorithm. Selected rwsdf = %f\n", mct.rwsdf);
for(i=0; i<mcin.Ns; i++)
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
// random walk using Marsaglia-Tsang ziggurat algorithm
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx] + (float) gsl_ran_gaussian_ziggurat(r, (double)mct.rwsdf);
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, res);
// calculate acceptance ratio
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef)
{
// accept proposed sample
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
{
mcdata.samplesf[i*mcin.ddata + dim_idx] = mclocv.proposedf[dim_idx];
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf;
*accepted_samples += 1;
}else{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mcdata.samplesf[i*mcin.ddata + dim_idx] = mclocv.currentf[dim_idx];
}
}
fprintf(stdout, "Metropolis algorithm finished. Accepted Samples = %d\n\n", *accepted_samples);
}
void burn_in_metropolis_sp(hipblasHandle_t handle, gsl_rng *r, mcmc_str mcin, mcmc_tune_str mct, mcmc_v_str mcdata,
mcmc_int_v mclocv, mcmc_int *mcloc, sz_str sz, gpu_v_str gpu,
dev_v_str d, float *host_lhoodf)
{
int i, dim_idx;
float plhood, clhood;
out_str res;
fprintf(stdout, "Starting burn in process. Selected rwsdf = %f\n", mct.rwsdf);
// initialize burn in sequence
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mclocv.currentf[dim_idx] = mcdata.burnf[dim_idx];
clhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.currentf, sz.samples, d, host_lhoodf, &res);
// calculate the current posterior
mcloc->cposteriorf = log_prior_sp(mclocv.currentf, mcin) + clhood;
// start burn in
for(i=1; i<mcin.burnin; i++)
{
// propose next sample
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx] + (float) gsl_ran_gaussian_ziggurat(r, (double)mct.rwsdf); // random walk using Marsaglia-Tsang ziggurat algorithm
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, &res);
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef)
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
{
mcdata.burnf[i*mcin.ddata + dim_idx] = mclocv.proposedf[dim_idx];
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf;
// printf("ok10\n");
}else{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mcdata.burnf[i*mcin.ddata + dim_idx] = mclocv.currentf[dim_idx];
// printf("ok11\n");
}
}
fprintf(stdout, "Burn in process finished.\n\n");
}
float reduction_f(gpu_v_str gpu, dev_v_str d, float *host_lhoodf, float *ke_acc_Bytes)
{
float gpu_result = 0;
int i;
int numBlocks = gpu.blocks;
int threads, blocks;
*ke_acc_Bytes = gpu.size * sizeof(float);
reduceSum_f(gpu.size, gpu.threads, gpu.blocks, gpu.kernel, d.cuLhoodf, d.lhoodf, LogRegression);
while(numBlocks >= gpu.cpuThresh)
{
getBlocksAndThreads(gpu.kernel, numBlocks, gpu.maxBlocks, gpu.maxThreads, &blocks, &threads);
ke_acc_Bytes += numBlocks * sizeof(float);
gpuErrchk(hipMemcpy(d.cuLhoodf, d.lhoodf, numBlocks*sizeof(float), hipMemcpyDeviceToDevice));
reduceSum_f(numBlocks, threads, blocks, gpu.kernel, d.cuLhoodf, d.lhoodf, Reduction);
if(gpu.kernel < 3)
{
numBlocks = (numBlocks + threads - 1) / threads;
}else{
numBlocks = (numBlocks +(threads*2-1)) / (threads*2);
}
}
gpuErrchk(hipMemcpy(host_lhoodf, d.lhoodf, numBlocks*sizeof(float), hipMemcpyDeviceToHost));
// accumulate result on CPU
for(i=0; i<numBlocks; i++){
gpu_result += host_lhoodf[i];
}
return gpu_result;
}
float gpu_likelihood_f(hipblasHandle_t handle, mcmc_str mcin, gpu_v_str gpu, float *samplesf, float sampleSz,
dev_v_str d, float *host_lhoodf, out_str *res)
{
float ke_acc_Bytes = 0;
float cuBytes = 0;
float reduced_lhood = 0;
float a = 1.0;
float b = 0.0;
float cu_ms = 0;
float ke_ms = 0;
hipEvent_t cuStart, cuStop, keStart, keStop;
gpuErrchk(hipEventCreate(&cuStart));
gpuErrchk(hipEventCreate(&cuStop));
gpuErrchk(hipEventCreate(&keStart));
gpuErrchk(hipEventCreate(&keStop));
gpuErrchk(hipMemcpy(d.samplesf, samplesf, sampleSz, hipMemcpyHostToDevice));
hipEventRecord(cuStart);
hipblasSgemv(handle, HIPBLAS_OP_N, mcin.Nd, mcin.ddata, &a, d.dataf, mcin.Nd, d.samplesf, 1, &b, d.cuLhoodf, 1);
gpuErrchk(hipDeviceSynchronize());
hipEventRecord(cuStop);
hipEventRecord(keStart);
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
reduced_lhood = reduction_f(gpu, d, host_lhoodf, &ke_acc_Bytes);
gpuErrchk(hipDeviceSynchronize());
hipEventRecord(keStop);
hipEventSynchronize(cuStop);
hipEventSynchronize(keStop);
hipEventElapsedTime(&cu_ms, cuStart, cuStop);
hipEventElapsedTime(&ke_ms, keStart, keStop);
cuBytes = mcin.Nd * (mcin.ddata + 2) * sizeof(float);
res->cuTime += cu_ms / mcin.Ns; // average cuBlas time
res->cuBandwidth += (cuBytes / cu_ms / 1e6) / mcin.Ns;
res->kernelTime += ke_ms / mcin.Ns;
res->kernelBandwidth += (ke_acc_Bytes / ke_ms / 1e6) / mcin.Ns;
res->gpuTime += (cu_ms + ke_ms) / mcin.Ns;
res->gpuBandwidth += ((cuBytes + ke_acc_Bytes) / (cu_ms + ke_ms) / 1e6) / mcin.Ns;
return reduced_lhood;
}
// // tune rwsdf for a target acceptance ratio
void tune_ess_sp(data_str data, gsl_rng *r, mcmc_str mcin, mcmc_tune_str *mct, gpu_v_str gpu, float *initCond, int length)
{
// print_gpu_info();
hipSetDevice(0);
hipblasHandle_t handle;
hipblasCreate(&handle);
mcmc_int_v mclocv;
mcmc_int mcloc;
mcloc.cposteriorf = 0;
mcloc.pposteriorf = 0;
mcloc.acceptancef = 0;
mcloc.uf = 0;
malloc_mcmc_vectors_sp(&mclocv, mcin);
// set up the gpu vectors
sz_str sz;
dev_v_str d;
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
sz.samples = mcin.ddata * sizeof(float);
sz.data = mcin.ddata * mcin.Nd * sizeof(float);
sz.cuLhood = mcin.Nd * sizeof(float);
sz.lhood = gpu.blocks * sizeof(float);
hipMalloc(&d.samplesf, sz.samples);
hipMalloc(&d.dataf, sz.data);
hipMalloc(&d.cuLhoodf, sz.cuLhood); // kernel will return a vector of likelihoods
hipMalloc(&d.lhoodf, sz.lhood);
hipMemcpy(d.dataf, data.dataf, sz.data, hipMemcpyHostToDevice);
int chain_length = length;
int runs = 40;
float target_a[] = {0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50};
float error_tolerance = 0.01;
float min_error = 9999999;
float max_ess = -9999999;
float lagidx = 500;
float sd = mct->rwsdf;
float ess_sd = sd;
int accepted_samples, run, a_idx;
float acc_ratio_c, acc_error_c, best_acc_ratio;
float circ_sum, best_sd, ess_c;
float *samples = NULL;
samples = (float*) malloc(mcin.ddata * chain_length * sizeof(float));
if(samples == NULL)
fprintf(stderr, "ERROR: Samples vector did not allocated.\n");
float *autocorr_lagk = NULL;
autocorr_lagk = (float*) malloc(lagidx * sizeof(float));
if(autocorr_lagk == NULL)
fprintf(stderr, "ERROR: Autocorrelation vector did not allocated.\n");
fprintf(stdout, "\nStarting tuning process. rwsdf = %5.3f\n", sd);
for(a_idx=0; a_idx<9; a_idx++){
fprintf(stdout, "\tStarting tuning for target ratio = %4.3f. Current rwsdf = %5.3f\n", target_a[a_idx], sd);
min_error = 9999999;
for(run=0; run<runs; run++)
{
fprintf(stdout, "\t\tStarting Run %2d. Current rwsdf = %5.3f\n", run, sd);
accepted_samples = 0;
short_run_burn_in_sp(handle, r, mclocv, mcin, sd, &mcloc, sz, gpu, d, data.mvoutf, initCond);
short_run_metropolis_sp(handle, r, mclocv, mcin, chain_length, sd, &mcloc,
samples, &accepted_samples, sz, gpu, d, data.mvoutf);
acc_ratio_c = accepted_samples/(float)chain_length;
acc_error_c = fabs(acc_ratio_c - target_a[a_idx]);
if(acc_error_c < min_error) // accept the current sd
{
best_sd = sd;
min_error = acc_error_c;
best_acc_ratio = acc_ratio_c;
fprintf(stdout, "\t\t\tAccepted: rwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
best_sd, best_acc_ratio, min_error);
}else{
fprintf(stdout, "\t\t\trwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
sd, acc_ratio_c, acc_error_c);
}
if(min_error < error_tolerance)
break;
sd *= acc_ratio_c/target_a[a_idx];
}
circ_sum = circular_autocorrelation_sp(autocorr_lagk, samples, mcin.ddata,
chain_length, lagidx);
ess_c = chain_length / (1 + 2*circ_sum);
if(ess_c > max_ess)
{
max_ess = ess_c;
ess_sd = sd;
fprintf(stdout, "\tAccepted: ess = %8.3f, rwsdf = %5.3f\n", max_ess, ess_sd);
}else{
fprintf(stdout, "\tess= %8.3f, rwsdf = %5.3f\n", ess_c, sd);
}
}
mct->rwsdf = ess_sd;
fprintf(stdout, "Tuning finished. Selected rwsdf = %5.3f\n\n", mct->rwsdf);
hipblasDestroy(handle);
free(samples);
free(autocorr_lagk);
hipFree(d.samples);
hipFree(d.data);
hipFree(d.cuLhood);
hipFree(d.lhood);
free_mcmc_vectors_sp(mclocv, mcin);
}
// // tune rwsdf for a target acceptance ratio
void tune_target_a_sp_v2(data_str data, gsl_rng *r, mcmc_str mcin, mcmc_tune_str *mct,
gpu_v_str gpu, float *initCond, float ratio, int max_reps)
{
// print_gpu_info();
hipSetDevice(0);
hipblasHandle_t handle;
hipblasCreate(&handle);
mcmc_int_v mclocv;
mcmc_int mcloc;
mcloc.cposteriorf = 0;
mcloc.pposteriorf = 0;
mcloc.acceptancef = 0;
mcloc.uf = 0;
malloc_mcmc_vectors_sp(&mclocv, mcin);
// set up the gpu vectors
sz_str sz;
dev_v_str d;
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
sz.samples = mcin.ddata * sizeof(float);
sz.data = mcin.ddata * mcin.Nd * sizeof(float);
sz.cuLhood = mcin.Nd * sizeof(float);
sz.lhood = gpu.blocks * sizeof(float);
hipMalloc(&d.samplesf, sz.samples);
hipMalloc(&d.dataf, sz.data);
hipMalloc(&d.cuLhoodf, sz.cuLhood); // kernel will return a vector of likelihoods
hipMalloc(&d.lhoodf, sz.lhood);
hipMemcpy(d.dataf, data.dataf, sz.data, hipMemcpyHostToDevice);
int chain_length = 5000;
int runs = max_reps;
float target_a = ratio;
float error_tolerance = 0.01;
float min_error = 9999999;
float sd = mct->rwsdf;
float best_sd = sd;
int accepted_samples, run;
float acc_ratio_c, acc_error_c, best_acc_ratio;
float *samples = NULL;
samples = (float*) malloc(mcin.ddata * chain_length * sizeof(float));
if(samples == NULL)
fprintf(stderr, "ERROR: Samples vector did not allocated.\n");
fprintf(stdout, "\nStarting tuning process. rwsdf = %5.3f\n", sd);
for(run=0; run<runs; run++)
{
fprintf(stdout, "\tStarting Run %2d. Current rwsdf = %5.3f\n", run, sd);
accepted_samples = 0;
short_run_burn_in_sp(handle, r, mclocv, mcin, sd, &mcloc, sz, gpu, d, data.mvoutf, initCond);
short_run_metropolis_sp(handle, r, mclocv, mcin, chain_length, sd, &mcloc,
samples, &accepted_samples, sz, gpu, d, data.mvoutf);
acc_ratio_c = accepted_samples/(float)chain_length;
acc_error_c = fabs(acc_ratio_c - target_a);
if(acc_error_c < min_error) // accept the current sd
{
best_sd = sd;
min_error = acc_error_c;
best_acc_ratio = acc_ratio_c;
fprintf(stdout, "\t\tAccepted: rwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
best_sd, best_acc_ratio, min_error);
}else{
fprintf(stdout, "\t\trwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
sd, acc_ratio_c, acc_error_c);
}
if(min_error < error_tolerance)
break;
sd *= acc_ratio_c/target_a;
}
mct->rwsdf = best_sd;
fprintf(stdout, "Tuning finished. Selected rwsdf = %5.3f\n\n", mct->rwsdf);
hipblasDestroy(handle);
free(samples);
hipFree(d.samples);
hipFree(d.data);
hipFree(d.cuLhood);
hipFree(d.lhood);
free_mcmc_vectors_sp(mclocv, mcin);
}
void short_run_burn_in_sp(hipblasHandle_t handle, gsl_rng *r, mcmc_int_v mclocv, mcmc_str mcin, float sd, mcmc_int *mcloc,
sz_str sz, gpu_v_str gpu, dev_v_str d, float *host_lhoodf, float *initCond)
{
int i, dim_idx;
float plhood, clhood;
out_str res;
// initialize burn in sequence
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mclocv.currentf[dim_idx] = 0;
clhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.currentf, sz.samples, d, host_lhoodf, &res);
// calculate the current posterior
mcloc->cposteriorf = log_prior_sp(mclocv.currentf, mcin) + clhood;
// start burn-in
for(i=1; i<mcin.burnin; i++)
{
for(dim_idx = 0; dim_idx < mcin.ddata; dim_idx++){
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx]
+ (float) gsl_ran_gaussian_ziggurat(r, (double)sd); // random walk using Marsaglia-Tsang ziggurat algorithm
}
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, &res);
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef) // decide if you accept the proposed theta or not
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf; // make proposed posterior the current
}
}
}
void short_run_metropolis_sp(hipblasHandle_t handle, gsl_rng *r, mcmc_int_v mclocv, mcmc_str mcin, int chain_length, float sd,
mcmc_int *mcloc, float *samples, int *accepted_samples, sz_str sz,
gpu_v_str gpu, dev_v_str d, float *host_lhoodf)
{
int i, dim_idx;
float plhood;
out_str res;
// start metropolis
for(i=0; i < chain_length; i++){
for(dim_idx = 0; dim_idx < mcin.ddata; dim_idx++){
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx]
+ (float) gsl_ran_gaussian_ziggurat(r, (double)sd); // random walk using Marsaglia-Tsang ziggurat algorithm
}
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, &res);
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef) // decide if you accept the proposed theta or not
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
samples[i*mcin.ddata + dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf; // make proposed posterior the current
*accepted_samples += 1;
}else{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){
samples[i*mcin.ddata + dim_idx] = mclocv.currentf[dim_idx];
}
}
}
}
float acceptance_ratio_sp(mcmc_int_v mclocv, mcmc_int *mcloc, mcmc_str mcin, float plhood)
{
float log_ratio;
mcloc->pposteriorf = log_prior_sp(mclocv.proposedf, mcin) + plhood;
log_ratio = mcloc->pposteriorf - mcloc->cposteriorf;
return log_ratio;
}
float log_prior_sp(float *sample, mcmc_str mcin)
{
float log_prob = 0;
int dim_idx;
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){ //assuming iid priors
log_prob += log(gsl_ran_gaussian_pdf(sample[dim_idx], PRIOR_SD));
}
return log_prob;
}
#endif // __MCMC_GPU_SP_CU__ | e00efa6a1e86369263c7690489e424b77f922c48.cu | #ifndef __MCMC_GPU_SP_CU__
#define __MCMC_GPU_SP_CU__
#include "mcmc_gpu_sp.h"
const int PRIOR_SD = 10;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void sp_sampler(data_str data, gsl_rng *r, mcmc_str mcin, mcmc_tune_str mct,
mcmc_v_str mcdata, gpu_v_str gpu, out_str *res)
{
int accepted_samples;
clock_t startBurn, stopBurn;
clock_t startMcmc, stopMcmc;
// print_gpu_info();
cudaSetDevice(0);
cublasHandle_t handle;
cublasCreate(&handle);
mcmc_int_v mclocv;
mcmc_int mcloc;
mcloc.cposteriorf = 0;
mcloc.pposteriorf = 0;
mcloc.acceptancef = 0;
mcloc.uf = 0;
malloc_mcmc_vectors_sp(&mclocv, mcin);
// set up the gpu vectors
sz_str sz;
dev_v_str d;
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
sz.samples = mcin.ddata * sizeof(float);
sz.data = mcin.ddata * mcin.Nd * sizeof(float);
sz.cuLhood = mcin.Nd * sizeof(float);
sz.lhood = mcin.Nd * sizeof(float);
float *host_lhoodf = (float*) malloc(mcin.Nd*sizeof(float));
cudaMalloc(&d.samplesf, sz.samples);
cudaMalloc(&d.dataf, sz.data);
cudaMalloc(&d.cuLhoodf, sz.cuLhood); // kernel will return a vector of likelihoods
cudaMalloc(&d.lhoodf, sz.lhood);
cudaMemcpy(d.dataf, data.dataf, sz.data, cudaMemcpyHostToDevice);
startBurn = clock();
if(mcin.burnin != 0)
burn_in_metropolis_sp(handle, r, mcin, mct, mcdata, mclocv, &mcloc, sz, gpu, d, host_lhoodf);
stopBurn = clock() - startBurn;
accepted_samples = 0;
startMcmc = clock();
metropolis_sp(handle, r, mcin, mct, mcdata, mclocv, &mcloc, &accepted_samples, sz, gpu, d, host_lhoodf, res);
stopMcmc = clock() - startMcmc;
res->burnTime = stopBurn * 1000 / CLOCKS_PER_SEC; // burn in time in ms
res->mcmcTime = stopMcmc * 1000 / CLOCKS_PER_SEC; // mcmc time in ms
res->acceptance = (float)accepted_samples / mcin.Ns;
cublasDestroy(handle);
cudaFree(d.samples);
cudaFree(d.data);
cudaFree(d.cuLhood);
cudaFree(d.lhood);
free(host_lhoodf);
free_mcmc_vectors_sp(mclocv, mcin);
}
void metropolis_sp(cublasHandle_t handle, gsl_rng *r, mcmc_str mcin, mcmc_tune_str mct, mcmc_v_str mcdata,
mcmc_int_v mclocv, mcmc_int *mcloc, int *accepted_samples, sz_str sz,
gpu_v_str gpu, dev_v_str d, float *host_lhoodf, out_str *res)
{
int i, dim_idx;
float plhood;
res->cuTime = 0;
res->cuBandwidth = 0;
res->kernelTime = 0;
res->kernelBandwidth = 0;
res->gpuTime = 0;
res->gpuBandwidth = 0;
fprintf(stdout, "Starting metropolis algorithm. Selected rwsdf = %f\n", mct.rwsdf);
for(i=0; i<mcin.Ns; i++)
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
// random walk using Marsaglia-Tsang ziggurat algorithm
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx] + (float) gsl_ran_gaussian_ziggurat(r, (double)mct.rwsdf);
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, res);
// calculate acceptance ratio
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef)
{
// accept proposed sample
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
{
mcdata.samplesf[i*mcin.ddata + dim_idx] = mclocv.proposedf[dim_idx];
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf;
*accepted_samples += 1;
}else{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mcdata.samplesf[i*mcin.ddata + dim_idx] = mclocv.currentf[dim_idx];
}
}
fprintf(stdout, "Metropolis algorithm finished. Accepted Samples = %d\n\n", *accepted_samples);
}
void burn_in_metropolis_sp(cublasHandle_t handle, gsl_rng *r, mcmc_str mcin, mcmc_tune_str mct, mcmc_v_str mcdata,
mcmc_int_v mclocv, mcmc_int *mcloc, sz_str sz, gpu_v_str gpu,
dev_v_str d, float *host_lhoodf)
{
int i, dim_idx;
float plhood, clhood;
out_str res;
fprintf(stdout, "Starting burn in process. Selected rwsdf = %f\n", mct.rwsdf);
// initialize burn in sequence
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mclocv.currentf[dim_idx] = mcdata.burnf[dim_idx];
clhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.currentf, sz.samples, d, host_lhoodf, &res);
// calculate the current posterior
mcloc->cposteriorf = log_prior_sp(mclocv.currentf, mcin) + clhood;
// start burn in
for(i=1; i<mcin.burnin; i++)
{
// propose next sample
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx] + (float) gsl_ran_gaussian_ziggurat(r, (double)mct.rwsdf); // random walk using Marsaglia-Tsang ziggurat algorithm
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, &res);
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef)
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
{
mcdata.burnf[i*mcin.ddata + dim_idx] = mclocv.proposedf[dim_idx];
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf;
// printf("ok10\n");
}else{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mcdata.burnf[i*mcin.ddata + dim_idx] = mclocv.currentf[dim_idx];
// printf("ok11\n");
}
}
fprintf(stdout, "Burn in process finished.\n\n");
}
float reduction_f(gpu_v_str gpu, dev_v_str d, float *host_lhoodf, float *ke_acc_Bytes)
{
float gpu_result = 0;
int i;
int numBlocks = gpu.blocks;
int threads, blocks;
*ke_acc_Bytes = gpu.size * sizeof(float);
reduceSum_f(gpu.size, gpu.threads, gpu.blocks, gpu.kernel, d.cuLhoodf, d.lhoodf, LogRegression);
while(numBlocks >= gpu.cpuThresh)
{
getBlocksAndThreads(gpu.kernel, numBlocks, gpu.maxBlocks, gpu.maxThreads, &blocks, &threads);
ke_acc_Bytes += numBlocks * sizeof(float);
gpuErrchk(cudaMemcpy(d.cuLhoodf, d.lhoodf, numBlocks*sizeof(float), cudaMemcpyDeviceToDevice));
reduceSum_f(numBlocks, threads, blocks, gpu.kernel, d.cuLhoodf, d.lhoodf, Reduction);
if(gpu.kernel < 3)
{
numBlocks = (numBlocks + threads - 1) / threads;
}else{
numBlocks = (numBlocks +(threads*2-1)) / (threads*2);
}
}
gpuErrchk(cudaMemcpy(host_lhoodf, d.lhoodf, numBlocks*sizeof(float), cudaMemcpyDeviceToHost));
// accumulate result on CPU
for(i=0; i<numBlocks; i++){
gpu_result += host_lhoodf[i];
}
return gpu_result;
}
/*
 * Computes the log-likelihood of one sample vector on the GPU:
 *   1. copies the sample to the device,
 *   2. cuBLAS SGEMV: cuLhoodf = dataf * samplesf (per-datum dot products),
 *   3. reduction_f sums the per-datum contributions.
 * Also times both stages with CUDA events and accumulates per-iteration
 * averages into `res`.
 *
 * NOTE(review): sampleSz is a float holding a byte count fed to cudaMemcpy —
 * works while sizes are small/exact, but an integer type would be safer; kept
 * to preserve the external interface.
 */
float gpu_likelihood_f(cublasHandle_t handle, mcmc_str mcin, gpu_v_str gpu, float *samplesf, float sampleSz,
dev_v_str d, float *host_lhoodf, out_str *res)
{
float ke_acc_Bytes = 0;
float cuBytes = 0;
float reduced_lhood = 0;
float a = 1.0;
float b = 0.0;
float cu_ms = 0;
float ke_ms = 0;
cudaEvent_t cuStart, cuStop, keStart, keStop;
gpuErrchk(cudaEventCreate(&cuStart));
gpuErrchk(cudaEventCreate(&cuStop));
gpuErrchk(cudaEventCreate(&keStart));
gpuErrchk(cudaEventCreate(&keStop));
gpuErrchk(cudaMemcpy(d.samplesf, samplesf, sampleSz, cudaMemcpyHostToDevice));
cudaEventRecord(cuStart);
/* y = 1.0 * A*x + 0.0 * y  -> per-datum linear predictors */
cublasSgemv(handle, CUBLAS_OP_N, mcin.Nd, mcin.ddata, &a, d.dataf, mcin.Nd, d.samplesf, 1, &b, d.cuLhoodf, 1);
gpuErrchk(cudaDeviceSynchronize());
cudaEventRecord(cuStop);
cudaEventRecord(keStart);
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
reduced_lhood = reduction_f(gpu, d, host_lhoodf, &ke_acc_Bytes);
gpuErrchk(cudaDeviceSynchronize());
cudaEventRecord(keStop);
cudaEventSynchronize(cuStop);
cudaEventSynchronize(keStop);
cudaEventElapsedTime(&cu_ms, cuStart, cuStop);
cudaEventElapsedTime(&ke_ms, keStart, keStop);
cuBytes = mcin.Nd * (mcin.ddata + 2) * sizeof(float);
res->cuTime += cu_ms / mcin.Ns; // average cuBlas time
res->cuBandwidth += (cuBytes / cu_ms / 1e6) / mcin.Ns;
res->kernelTime += ke_ms / mcin.Ns;
res->kernelBandwidth += (ke_acc_Bytes / ke_ms / 1e6) / mcin.Ns;
res->gpuTime += (cu_ms + ke_ms) / mcin.Ns;
res->gpuBandwidth += ((cuBytes + ke_acc_Bytes) / (cu_ms + ke_ms) / 1e6) / mcin.Ns;
/* BUG FIX: the events were never destroyed; this function runs once per MCMC
 * iteration, so the original leaked four CUDA events per iteration. */
gpuErrchk(cudaEventDestroy(cuStart));
gpuErrchk(cudaEventDestroy(cuStop));
gpuErrchk(cudaEventDestroy(keStart));
gpuErrchk(cudaEventDestroy(keStop));
return reduced_lhood;
}
// Tune rwsdf by scanning several candidate target acceptance ratios and keeping
// the value that maximizes the effective sample size (ESS).
/*
 * Tunes the random-walk standard deviation (mct->rwsdf). For each of nine
 * target acceptance ratios it adapts sd via short Metropolis runs, then scores
 * the resulting chain by effective sample size (ESS) and keeps the sd with the
 * highest ESS.
 *
 * NOTE(review): `lagidx` is declared float but used as a count/lag index, and
 * `initCond` is forwarded to short_run_burn_in_sp which appears to ignore it —
 * both kept as-is to preserve behavior; worth confirming upstream.
 */
void tune_ess_sp(data_str data, gsl_rng *r, mcmc_str mcin, mcmc_tune_str *mct, gpu_v_str gpu, float *initCond, int length)
{
// print_gpu_info();
cudaSetDevice(0);
cublasHandle_t handle;
cublasCreate(&handle);
mcmc_int_v mclocv;
mcmc_int mcloc;
mcloc.cposteriorf = 0;
mcloc.pposteriorf = 0;
mcloc.acceptancef = 0;
mcloc.uf = 0;
malloc_mcmc_vectors_sp(&mclocv, mcin);
// set up the gpu vectors
sz_str sz;
dev_v_str d;
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
sz.samples = mcin.ddata * sizeof(float);
sz.data = mcin.ddata * mcin.Nd * sizeof(float);
sz.cuLhood = mcin.Nd * sizeof(float);
sz.lhood = gpu.blocks * sizeof(float);
cudaMalloc(&d.samplesf, sz.samples);
cudaMalloc(&d.dataf, sz.data);
cudaMalloc(&d.cuLhoodf, sz.cuLhood); // kernel will return a vector of likelihoods
cudaMalloc(&d.lhoodf, sz.lhood);
cudaMemcpy(d.dataf, data.dataf, sz.data, cudaMemcpyHostToDevice);
int chain_length = length;
int runs = 40;
float target_a[] = {0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50};
float error_tolerance = 0.01;
float min_error = 9999999;
float max_ess = -9999999;
float lagidx = 500;
float sd = mct->rwsdf;
float ess_sd = sd;
int accepted_samples, run, a_idx;
float acc_ratio_c, acc_error_c, best_acc_ratio;
float circ_sum, best_sd, ess_c;
float *samples = NULL;
samples = (float*) malloc(mcin.ddata * chain_length * sizeof(float));
if(samples == NULL)
fprintf(stderr, "ERROR: Samples vector did not allocated.\n");
float *autocorr_lagk = NULL;
autocorr_lagk = (float*) malloc(lagidx * sizeof(float));
if(autocorr_lagk == NULL)
fprintf(stderr, "ERROR: Autocorrelation vector did not allocated.\n");
fprintf(stdout, "\nStarting tuning process. rwsdf = %5.3f\n", sd);
for(a_idx=0; a_idx<9; a_idx++){
fprintf(stdout, "\tStarting tuning for target ratio = %4.3f. Current rwsdf = %5.3f\n", target_a[a_idx], sd);
min_error = 9999999;
for(run=0; run<runs; run++)
{
fprintf(stdout, "\t\tStarting Run %2d. Current rwsdf = %5.3f\n", run, sd);
accepted_samples = 0;
short_run_burn_in_sp(handle, r, mclocv, mcin, sd, &mcloc, sz, gpu, d, data.mvoutf, initCond);
short_run_metropolis_sp(handle, r, mclocv, mcin, chain_length, sd, &mcloc,
samples, &accepted_samples, sz, gpu, d, data.mvoutf);
acc_ratio_c = accepted_samples/(float)chain_length;
acc_error_c = fabs(acc_ratio_c - target_a[a_idx]);
if(acc_error_c < min_error) // accept the current sd
{
best_sd = sd;
min_error = acc_error_c;
best_acc_ratio = acc_ratio_c;
fprintf(stdout, "\t\t\tAccepted: rwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
best_sd, best_acc_ratio, min_error);
}else{
fprintf(stdout, "\t\t\trwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
sd, acc_ratio_c, acc_error_c);
}
if(min_error < error_tolerance)
break;
// multiplicative adaptation: grow sd when accepting too often, shrink otherwise
sd *= acc_ratio_c/target_a[a_idx];
}
// score the last chain produced for this target ratio by its ESS
circ_sum = circular_autocorrelation_sp(autocorr_lagk, samples, mcin.ddata,
chain_length, lagidx);
ess_c = chain_length / (1 + 2*circ_sum);
if(ess_c > max_ess)
{
max_ess = ess_c;
ess_sd = sd;
fprintf(stdout, "\tAccepted: ess = %8.3f, rwsdf = %5.3f\n", max_ess, ess_sd);
}else{
fprintf(stdout, "\tess= %8.3f, rwsdf = %5.3f\n", ess_c, sd);
}
}
mct->rwsdf = ess_sd;
fprintf(stdout, "Tuning finished. Selected rwsdf = %5.3f\n\n", mct->rwsdf);
cublasDestroy(handle);
free(samples);
free(autocorr_lagk);
/* BUG FIX: the original freed d.samples/d.data/d.cuLhood/d.lhood, but the
 * buffers allocated above are the single-precision members (…f). That freed
 * pointers this function never allocated and leaked the real device buffers. */
cudaFree(d.samplesf);
cudaFree(d.dataf);
cudaFree(d.cuLhoodf);
cudaFree(d.lhoodf);
free_mcmc_vectors_sp(mclocv, mcin);
}
// Tune rwsdf to achieve a user-specified target acceptance ratio.
/*
 * Tunes the random-walk standard deviation (mct->rwsdf) so that short
 * Metropolis runs hit the target acceptance ratio `ratio`, using at most
 * `max_reps` adaptation steps. Keeps the sd with the smallest acceptance
 * error and writes it back into mct->rwsdf.
 */
void tune_target_a_sp_v2(data_str data, gsl_rng *r, mcmc_str mcin, mcmc_tune_str *mct,
gpu_v_str gpu, float *initCond, float ratio, int max_reps)
{
// print_gpu_info();
cudaSetDevice(0);
cublasHandle_t handle;
cublasCreate(&handle);
mcmc_int_v mclocv;
mcmc_int mcloc;
mcloc.cposteriorf = 0;
mcloc.pposteriorf = 0;
mcloc.acceptancef = 0;
mcloc.uf = 0;
malloc_mcmc_vectors_sp(&mclocv, mcin);
// set up the gpu vectors
sz_str sz;
dev_v_str d;
getBlocksAndThreads(gpu.kernel, mcin.Nd, gpu.maxBlocks, gpu.maxThreads, &gpu.blocks, &gpu.threads);
sz.samples = mcin.ddata * sizeof(float);
sz.data = mcin.ddata * mcin.Nd * sizeof(float);
sz.cuLhood = mcin.Nd * sizeof(float);
sz.lhood = gpu.blocks * sizeof(float);
cudaMalloc(&d.samplesf, sz.samples);
cudaMalloc(&d.dataf, sz.data);
cudaMalloc(&d.cuLhoodf, sz.cuLhood); // kernel will return a vector of likelihoods
cudaMalloc(&d.lhoodf, sz.lhood);
cudaMemcpy(d.dataf, data.dataf, sz.data, cudaMemcpyHostToDevice);
int chain_length = 5000;
int runs = max_reps;
float target_a = ratio;
float error_tolerance = 0.01;
float min_error = 9999999;
float sd = mct->rwsdf;
float best_sd = sd;
int accepted_samples, run;
float acc_ratio_c, acc_error_c, best_acc_ratio;
float *samples = NULL;
samples = (float*) malloc(mcin.ddata * chain_length * sizeof(float));
if(samples == NULL)
fprintf(stderr, "ERROR: Samples vector did not allocated.\n");
fprintf(stdout, "\nStarting tuning process. rwsdf = %5.3f\n", sd);
for(run=0; run<runs; run++)
{
fprintf(stdout, "\tStarting Run %2d. Current rwsdf = %5.3f\n", run, sd);
accepted_samples = 0;
short_run_burn_in_sp(handle, r, mclocv, mcin, sd, &mcloc, sz, gpu, d, data.mvoutf, initCond);
short_run_metropolis_sp(handle, r, mclocv, mcin, chain_length, sd, &mcloc,
samples, &accepted_samples, sz, gpu, d, data.mvoutf);
acc_ratio_c = accepted_samples/(float)chain_length;
acc_error_c = fabs(acc_ratio_c - target_a);
if(acc_error_c < min_error) // accept the current sd
{
best_sd = sd;
min_error = acc_error_c;
best_acc_ratio = acc_ratio_c;
fprintf(stdout, "\t\tAccepted: rwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
best_sd, best_acc_ratio, min_error);
}else{
fprintf(stdout, "\t\trwsdf = %5.3f, acceptance = %4.3f, error = %4.3f\n",
sd, acc_ratio_c, acc_error_c);
}
if(min_error < error_tolerance)
break;
// multiplicative adaptation toward the target acceptance ratio
sd *= acc_ratio_c/target_a;
}
mct->rwsdf = best_sd;
fprintf(stdout, "Tuning finished. Selected rwsdf = %5.3f\n\n", mct->rwsdf);
cublasDestroy(handle);
free(samples);
/* BUG FIX: the original freed d.samples/d.data/d.cuLhood/d.lhood, but the
 * buffers allocated above are the single-precision members (…f). */
cudaFree(d.samplesf);
cudaFree(d.dataf);
cudaFree(d.cuLhoodf);
cudaFree(d.lhoodf);
free_mcmc_vectors_sp(mclocv, mcin);
}
/*
 * Runs a short Metropolis burn-in (mcin.burnin iterations) without storing
 * samples; used by the tuners to settle the chain before measuring acceptance.
 * Left byte-identical: the statistical result depends on the exact order of
 * GSL RNG calls, so only comments are added here.
 *
 * NOTE(review): initCond is accepted but never used — the chain is started at
 * the zero vector instead. Confirm whether starting from initCond was intended.
 */
void short_run_burn_in_sp(cublasHandle_t handle, gsl_rng *r, mcmc_int_v mclocv, mcmc_str mcin, float sd, mcmc_int *mcloc,
sz_str sz, gpu_v_str gpu, dev_v_str d, float *host_lhoodf, float *initCond)
{
int i, dim_idx;
float plhood, clhood;
out_str res;
// initialize burn in sequence
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++)
mclocv.currentf[dim_idx] = 0;
clhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.currentf, sz.samples, d, host_lhoodf, &res);
// calculate the current posterior
mcloc->cposteriorf = log_prior_sp(mclocv.currentf, mcin) + clhood;
// start burn-in
for(i=1; i<mcin.burnin; i++)
{
// propose theta' = theta + N(0, sd) independently per dimension
for(dim_idx = 0; dim_idx < mcin.ddata; dim_idx++){
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx]
+ (float) gsl_ran_gaussian_ziggurat(r, (double)sd); // random walk using Marsaglia-Tsang ziggurat algorithm
}
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, &res);
// log acceptance ratio (symmetric proposal, so just the posterior difference)
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef) // decide if you accept the proposed theta or not
{
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf; // make proposed posterior the current
}
}
}
/*
 * Runs a short Metropolis chain of `chain_length` iterations, storing every
 * sample (accepted or repeated-current) into `samples` (row-major,
 * chain_length x mcin.ddata) and counting acceptances in *accepted_samples.
 * Left byte-identical: the statistical result depends on the exact order of
 * GSL RNG calls, so only comments are added here.
 */
void short_run_metropolis_sp(cublasHandle_t handle, gsl_rng *r, mcmc_int_v mclocv, mcmc_str mcin, int chain_length, float sd,
mcmc_int *mcloc, float *samples, int *accepted_samples, sz_str sz,
gpu_v_str gpu, dev_v_str d, float *host_lhoodf)
{
int i, dim_idx;
float plhood;
out_str res;
// start metropolis
for(i=0; i < chain_length; i++){
// propose theta' = theta + N(0, sd) independently per dimension
for(dim_idx = 0; dim_idx < mcin.ddata; dim_idx++){
mclocv.proposedf[dim_idx] = mclocv.currentf[dim_idx]
+ (float) gsl_ran_gaussian_ziggurat(r, (double)sd); // random walk using Marsaglia-Tsang ziggurat algorithm
}
plhood = gpu_likelihood_f(handle, mcin, gpu, mclocv.proposedf, sz.samples, d, host_lhoodf, &res);
mcloc->acceptancef = acceptance_ratio_sp(mclocv, mcloc, mcin, plhood);
mcloc->uf = gsl_rng_uniform(r);
if(log(mcloc->uf) <= mcloc->acceptancef) // decide if you accept the proposed theta or not
{
// accepted: advance the chain and record the proposal
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){
mclocv.currentf[dim_idx] = mclocv.proposedf[dim_idx];
samples[i*mcin.ddata + dim_idx] = mclocv.proposedf[dim_idx];
}
mcloc->cposteriorf = mcloc->pposteriorf; // make proposed posterior the current
*accepted_samples += 1;
}else{
// rejected: the chain stays put, record the current state again
for(dim_idx=0; dim_idx<mcin.ddata; dim_idx++){
samples[i*mcin.ddata + dim_idx] = mclocv.currentf[dim_idx];
}
}
}
}
/*
 * Computes the log Metropolis acceptance ratio for the proposed sample and
 * caches its log-posterior in mcloc->pposteriorf. With a symmetric proposal
 * this is simply log p(theta') - log p(theta).
 */
float acceptance_ratio_sp(mcmc_int_v mclocv, mcmc_int *mcloc, mcmc_str mcin, float plhood)
{
    /* proposed log-posterior = log-prior + log-likelihood */
    mcloc->pposteriorf = log_prior_sp(mclocv.proposedf, mcin) + plhood;
    return mcloc->pposteriorf - mcloc->cposteriorf;
}
/*
 * Log prior density of a sample under independent zero-mean Gaussian priors
 * with standard deviation PRIOR_SD on every dimension (sum of per-dimension
 * log densities).
 */
float log_prior_sp(float *sample, mcmc_str mcin)
{
    float acc = 0;
    int dd;
    for(dd = 0; dd < mcin.ddata; dd++)   // assuming iid priors
        acc += log(gsl_ran_gaussian_pdf(sample[dd], PRIOR_SD));
    return acc;
}
#endif // __MCMC_GPU_SP_CU__ |
cc36eff0daa7bdee06ea70e1abb3497a7fb95ad4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: every thread prints its full thread/block coordinates plus the
// block and grid dimensions. Output order is nondeterministic across threads.
__global__ void print_details()
{
printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d ,blockDim.x: %d, blockDim.y: %d, gridDim.x: %d, gridDim.y: %d\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
// Launches print_details over a 4x4x4 domain split into 2x2x2 thread blocks,
// i.e. a 2x2x2 grid, then waits for completion and resets the device.
int main()
{
    const int nx = 4, ny = 4, nz = 4;

    dim3 block(2, 2, 2);
    dim3 grid(nx / block.x, ny / block.y, nz / block.z);

    print_details<<<grid, block>>>();

    hipDeviceSynchronize();
    hipDeviceReset();
    return 0;
}
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: every thread prints its full thread/block coordinates plus the
// block and grid dimensions. Output order is nondeterministic across threads.
__global__ void print_details()
{
printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d ,blockDim.x: %d, blockDim.y: %d, gridDim.x: %d, gridDim.y: %d\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
// Launches print_details over a 4x4x4 domain split into 2x2x2 thread blocks,
// i.e. a 2x2x2 grid, then waits for completion and resets the device.
int main()
{
    const int nx = 4, ny = 4, nz = 4;

    dim3 block(2, 2, 2);
    dim3 grid(nx / block.x, ny / block.y, nz / block.z);

    print_details<<<grid, block>>>();

    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
1c1e195f9861ee29c4f6c209c95acee7a0726f8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmergebicgstab.cu, normal z -> d, Mon Jun 25 18:24:24 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from bicgstab into one.
/* -------------------------------------------------------------------------- */
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   p = r + beta * ( p - omega * v )
__global__ void
magma_dbicgstab_1_kernel(
    int num_rows,
    int num_cols,
    double beta,
    double omega,
    double *r,
    double *v,
    double *p )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        p[idx] = r[idx] + beta * ( p[idx] - omega * v[idx] );
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
p = r + beta * ( p - omega * v )
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
omega double
scalar
@param[in]
r magmaDouble_ptr
vector
@param[in]
v magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_1(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double beta,
    double omega,
    magmaDouble_ptr r,
    magmaDouble_ptr v,
    magmaDouble_ptr p,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_dbicgstab_1_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_cols, beta, omega,
                    r, v, p );
    return MAGMA_SUCCESS;
}
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   s = r - alpha * v
__global__ void
magma_dbicgstab_2_kernel(
    int num_rows,
    int num_cols,
    double alpha,
    magmaDouble_ptr r,
    magmaDouble_ptr v,
    magmaDouble_ptr s )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        s[idx] = r[idx] - alpha * v[idx];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
s = r - alpha v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
r magmaDouble_ptr
vector
@param[in]
v magmaDouble_ptr
vector
@param[in,out]
s magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_2(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double alpha,
    magmaDouble_ptr r,
    magmaDouble_ptr v,
    magmaDouble_ptr s,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_dbicgstab_2_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_cols, alpha, r, v, s );
    return MAGMA_SUCCESS;
}
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   x = x + alpha * p + omega * s
//   r = s - omega * t
__global__ void
magma_dbicgstab_3_kernel(
    int num_rows,
    int num_cols,
    double alpha,
    double omega,
    double *p,
    double *s,
    double *t,
    double *x,
    double *r )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        double s_val = s[idx];          // read once; reused for both updates
        x[idx] += alpha * p[idx] + omega * s_val;
        r[idx] = s_val - omega * t[idx];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + alpha * p + omega * s
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
omega double
scalar
@param[in]
p magmaDouble_ptr
vector
@param[in]
s magmaDouble_ptr
vector
@param[in]
t magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_3(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double alpha,
    double omega,
    magmaDouble_ptr p,
    magmaDouble_ptr s,
    magmaDouble_ptr t,
    magmaDouble_ptr x,
    magmaDouble_ptr r,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_dbicgstab_3_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, p, s, t, x, r );
    return MAGMA_SUCCESS;
}
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   x = x + alpha * y + omega * z
//   r = s - omega * t
__global__ void
magma_dbicgstab_4_kernel(
    int num_rows,
    int num_cols,
    double alpha,
    double omega,
    double *y,
    double *z,
    double *s,
    double *t,
    double *x,
    double *r )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        x[idx] += alpha * y[idx] + omega * z[idx];
        r[idx] = s[idx] - omega * t[idx];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + alpha * y + omega * z
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
omega double
scalar
@param[in]
y magmaDouble_ptr
vector
@param[in]
z magmaDouble_ptr
vector
@param[in]
s magmaDouble_ptr
vector
@param[in]
t magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_4(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double alpha,
    double omega,
    magmaDouble_ptr y,
    magmaDouble_ptr z,
    magmaDouble_ptr s,
    magmaDouble_ptr t,
    magmaDouble_ptr x,
    magmaDouble_ptr r,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    hipLaunchKernelGGL(( magma_dbicgstab_4_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, y, z, s, t, x, r );
    return MAGMA_SUCCESS;
}
| 1c1e195f9861ee29c4f6c209c95acee7a0726f8a.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmergebicgstab.cu, normal z -> d, Mon Jun 25 18:24:24 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from bicgstab into one.
/* -------------------------------------------------------------------------- */
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   p = r + beta * ( p - omega * v )
__global__ void
magma_dbicgstab_1_kernel(
    int num_rows,
    int num_cols,
    double beta,
    double omega,
    double *r,
    double *v,
    double *p )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        p[idx] = r[idx] + beta * ( p[idx] - omega * v[idx] );
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
p = r + beta * ( p - omega * v )
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
omega double
scalar
@param[in]
r magmaDouble_ptr
vector
@param[in]
v magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_1(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double beta,
    double omega,
    magmaDouble_ptr r,
    magmaDouble_ptr v,
    magmaDouble_ptr p,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_dbicgstab_1_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, omega,
                    r, v, p );
    return MAGMA_SUCCESS;
}
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   s = r - alpha * v
__global__ void
magma_dbicgstab_2_kernel(
    int num_rows,
    int num_cols,
    double alpha,
    magmaDouble_ptr r,
    magmaDouble_ptr v,
    magmaDouble_ptr s )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        s[idx] = r[idx] - alpha * v[idx];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
s = r - alpha v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
r magmaDouble_ptr
vector
@param[in]
v magmaDouble_ptr
vector
@param[in,out]
s magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_2(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double alpha,
    magmaDouble_ptr r,
    magmaDouble_ptr v,
    magmaDouble_ptr s,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_dbicgstab_2_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, r, v, s );
    return MAGMA_SUCCESS;
}
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   x = x + alpha * p + omega * s
//   r = s - omega * t
__global__ void
magma_dbicgstab_3_kernel(
    int num_rows,
    int num_cols,
    double alpha,
    double omega,
    double *p,
    double *s,
    double *t,
    double *x,
    double *r )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        double s_val = s[idx];          // read once; reused for both updates
        x[idx] += alpha * p[idx] + omega * s_val;
        r[idx] = s_val - omega * t[idx];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + alpha * p + omega * s
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
omega double
scalar
@param[in]
p magmaDouble_ptr
vector
@param[in]
s magmaDouble_ptr
vector
@param[in]
t magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_3(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double alpha,
    double omega,
    magmaDouble_ptr p,
    magmaDouble_ptr s,
    magmaDouble_ptr t,
    magmaDouble_ptr x,
    magmaDouble_ptr r,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_dbicgstab_3_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, p, s, t, x, r );
    return MAGMA_SUCCESS;
}
// Element-wise BiCGSTAB update, one thread per row, columns sequential:
//   x = x + alpha * y + omega * z
//   r = s - omega * t
__global__ void
magma_dbicgstab_4_kernel(
    int num_rows,
    int num_cols,
    double alpha,
    double omega,
    double *y,
    double *z,
    double *s,
    double *t,
    double *x,
    double *r )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        int idx = row + col * num_rows;
        x[idx] += alpha * y[idx] + omega * z[idx];
        r[idx] = s[idx] - omega * t[idx];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + alpha * y + omega * z
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
omega double
scalar
@param[in]
y magmaDouble_ptr
vector
@param[in]
z magmaDouble_ptr
vector
@param[in]
s magmaDouble_ptr
vector
@param[in]
t magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dbicgstab_4(
    magma_int_t num_rows,
    magma_int_t num_cols,
    double alpha,
    double omega,
    magmaDouble_ptr y,
    magmaDouble_ptr z,
    magmaDouble_ptr s,
    magmaDouble_ptr t,
    magmaDouble_ptr x,
    magmaDouble_ptr r,
    magma_queue_t queue )
{
    // One thread per row; ceil-div grid of fixed-size blocks.
    dim3 threads( BLOCK_SIZE );
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    magma_dbicgstab_4_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, y, z, s, t, x, r );
    return MAGMA_SUCCESS;
}
|
80f0d6ab7c5c370c6b0dc3e9fdd362028cf8459d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Accumulates polarization power products from interleaved dual-pol complex
// samples. Each float4 in `src` holds two complex samples: c1 = (x + iy),
// c2 = (z + iw). No bounds check: the launch must cover exactly the buffer
// length (TODO confirm at call sites).
__global__ void PowerInterleaved(float4 *src, float4 *dest) {
const size_t i = blockDim.x * blockIdx.x + threadIdx.x;
// Per-polarization (auto) powers: |c1|^2 and |c2|^2.
// NOTE(review): the original labels ("Cross pols" here, "Parallel pols"
// below) appear swapped relative to the arithmetic.
dest[i].x += src[i].x * src[i].x + src[i].y * src[i].y;
dest[i].y += src[i].z * src[i].z + src[i].w * src[i].w;
// Cross-polarization product: real and imaginary parts of c1 * conj(c2).
dest[i].z += src[i].x * src[i].z + src[i].y * src[i].w;
dest[i].w += src[i].y * src[i].z - src[i].x * src[i].w;
}
// Accumulates polarization power products from interleaved dual-pol complex
// samples. Each float4 in `src` holds two complex samples: c1 = (x + iy),
// c2 = (z + iw). No bounds check: the launch must cover exactly the buffer
// length (TODO confirm at call sites).
__global__ void PowerInterleaved(float4 *src, float4 *dest) {
const size_t i = blockDim.x * blockIdx.x + threadIdx.x;
// Per-polarization (auto) powers: |c1|^2 and |c2|^2.
// NOTE(review): the original labels ("Cross pols" here, "Parallel pols"
// below) appear swapped relative to the arithmetic.
dest[i].x += src[i].x * src[i].x + src[i].y * src[i].y;
dest[i].y += src[i].z * src[i].z + src[i].w * src[i].w;
// Cross-polarization product: real and imaginary parts of c1 * conj(c2).
dest[i].z += src[i].x * src[i].z + src[i].y * src[i].w;
dest[i].w += src[i].y * src[i].z - src[i].x * src[i].w;
}
e015e469a61c2e634564a99e3661f766aa95fbc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Adapted from https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialUpSamplingNearest.cu
*
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert,
* Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert,
* Samy Bengio, Johnny Mariethoz)
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of NEC Laboratories American and IDIAP Research
* Institute nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe2/core/context_gpu.h"
#include "upsample_op.h"
namespace caffe2 {
namespace {
// Maps a flat index into the upsampled (d1, d2, d3)-shaped output back to the
// flat index of its nearest-neighbor source element: the two innermost spatial
// coordinates are divided by scale_factor and the index is rebuilt against the
// correspondingly smaller source dimensions.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
  int w = ii % d3; ii /= d3;
  int z = ii % d2; ii /= d2;
  int y = ii % d1; ii /= d1;
  int x = ii;
  // Nearest-neighbor downscale of the spatial coordinates.
  w /= scale_factor;
  z /= scale_factor;
  int src_d2 = d2 / scale_factor;
  int src_d3 = d3 / scale_factor;
  return (((x * d1 + y) * src_d2) + z) * src_d3 + w;
}
// Inverse mapping of translate_idx: given a flat index into the small input
// and an in-cell offset (off_x, off_y), returns the flat index of the
// corresponding element in the scale_factor-times-larger output.
__device__ int translate_idx_inv(
    int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
  int w = ii % d3; ii /= d3;
  int z = ii % d2; ii /= d2;
  int y = ii % d1; ii /= d1;
  int x = ii;
  // Upscale the spatial coordinates and select one cell within the block.
  w = w * scale_factor + off_x;
  z = z * scale_factor + off_y;
  int out_d2 = d2 * scale_factor;
  int out_d3 = d3 * scale_factor;
  return (((x * d1 + y) * out_d2) + z) * out_d3 + w;
}
// Nearest-neighbor upsampling: each thread writes one output element, reading
// its source element via translate_idx. The 2-D grid is flattened into a
// single element index (blockDim is 1-D in practice, so threadIdx.y is 0).
__global__ void upscale(const float *input, float *output, long no_elements,
                        int scale_factor, int d1, int d2, int d3) {
  long ii = threadIdx.x + blockDim.x * blockIdx.x
          + threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (ii >= no_elements) return;
  output[ii] = input[translate_idx(ii, d1, d2, d3, scale_factor)];
}
// Gradient of nearest-neighbor upsampling: each thread owns one gradInput
// element and accumulates the scale_factor x scale_factor gradOutput cells
// that were copied from it. Assumes gradInput_data is pre-zeroed (+= below).
__global__ void downscale(float *gradInput_data, const float *gradOutput_data,
                          long no_elements, int scale_factor, int d1, int d2,
                          int d3) {
  long ii = threadIdx.x + blockDim.x * blockIdx.x
          + threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (ii >= no_elements) return;
  for (int dx = 0; dx < scale_factor; dx++) {
    for (int dy = 0; dy < scale_factor; dy++) {
      gradInput_data[ii] +=
          gradOutput_data[translate_idx_inv(ii, d1, d2, d3, scale_factor, dx, dy)];
    }
  }
}
} // namespace
// Forward pass: nearest-neighbor upsample of the last two dimensions of X by
// scale_. Supports 3-D (C, H, W) and, presumably, 4-D (N, C, H, W) inputs —
// the else-branch reads dims 1..3, so ndim > 4 would be mishandled (TODO
// confirm callers only pass 3-D/4-D tensors).
template<>
bool UpsampleOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
vector<TIndex> out_shape;
for (int i = 0; i < X.ndim(); ++i) {
out_shape.push_back(X.dim32(i));
}
// Scale the two innermost (spatial) dimensions.
out_shape[X.ndim() - 1] *= scale_;
out_shape[X.ndim() - 2] *= scale_;
Y->Resize(out_shape);
// d1/d2/d3 are the channel + spatial dims of the OUTPUT, as consumed by
// translate_idx inside the upscale kernel.
int d1;
int d2;
int d3;
if (X.ndim() == 3) {
d1 = Y->dim32(0);
d2 = Y->dim32(1);
d3 = Y->dim32(2);
} else {
d1 = Y->dim32(1);
d2 = Y->dim32(2);
d3 = Y->dim32(3);
}
long no_elements = Y->size();
const float *input_data = X.data<float>();
float *output_data = Y->mutable_data<float>();
// cuda blocks & threads:
long nthreads = 256;
// Max number of blocks: http://en.wikipedia.org/wiki/CUDA
// 65535 for SM 2.x, 2^32 -1 for >= 3.0
// TODO: When we move to SM 3.5 we should update this
// 2-D grid so that more than 65535*256 elements can be covered.
long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
long n_yblocks = (long)ceil(
(float)no_elements / (float)(n_xblocks * nthreads));
CAFFE_ENFORCE(n_yblocks <= 65535);
dim3 blocks(n_xblocks, n_yblocks);
dim3 threads(nthreads);
hipLaunchKernelGGL(( upscale), dim3(blocks), dim3(threads), 0, context_.cuda_stream(),
input_data, output_data, no_elements, scale_, d1, d2, d3);
return true;
}
// Backward pass: dX has the shape of the original input X; each input element
// accumulates the gradients of the scale_ x scale_ output cells it produced.
// dX is zero-filled first because the downscale kernel accumulates with +=.
template<>
bool UpsampleGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Original input to "forward" op
auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
float *gradInput_data = dX->mutable_data<float>();
const float *gradOutput_data = dY.data<float>();
// d1/d2/d3 are the channel + spatial dims of the INPUT-shaped gradient,
// as consumed by translate_idx_inv inside the downscale kernel.
int d1;
int d2;
int d3;
if (dX->ndim() == 3) {
d1 = dX->dim32(0);
d2 = dX->dim32(1);
d3 = dX->dim32(2);
} else {
d1 = dX->dim32(1);
d2 = dX->dim32(2);
d3 = dX->dim32(3);
}
long no_elements = dX->size();
// cuda blocks & threads:
long nthreads = 256;
// Max number of blocks: http://en.wikipedia.org/wiki/CUDA
// 65535 for SM 2.x, 2^32 -1 for >= 3.0
// TODO: When we move to SM 3.5 we should update this
// 2-D grid so that more than 65535*256 elements can be covered.
long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
long n_yblocks = (long)ceil(
(float)no_elements / (float)(n_xblocks * nthreads));
CAFFE_ENFORCE(n_yblocks <= 65535);
dim3 blocks(n_xblocks, n_yblocks);
dim3 threads(nthreads);
// Zero the accumulator before the kernel's += updates.
math::Set<float, CUDAContext>(no_elements, 0.f, gradInput_data, &context_);
hipLaunchKernelGGL(( downscale), dim3(blocks), dim3(threads), 0, context_.cuda_stream(),
gradInput_data, gradOutput_data, no_elements, scale_, d1, d2, d3);
return true;
}
// Register the GPU implementations under the operator names used by the net.
REGISTER_CUDA_OPERATOR(Upsample,
UpsampleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(UpsampleGradient,
UpsampleGradientOp<float, CUDAContext>);
} // namespace caffe2
| e015e469a61c2e634564a99e3661f766aa95fbc1.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Adapted from https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialUpSamplingNearest.cu
*
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert,
* Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert,
* Samy Bengio, Johnny Mariethoz)
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of NEC Laboratories American and IDIAP Research
* Institute nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe2/core/context_gpu.h"
#include "upsample_op.h"
namespace caffe2 {
namespace {
// Maps a flat index of the enlarged (output) tensor onto the flat index of
// the source element in the small tensor: the two innermost coordinates and
// extents are divided by scale_factor (nearest-neighbor lookup).
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
  // Unpack ii into coordinates over the three innermost dims plus the rest.
  int i3 = ii % d3;
  int rem = ii / d3;
  int i2 = rem % d2;
  rem /= d2;
  int i1 = rem % d1;
  int i0 = rem / d1;
  // Shrink the two innermost coordinates and extents back to the source grid.
  int s2 = i2 / scale_factor;
  int s3 = i3 / scale_factor;
  int e2 = d2 / scale_factor;
  int e3 = d3 / scale_factor;
  return ((i0 * d1 + i1) * e2 + s2) * e3 + s3;
}
// Inverse of translate_idx: takes a flat index of the small tensor and
// returns the flat index of one element of the corresponding
// scale_factor x scale_factor block in the enlarged tensor, selected by
// the (off_x, off_y) offset inside that block.
__device__ int translate_idx_inv(
    int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
  int i3 = ii % d3;
  int rem = ii / d3;
  int i2 = rem % d2;
  rem /= d2;
  int i1 = rem % d1;
  int i0 = rem / d1;
  // Blow the two innermost coordinates and extents up to the enlarged grid.
  int o3 = i3 * scale_factor + off_x;
  int o2 = i2 * scale_factor + off_y;
  int e2 = d2 * scale_factor;
  int e3 = d3 * scale_factor;
  return ((i0 * d1 + i1) * e2 + o2) * e3 + o3;
}
// One thread per output element: copies the value of the nearest source
// element into the enlarged tensor. Launched on a 2-D grid; the y dimension
// extends the addressable range beyond the legacy 65535-block gridDim.x cap.
__global__ void upscale(const float *input, float *output, long no_elements,
                        int scale_factor, int d1, int d2, int d3) {
  // Flatten the 2-D grid into a single linear element index.
  long idx = threadIdx.x + blockDim.x * blockIdx.x;
  idx += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (idx < no_elements) {
    output[idx] = input[translate_idx(idx, d1, d2, d3, scale_factor)];
  }
}
// One thread per *input* element of the forward op: accumulates the
// gradients of every output element inside the scale_factor x scale_factor
// block that was copied from it. Each thread owns a distinct gradInput
// slot, so no atomics are required.
__global__ void downscale(float *gradInput_data, const float *gradOutput_data,
                          long no_elements, int scale_factor, int d1, int d2,
                          int d3) {
  long idx = threadIdx.x + blockDim.x * blockIdx.x;
  idx += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
  if (idx >= no_elements) return;
  for (int u = 0; u < scale_factor; u++) {
    for (int v = 0; v < scale_factor; v++) {
      gradInput_data[idx] +=
          gradOutput_data[translate_idx_inv(idx, d1, d2, d3, scale_factor, u, v)];
    }
  }
}
} // namespace
// Forward pass of nearest-neighbor upsampling (see the adapted
// SpatialUpSamplingNearest header above): enlarges the two innermost
// dimensions of a 3-D (C,H,W) or 4-D (N,C,H,W) float tensor by scale_.
template<>
bool UpsampleOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
vector<TIndex> out_shape;
for (int i = 0; i < X.ndim(); ++i) {
out_shape.push_back(X.dim32(i));
}
// Only the two spatial (innermost) dimensions grow.
out_shape[X.ndim() - 1] *= scale_;
out_shape[X.ndim() - 2] *= scale_;
Y->Resize(out_shape);
// d1/d2/d3 are the three innermost output dims consumed by the kernel's
// flat-index decomposition (a leading batch dim folds into the flat range).
int d1;
int d2;
int d3;
if (X.ndim() == 3) {
d1 = Y->dim32(0);
d2 = Y->dim32(1);
d3 = Y->dim32(2);
} else {
d1 = Y->dim32(1);
d2 = Y->dim32(2);
d3 = Y->dim32(3);
}
long no_elements = Y->size();
const float *input_data = X.data<float>();
float *output_data = Y->mutable_data<float>();
// cuda blocks & threads:
long nthreads = 256;
// Max number of blocks: http://en.wikipedia.org/wiki/CUDA
// 65535 for SM 2.x, 2^32 -1 for >= 3.0
// TODO: When we move to SM 3.5 we should update this
// Spread the flat element range over a 2-D grid so ranges larger than
// 65535 * nthreads remain addressable under the legacy gridDim.x cap.
long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
long n_yblocks = (long)ceil(
(float)no_elements / (float)(n_xblocks * nthreads));
CAFFE_ENFORCE(n_yblocks <= 65535);
dim3 blocks(n_xblocks, n_yblocks);
dim3 threads(nthreads);
upscale<<<blocks, threads, 0, context_.cuda_stream()>>>(
input_data, output_data, no_elements, scale_, d1, d2, d3);
return true;
}
// Backward pass of nearest-neighbor upsampling: each input-gradient element
// accumulates the gradients of the scale_ x scale_ output elements that were
// replicated from it.
template<>
bool UpsampleGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Original input to "forward" op
auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
float *gradInput_data = dX->mutable_data<float>();
const float *gradOutput_data = dY.data<float>();
// d1/d2/d3: the three innermost dims of dX used by the kernel's index math.
int d1;
int d2;
int d3;
if (dX->ndim() == 3) {
d1 = dX->dim32(0);
d2 = dX->dim32(1);
d3 = dX->dim32(2);
} else {
d1 = dX->dim32(1);
d2 = dX->dim32(2);
d3 = dX->dim32(3);
}
long no_elements = dX->size();
// cuda blocks & threads:
long nthreads = 256;
// Max number of blocks: http://en.wikipedia.org/wiki/CUDA
// 65535 for SM 2.x, 2^32 -1 for >= 3.0
// TODO: When we move to SM 3.5 we should update this
// 2-D grid so the flat range can exceed the legacy 65535 gridDim.x limit.
long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535);
long n_yblocks = (long)ceil(
(float)no_elements / (float)(n_xblocks * nthreads));
CAFFE_ENFORCE(n_yblocks <= 65535);
dim3 blocks(n_xblocks, n_yblocks);
dim3 threads(nthreads);
// The kernel accumulates with +=, so the buffer must be zeroed first.
math::Set<float, CUDAContext>(no_elements, 0.f, gradInput_data, &context_);
downscale<<<blocks, threads, 0, context_.cuda_stream()>>>(
gradInput_data, gradOutput_data, no_elements, scale_, d1, d2, d3);
return true;
}
// Register the GPU implementations under the operator names used by the net.
REGISTER_CUDA_OPERATOR(Upsample,
UpsampleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(UpsampleGradient,
UpsampleGradientOp<float, CUDAContext>);
} // namespace caffe2
|
45a4ad56cf28b4a142eccfabe533ae5278dc0844.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
using namespace st;
// 3-D convolution over a pre-padded volume with a cubic kernel of side
// kernel_size. One thread computes one output voxel; 'offset' shifts the
// global index so the output can be processed in successive launch batches.
// Both volumes are stored row-major with z varying fastest.
__global__ void ConvolutionKernel_CUDA (double* kernel, double* paddedImage, double* outputImage, int outputImage_x_size, int outputImage_y_size, int outputImage_z_size, int padded_image_x_size, int padded_image_y_size, int padded_image_z_size, int kernel_size, int offset)
{
int outImageIndex = blockIdx.x * blockDim.x + threadIdx.x + offset; //global index
if (outImageIndex >= outputImage_x_size * outputImage_y_size * outputImage_z_size )
return;
double sum = 0;
// Decompose the flat output index into (x, y, z) output coordinates.
int imageIndex_x = outImageIndex / (outputImage_z_size * outputImage_y_size);
// Fixed: the remainder must be divided by the *output* z extent; the
// original divided by padded_image_z_size, which yields wrong y
// coordinates whenever the padded extent differs from the output extent.
int imageIndex_y = outImageIndex % (outputImage_z_size * outputImage_y_size) / outputImage_z_size;
int imageIndex_z = outImageIndex % outputImage_z_size;
// Accumulate the kernel-weighted neighborhood from the padded volume.
for (int k = 0; k < kernel_size; k++)
{
for (int l = 0; l < kernel_size; l++)
{
for (int m = 0; m < kernel_size; m++)
{
sum += kernel[m + l * kernel_size + k * kernel_size * kernel_size] * paddedImage[(imageIndex_z + m) + ((imageIndex_y + l) * padded_image_z_size) + ((imageIndex_x + k)* padded_image_z_size * padded_image_y_size)];
}
}
}
outputImage[outImageIndex] = sum;
}
// Host wrapper: copies kernel and padded image to the GPU, runs
// ConvolutionKernel_CUDA in batches of numBlocks*threadsPerBlock voxels,
// and copies the result back.
// NOTE(review): this function is unfinished and does not compile as-is:
//  - the 'padded_image_z_size' parameter is missing its 'int' type;
//  - 'outputImage_x_size' / '_y_size' / '_z_size' are used but never
//    declared (presumably they should be parameters or derived here);
//  - the locals below redeclare and shadow the padded_image_* parameters;
//  - 'output3DImage' is written but never declared or allocated;
//  - the function is declared to return double*** but has no return.
double*** Convolution_CUDA(double* kernel, double* paddedImage, int padded_image_x_size, int padded_image_y_size, padded_image_z_size, int kernel_size)
{
cout << "Entering sumOfProduct_CUDA" << endl;
double* outputImage = (double*) malloc(outputImage_x_size * outputImage_y_size * outputImage_z_size * sizeof(double));
int padding = kernel_size / 2;
// NOTE(review): these shadow the function parameters of the same names.
int padded_image_x_size = 2 * padding + outputImage_x_size;
int padded_image_y_size = 2 * padding + outputImage_y_size;
int padded_image_z_size = 2 * padding + outputImage_z_size;
hipError_t errorcode;
double* dev_kernel;
double* dev_paddedImage;
double* dev_outputImage;
size_t free_mem, total_mem;
hipMemGetInfo(&free_mem, &total_mem);
cout << free_mem / (double)(1024 * 1024) << " " << total_mem / (double)(1024 * 1024) << endl;
cout << "Allocating " << (sizeof(*kernel) * kernel_size * kernel_size * kernel_size)/(double)(1024*1024) << " MB of memory on GPU for kernel" << endl;
cout << "Allocating " << (sizeof(*paddedImage) * padded_image_x_size * padded_image_y_size * padded_image_z_size)/(double)(1024*1024) << " MB of memory on GPU for paddedImage" << endl;
cout << "Allocating " << (sizeof(*outputImage) * outputImage_x_size * outputImage_y_size * outputImage_z_size)/(double)(1024*1024) << " MB of memory on GPU for outputImage" << endl;
//allocate memory on device
// NOTE(review): errorcode is overwritten by each call and never checked.
errorcode = hipMalloc((void**) &dev_kernel, kernel_size * kernel_size * kernel_size * sizeof(*dev_kernel));
errorcode = hipMalloc((void**) &dev_paddedImage, padded_image_x_size * padded_image_y_size * padded_image_z_size * sizeof(*dev_paddedImage));
errorcode = hipMalloc((void**) &dev_outputImage, outputImage_x_size * outputImage_y_size * outputImage_z_size * sizeof(*dev_outputImage));
//cout << errorcode << endl;
//Copy host memory contents to device contents
hipMemcpy(dev_kernel, kernel, kernel_size * kernel_size * kernel_size * sizeof(*kernel), hipMemcpyHostToDevice);
hipMemcpy(dev_paddedImage, paddedImage, padded_image_x_size * padded_image_y_size * padded_image_z_size * sizeof(*paddedImage), hipMemcpyHostToDevice);
//prefer 48 KB L1
hipError_t drivererrorcode = hipCtxSetCacheConfig(hipFuncCachePreferL1);
//cout << drivererrorcode << endl;
int device;
hipDeviceProp_t device_prop;
hipGetDevice(&device);
hipGetDeviceProperties(&device_prop, device);
int threadsPerBlock = device_prop.maxThreadsDim[0];
//int threadsPerBlock = 32;
//int numBlocks = device_prop.multiProcessorCount;
int numBlocks = device_prop.maxGridSize[0];
//Run kernel repeatedly with offset since we cannot launch too many threads at once
for (int k = 0; k < outputImage_x_size * outputImage_y_size * outputImage_z_size; k+= numBlocks * threadsPerBlock) //Run kernel on groups of pixels at a time
{
hipLaunchKernelGGL(( ConvolutionKernel_CUDA), dim3(numBlocks) , dim3(threadsPerBlock) , 0, 0, dev_kernel, dev_paddedImage, dev_outputImage, outputImage_x_size, outputImage_y_size, outputImage_z_size, padded_image_x_size, padded_image_y_size, padded_image_z_size, kernel_size, k);
}
//Copy device memory contents back to host memory
hipMemcpy(outputImage, dev_outputImage, outputImage_x_size * outputImage_y_size * outputImage_z_size * sizeof(*outputImage), hipMemcpyDeviceToHost);
cout << hipGetErrorString(hipGetLastError()) << endl;
//Block until all precious commands are complete
hipDeviceSynchronize();
hipFree(dev_kernel);
hipFree(dev_paddedImage);
hipFree(dev_outputImage);
//unflatten outputImage
// NOTE(review): output3DImage is never declared/allocated in this function.
for (int n = 0; n < outputImage_x_size * outputImage_y_size * outputImage_z_size; n++)
{
int k = n / (outputImage_z_size * outputImage_y_size);
int l = n % (outputImage_z_size * outputImage_y_size) / outputImage_z_size;
int m = n % outputImage_z_size;
output3DImage[k][l][m] = outputImage[n];
}
//Testing by making output equal to the input
/*for (int n = 0; n < outputImage_x_size * outputImage_y_size * outputImage_z_size; n++)
{
int k = n / (padded_image_z_size * padded_image_y_size);
int l = n % (padded_image_z_size * padded_image_y_size) / padded_image_z_size;
int m = n % padded_image_z_size;
if (k < padding || l < padding || m < padding || k >= outputImage_x_size || l >= outputImage_y_size || m >= outputImage_z_size)
continue;
else
output3DImage[k - padding][l - padding][m - padding] = paddedImage[n];
}*/
free(outputImage);
cout << "CUDA_Convolution done" << endl;
}
// Flattens a 3-D volume stored as nested pointers into one contiguous
// row-major buffer: element image[k][l][m] lands at flat index
// m + z*l + z*y*k (z varies fastest).
//
// Parameters: image - nested [x][y][z] array; image_*_size - its extents.
// Returns: a malloc'd buffer of x*y*z doubles; the caller owns (free()s) it.
double* flattenImage(double*** image, int image_x_size, int image_y_size, int image_z_size)
{
    double* flat_image = (double*) malloc(image_x_size * image_y_size * image_z_size * sizeof(double));
    for (int k = 0; k < image_x_size; k++)
        for (int l = 0; l < image_y_size; l++)
            for (int m = 0; m < image_z_size; m++)
                // Fixed: the original wrote to the undeclared name
                // 'flattened_image' and read from the unrelated 'paddedImage'
                // instead of the 'image' argument (and returned the
                // undeclared name), so it did not compile.
                flat_image[m + image_z_size * l + image_z_size * image_y_size * k] = image[k][l][m];
    return flat_image;
}
| 45a4ad56cf28b4a142eccfabe533ae5278dc0844.cu | #include <cuda.h>
#include <iostream>
using namespace st;
// 3-D convolution over a pre-padded volume with a cubic kernel of side
// kernel_size. One thread computes one output voxel; 'offset' shifts the
// global index so the output can be processed in successive launch batches.
// Both volumes are stored row-major with z varying fastest.
__global__ void ConvolutionKernel_CUDA (double* kernel, double* paddedImage, double* outputImage, int outputImage_x_size, int outputImage_y_size, int outputImage_z_size, int padded_image_x_size, int padded_image_y_size, int padded_image_z_size, int kernel_size, int offset)
{
int outImageIndex = blockIdx.x * blockDim.x + threadIdx.x + offset; //global index
if (outImageIndex >= outputImage_x_size * outputImage_y_size * outputImage_z_size )
return;
double sum = 0;
// Decompose the flat output index into (x, y, z) output coordinates.
int imageIndex_x = outImageIndex / (outputImage_z_size * outputImage_y_size);
// Fixed: the remainder must be divided by the *output* z extent; the
// original divided by padded_image_z_size, which yields wrong y
// coordinates whenever the padded extent differs from the output extent.
int imageIndex_y = outImageIndex % (outputImage_z_size * outputImage_y_size) / outputImage_z_size;
int imageIndex_z = outImageIndex % outputImage_z_size;
// Accumulate the kernel-weighted neighborhood from the padded volume.
for (int k = 0; k < kernel_size; k++)
{
for (int l = 0; l < kernel_size; l++)
{
for (int m = 0; m < kernel_size; m++)
{
sum += kernel[m + l * kernel_size + k * kernel_size * kernel_size] * paddedImage[(imageIndex_z + m) + ((imageIndex_y + l) * padded_image_z_size) + ((imageIndex_x + k)* padded_image_z_size * padded_image_y_size)];
}
}
}
outputImage[outImageIndex] = sum;
}
// Host wrapper: copies kernel and padded image to the GPU, runs
// ConvolutionKernel_CUDA in batches of numBlocks*threadsPerBlock voxels,
// and copies the result back.
// NOTE(review): this function is unfinished and does not compile as-is:
//  - the 'padded_image_z_size' parameter is missing its 'int' type;
//  - 'outputImage_x_size' / '_y_size' / '_z_size' are used but never
//    declared (presumably they should be parameters or derived here);
//  - the locals below redeclare and shadow the padded_image_* parameters;
//  - 'output3DImage' is written but never declared or allocated;
//  - the function is declared to return double*** but has no return;
//  - cudaThreadSynchronize() is the deprecated spelling of
//    cudaDeviceSynchronize().
double*** Convolution_CUDA(double* kernel, double* paddedImage, int padded_image_x_size, int padded_image_y_size, padded_image_z_size, int kernel_size)
{
cout << "Entering sumOfProduct_CUDA" << endl;
double* outputImage = (double*) malloc(outputImage_x_size * outputImage_y_size * outputImage_z_size * sizeof(double));
int padding = kernel_size / 2;
// NOTE(review): these shadow the function parameters of the same names.
int padded_image_x_size = 2 * padding + outputImage_x_size;
int padded_image_y_size = 2 * padding + outputImage_y_size;
int padded_image_z_size = 2 * padding + outputImage_z_size;
cudaError_t errorcode;
double* dev_kernel;
double* dev_paddedImage;
double* dev_outputImage;
size_t free_mem, total_mem;
cudaMemGetInfo(&free_mem, &total_mem);
cout << free_mem / (double)(1024 * 1024) << " " << total_mem / (double)(1024 * 1024) << endl;
cout << "Allocating " << (sizeof(*kernel) * kernel_size * kernel_size * kernel_size)/(double)(1024*1024) << " MB of memory on GPU for kernel" << endl;
cout << "Allocating " << (sizeof(*paddedImage) * padded_image_x_size * padded_image_y_size * padded_image_z_size)/(double)(1024*1024) << " MB of memory on GPU for paddedImage" << endl;
cout << "Allocating " << (sizeof(*outputImage) * outputImage_x_size * outputImage_y_size * outputImage_z_size)/(double)(1024*1024) << " MB of memory on GPU for outputImage" << endl;
//allocate memory on device
// NOTE(review): errorcode is overwritten by each call and never checked.
errorcode = cudaMalloc((void**) &dev_kernel, kernel_size * kernel_size * kernel_size * sizeof(*dev_kernel));
errorcode = cudaMalloc((void**) &dev_paddedImage, padded_image_x_size * padded_image_y_size * padded_image_z_size * sizeof(*dev_paddedImage));
errorcode = cudaMalloc((void**) &dev_outputImage, outputImage_x_size * outputImage_y_size * outputImage_z_size * sizeof(*dev_outputImage));
//cout << errorcode << endl;
//Copy host memory contents to device contents
cudaMemcpy(dev_kernel, kernel, kernel_size * kernel_size * kernel_size * sizeof(*kernel), cudaMemcpyHostToDevice);
cudaMemcpy(dev_paddedImage, paddedImage, padded_image_x_size * padded_image_y_size * padded_image_z_size * sizeof(*paddedImage), cudaMemcpyHostToDevice);
//prefer 48 KB L1
CUresult drivererrorcode = cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_L1);
//cout << drivererrorcode << endl;
int device;
cudaDeviceProp device_prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&device_prop, device);
int threadsPerBlock = device_prop.maxThreadsDim[0];
//int threadsPerBlock = 32;
//int numBlocks = device_prop.multiProcessorCount;
int numBlocks = device_prop.maxGridSize[0];
//Run kernel repeatedly with offset since we cannot launch too many threads at once
for (int k = 0; k < outputImage_x_size * outputImage_y_size * outputImage_z_size; k+= numBlocks * threadsPerBlock) //Run kernel on groups of pixels at a time
{
ConvolutionKernel_CUDA<<< numBlocks , threadsPerBlock >>>(dev_kernel, dev_paddedImage, dev_outputImage, outputImage_x_size, outputImage_y_size, outputImage_z_size, padded_image_x_size, padded_image_y_size, padded_image_z_size, kernel_size, k);
}
//Copy device memory contents back to host memory
cudaMemcpy(outputImage, dev_outputImage, outputImage_x_size * outputImage_y_size * outputImage_z_size * sizeof(*outputImage), cudaMemcpyDeviceToHost);
cout << cudaGetErrorString(cudaGetLastError()) << endl;
//Block until all precious commands are complete
cudaThreadSynchronize();
cudaFree(dev_kernel);
cudaFree(dev_paddedImage);
cudaFree(dev_outputImage);
//unflatten outputImage
// NOTE(review): output3DImage is never declared/allocated in this function.
for (int n = 0; n < outputImage_x_size * outputImage_y_size * outputImage_z_size; n++)
{
int k = n / (outputImage_z_size * outputImage_y_size);
int l = n % (outputImage_z_size * outputImage_y_size) / outputImage_z_size;
int m = n % outputImage_z_size;
output3DImage[k][l][m] = outputImage[n];
}
//Testing by making output equal to the input
/*for (int n = 0; n < outputImage_x_size * outputImage_y_size * outputImage_z_size; n++)
{
int k = n / (padded_image_z_size * padded_image_y_size);
int l = n % (padded_image_z_size * padded_image_y_size) / padded_image_z_size;
int m = n % padded_image_z_size;
if (k < padding || l < padding || m < padding || k >= outputImage_x_size || l >= outputImage_y_size || m >= outputImage_z_size)
continue;
else
output3DImage[k - padding][l - padding][m - padding] = paddedImage[n];
}*/
free(outputImage);
cout << "CUDA_Convolution done" << endl;
}
// Flattens a 3-D volume stored as nested pointers into one contiguous
// row-major buffer: element image[k][l][m] lands at flat index
// m + z*l + z*y*k (z varies fastest).
//
// Parameters: image - nested [x][y][z] array; image_*_size - its extents.
// Returns: a malloc'd buffer of x*y*z doubles; the caller owns (free()s) it.
double* flattenImage(double*** image, int image_x_size, int image_y_size, int image_z_size)
{
    double* flat_image = (double*) malloc(image_x_size * image_y_size * image_z_size * sizeof(double));
    for (int k = 0; k < image_x_size; k++)
        for (int l = 0; l < image_y_size; l++)
            for (int m = 0; m < image_z_size; m++)
                // Fixed: the original wrote to the undeclared name
                // 'flattened_image' and read from the unrelated 'paddedImage'
                // instead of the 'image' argument (and returned the
                // undeclared name), so it did not compile.
                flat_image[m + image_z_size * l + image_z_size * image_y_size * k] = image[k][l][m];
    return flat_image;
}
|
7c3021fdbf1532132733ba5df6395b973859d4a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/types.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <cmath>
#include <math_constants.h>
namespace {
}
// Marks self-occluded pixels. One block per batch sample (blockIdx.x); each
// thread strides over the h*w pixels with stride blockDim.x. For every pixel
// whose search distance exceeds minsr_dist, the kernel walks an integer
// (Bresenham-style) line from (x, y) toward pts2dsrch_v1_batch[b][y][x] and
// flags visited pixels in occ_selector when the sign test below fires and
// the two view-2 points are closer than minoc_dist.
// NOTE(review): the sign test appears to compare motion direction relative
// to the epipoles (epp1/epp2) in the two views - semantics inferred from
// the tensor names; confirm against the Python caller.
__global__ void self_occ_dtc_cuda_kernel(
const torch::PackedTensorAccessor<float,2,torch::RestrictPtrTraits,size_t> epp1,
const torch::PackedTensorAccessor<float,2,torch::RestrictPtrTraits,size_t> epp2,
const torch::PackedTensorAccessor<float,4,torch::RestrictPtrTraits,size_t> pts2dsrch_v1_batch,
const torch::PackedTensorAccessor<float,4,torch::RestrictPtrTraits,size_t> pts2d_v1_batch,
const torch::PackedTensorAccessor<float,4,torch::RestrictPtrTraits,size_t> pts2d_v2_batch,
const torch::PackedTensorAccessor<float,3,torch::RestrictPtrTraits,size_t> srh_distance,
torch::PackedTensorAccessor<int,4,torch::RestrictPtrTraits,size_t> occ_selector,
const float minsr_dist,
const float minoc_dist,
const int bz,
const int h,
const int w
) {
int x;
int y;
float dx;
float dy;
int xsign;
int ysign;
int xx;
int xy;
int yy;
int yx;
float D;
int cty;
int comx;
int comy;
for(int i = threadIdx.x; i < h * w; i = i + blockDim.x){
y = i / w;
x = i - y * w;
if (srh_distance[blockIdx.x][y][x] > minsr_dist){
// Vector from the pixel to its search endpoint in view 1.
dx = pts2dsrch_v1_batch[blockIdx.x][y][x][0] - float(x);
dy = pts2dsrch_v1_batch[blockIdx.x][y][x][1] - float(y);
if (dx > 0){
xsign = 1;
}
else{
xsign = -1;
}
if (dy > 0){
ysign = 1;
}
else{
ysign = -1;
}
dx = abs(dx);
dy = abs(dy);
// Choose the driving axis (swap so the walk iterates over the longer
// component); (xx,xy,yx,yy) maps walk coordinates back to pixel space.
if (dx > dy){
xx = xsign;
xy = 0;
yx = 0;
yy = ysign;
}
else{
D = dx;
dx = dy;
dy = D;
xx = 0;
xy = ysign;
yx = xsign;
yy = 0;
}
// D is the Bresenham-style error term.
D = 2 * dy - dx;
cty = 0;
for(int ctx = 0; ctx < ceil(dx) + 1; ctx = ctx + 1){
comx = x + ctx * xx + cty * yx;
comy = y + ctx * xy + cty * yy;
// Stop at the image border (last row/column excluded).
if ((comx < 0) || (comx >= w-1) || (comy < 0) || (comy >= h-1)){
break;
}
if (D >= 0){
cty += 1;
D -= 2 * dx;
}
D += 2 * dy;
// Only candidate pixels not yet flagged, excluding the start pixel.
if ((occ_selector[blockIdx.x][0][comy][comx] == 0) && (ctx > 0)){
// Opposite projection directions in the two views -> occlusion candidate.
if (
((pts2d_v1_batch[blockIdx.x][y][x][0] - epp1[blockIdx.x][0]) * (float(comx) - pts2d_v1_batch[blockIdx.x][y][x][0]) + (pts2d_v1_batch[blockIdx.x][y][x][1] - epp1[blockIdx.x][1]) * (float(comy) - pts2d_v1_batch[blockIdx.x][y][x][1])) *
((pts2d_v2_batch[blockIdx.x][y][x][0] - epp2[blockIdx.x][0]) * (pts2d_v2_batch[blockIdx.x][comy][comx][0] - pts2d_v2_batch[blockIdx.x][y][x][0]) + (pts2d_v2_batch[blockIdx.x][y][x][1] - epp2[blockIdx.x][1]) * (pts2d_v2_batch[blockIdx.x][comy][comx][1] - pts2d_v2_batch[blockIdx.x][y][x][1]))
< 0
){
// Flag only if the two view-2 projections nearly coincide.
if (sqrt(pow(pts2d_v2_batch[blockIdx.x][comy][comx][0] - pts2d_v2_batch[blockIdx.x][y][x][0], 2) + pow(pts2d_v2_batch[blockIdx.x][comy][comx][1] - pts2d_v2_batch[blockIdx.x][y][x][1], 2) + 1e-10) < minoc_dist){
occ_selector[blockIdx.x][0][comy][comx] = 1;
}
}
}
}
}
}
return;
}
// Host launcher: one block per batch sample (bz blocks), 256 threads
// striding over the h*w pixels. occ_selector is filled in-place with 0/1
// occlusion flags.
// NOTE(review): no error check or synchronization follows the launch -
// presumably the PyTorch caller synchronizes on the current stream; confirm.
void self_occ_dtc_cuda(
torch::Tensor epp1,
torch::Tensor epp2,
torch::Tensor pts2dsrch_v1_batch,
torch::Tensor pts2d_v1_batch,
torch::Tensor pts2d_v2_batch,
torch::Tensor srh_distance,
torch::Tensor occ_selector,
float minsr_dist,
float minoc_dist,
int bz,
int h,
int w
){
const int threads = 256;
hipLaunchKernelGGL(( self_occ_dtc_cuda_kernel), dim3(bz), dim3(threads), 0, 0,
epp1.packed_accessor<float,2,torch::RestrictPtrTraits,size_t>(),
epp2.packed_accessor<float,2,torch::RestrictPtrTraits,size_t>(),
pts2dsrch_v1_batch.packed_accessor<float,4,torch::RestrictPtrTraits,size_t>(),
pts2d_v1_batch.packed_accessor<float,4,torch::RestrictPtrTraits,size_t>(),
pts2d_v2_batch.packed_accessor<float,4,torch::RestrictPtrTraits,size_t>(),
srh_distance.packed_accessor<float,3,torch::RestrictPtrTraits,size_t>(),
occ_selector.packed_accessor<int,4,torch::RestrictPtrTraits,size_t>(),
minsr_dist,
minoc_dist,
bz,
h,
w
);
return;
} | 7c3021fdbf1532132733ba5df6395b973859d4a5.cu | #include <torch/types.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <cmath>
#include <math_constants.h>
namespace {
}
// Marks self-occluded pixels. One block per batch sample (blockIdx.x); each
// thread strides over the h*w pixels with stride blockDim.x. For every pixel
// whose search distance exceeds minsr_dist, the kernel walks an integer
// (Bresenham-style) line from (x, y) toward pts2dsrch_v1_batch[b][y][x] and
// flags visited pixels in occ_selector when the sign test below fires and
// the two view-2 points are closer than minoc_dist.
// NOTE(review): the sign test appears to compare motion direction relative
// to the epipoles (epp1/epp2) in the two views - semantics inferred from
// the tensor names; confirm against the Python caller.
__global__ void self_occ_dtc_cuda_kernel(
const torch::PackedTensorAccessor<float,2,torch::RestrictPtrTraits,size_t> epp1,
const torch::PackedTensorAccessor<float,2,torch::RestrictPtrTraits,size_t> epp2,
const torch::PackedTensorAccessor<float,4,torch::RestrictPtrTraits,size_t> pts2dsrch_v1_batch,
const torch::PackedTensorAccessor<float,4,torch::RestrictPtrTraits,size_t> pts2d_v1_batch,
const torch::PackedTensorAccessor<float,4,torch::RestrictPtrTraits,size_t> pts2d_v2_batch,
const torch::PackedTensorAccessor<float,3,torch::RestrictPtrTraits,size_t> srh_distance,
torch::PackedTensorAccessor<int,4,torch::RestrictPtrTraits,size_t> occ_selector,
const float minsr_dist,
const float minoc_dist,
const int bz,
const int h,
const int w
) {
int x;
int y;
float dx;
float dy;
int xsign;
int ysign;
int xx;
int xy;
int yy;
int yx;
float D;
int cty;
int comx;
int comy;
for(int i = threadIdx.x; i < h * w; i = i + blockDim.x){
y = i / w;
x = i - y * w;
if (srh_distance[blockIdx.x][y][x] > minsr_dist){
// Vector from the pixel to its search endpoint in view 1.
dx = pts2dsrch_v1_batch[blockIdx.x][y][x][0] - float(x);
dy = pts2dsrch_v1_batch[blockIdx.x][y][x][1] - float(y);
if (dx > 0){
xsign = 1;
}
else{
xsign = -1;
}
if (dy > 0){
ysign = 1;
}
else{
ysign = -1;
}
dx = abs(dx);
dy = abs(dy);
// Choose the driving axis (swap so the walk iterates over the longer
// component); (xx,xy,yx,yy) maps walk coordinates back to pixel space.
if (dx > dy){
xx = xsign;
xy = 0;
yx = 0;
yy = ysign;
}
else{
D = dx;
dx = dy;
dy = D;
xx = 0;
xy = ysign;
yx = xsign;
yy = 0;
}
// D is the Bresenham-style error term.
D = 2 * dy - dx;
cty = 0;
for(int ctx = 0; ctx < ceil(dx) + 1; ctx = ctx + 1){
comx = x + ctx * xx + cty * yx;
comy = y + ctx * xy + cty * yy;
// Stop at the image border (last row/column excluded).
if ((comx < 0) || (comx >= w-1) || (comy < 0) || (comy >= h-1)){
break;
}
if (D >= 0){
cty += 1;
D -= 2 * dx;
}
D += 2 * dy;
// Only candidate pixels not yet flagged, excluding the start pixel.
if ((occ_selector[blockIdx.x][0][comy][comx] == 0) && (ctx > 0)){
// Opposite projection directions in the two views -> occlusion candidate.
if (
((pts2d_v1_batch[blockIdx.x][y][x][0] - epp1[blockIdx.x][0]) * (float(comx) - pts2d_v1_batch[blockIdx.x][y][x][0]) + (pts2d_v1_batch[blockIdx.x][y][x][1] - epp1[blockIdx.x][1]) * (float(comy) - pts2d_v1_batch[blockIdx.x][y][x][1])) *
((pts2d_v2_batch[blockIdx.x][y][x][0] - epp2[blockIdx.x][0]) * (pts2d_v2_batch[blockIdx.x][comy][comx][0] - pts2d_v2_batch[blockIdx.x][y][x][0]) + (pts2d_v2_batch[blockIdx.x][y][x][1] - epp2[blockIdx.x][1]) * (pts2d_v2_batch[blockIdx.x][comy][comx][1] - pts2d_v2_batch[blockIdx.x][y][x][1]))
< 0
){
// Flag only if the two view-2 projections nearly coincide.
if (sqrt(pow(pts2d_v2_batch[blockIdx.x][comy][comx][0] - pts2d_v2_batch[blockIdx.x][y][x][0], 2) + pow(pts2d_v2_batch[blockIdx.x][comy][comx][1] - pts2d_v2_batch[blockIdx.x][y][x][1], 2) + 1e-10) < minoc_dist){
occ_selector[blockIdx.x][0][comy][comx] = 1;
}
}
}
}
}
}
return;
}
// Host launcher: one block per batch sample (bz blocks), 256 threads
// striding over the h*w pixels. occ_selector is filled in-place with 0/1
// occlusion flags.
// NOTE(review): no error check or synchronization follows the launch -
// presumably the PyTorch caller synchronizes on the current stream; confirm.
void self_occ_dtc_cuda(
torch::Tensor epp1,
torch::Tensor epp2,
torch::Tensor pts2dsrch_v1_batch,
torch::Tensor pts2d_v1_batch,
torch::Tensor pts2d_v2_batch,
torch::Tensor srh_distance,
torch::Tensor occ_selector,
float minsr_dist,
float minoc_dist,
int bz,
int h,
int w
){
const int threads = 256;
self_occ_dtc_cuda_kernel<<<bz, threads>>>(
epp1.packed_accessor<float,2,torch::RestrictPtrTraits,size_t>(),
epp2.packed_accessor<float,2,torch::RestrictPtrTraits,size_t>(),
pts2dsrch_v1_batch.packed_accessor<float,4,torch::RestrictPtrTraits,size_t>(),
pts2d_v1_batch.packed_accessor<float,4,torch::RestrictPtrTraits,size_t>(),
pts2d_v2_batch.packed_accessor<float,4,torch::RestrictPtrTraits,size_t>(),
srh_distance.packed_accessor<float,3,torch::RestrictPtrTraits,size_t>(),
occ_selector.packed_accessor<int,4,torch::RestrictPtrTraits,size_t>(),
minsr_dist,
minoc_dist,
bz,
h,
w
);
return;
} |
ea5feb9150b4a8cc6b97ba063b594386bbf80b14.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_map_kernels.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
namespace SySal { namespace GPU {
// Builds 1-D lookup tables of parabolic corrections: for each coordinate
// ix in [0, span), stores curvature * (ix - span/2)^2 for both the XY and
// the Z correction. The float products are implicitly truncated to int.
__global__ void curvaturemap_kernel(int *pXYCurv, int *pZCurv, int span, float xy_curvature, float z_curvature)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int span2 = span >> 1;
if (ix < span)
{
// Squared offset from the center of the span.
int dx = ix - span2;
int dx2 = dx * dx;
pXYCurv[ix] = xy_curvature * dx2;
pZCurv[ix] = z_curvature * dx2;
}
}
// Applies per-cluster geometric corrections in fixed-point arithmetic:
// curvature LUTs (pCurv / pZCurvX / pZCurvY), magnification gradients
// (dmagdx, dmagdy) and camera rotation (camrotsin, camrotcos), all relative
// to the image center (w2, h2). The *_SHIFT macros rescale the products.
// NOTE(review): 'd' sums pCurv[x] + pCurv[y] while the separate pCurvY
// parameter is never used - possibly pCurvY[y] was intended; confirm.
__global__ void correctcurvature_kernel(IntCluster *pC, short *pZC, int camrotsin, int camrotcos, int *pCurv, int *pCurvY, int *pZCurvX, int *pZCurvY, int dmagdx, int dmagdy, int total, int w2, int h2)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < total)
{
IntCluster *pc = pC + ix;
int x = pc->X;
int y = pc->Y;
int x2 = x - w2;
int y2 = y - h2;
int d = (pCurv[x] + pCurv[y]);
pc->X += (((x2 * d) >> XY_CURVATURE_SHIFT) + ((dmagdy * y2 * x2) >> XY_MAGNIFICATION_SHIFT)) + ((x2 * camrotcos) >> FRACT_RESCALE_SHIFT) - ((y2 * camrotsin) >> FRACT_RESCALE_SHIFT);
pc->Y += (((y2 * d) >> XY_CURVATURE_SHIFT) + ((dmagdx * y2 * x2) >> XY_MAGNIFICATION_SHIFT)) + ((x2 * camrotsin) >> FRACT_RESCALE_SHIFT) + ((y2 * camrotcos) >> FRACT_RESCALE_SHIFT);
pZC[ix] = ((pZCurvX[x] + pZCurvY[y]) >> Z_CURVATURE_SHIFT);
}
}
// Stamps the per-image position (pX[img], pY[img]) onto every cluster of
// image 'img' and offsets each cluster's Z by the given 'z'.
__global__ void setXYZs_kernel(short *pCX, short *pCY, short *pCZ, int total, int img, short *pX, short *pY, short z)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int x = pX[img];
int y = pY[img];
if (ix < total)
{
pCX[ix] = x;
pCY[ix] = y;
pCZ[ix] += z;
}
}
// Radially rescales cluster coordinates around the image center to correct
// a magnification factor 'demag' (fixed point with DEMAG_SHIFT fractional
// bits). Each thread processes 'cblock' consecutive clusters.
__global__ void correctdemag_kernel(IntCluster *pC, int cblock, int imgclusters, int demag, int width, int height)
{
int w2 = width >> 1;
int h2 = height >> 1;
int ic, imin, imax;
imin = (threadIdx.x + blockIdx.x * blockDim.x) * cblock;
imax = imin + cblock;
if (imax > imgclusters) imax = imgclusters;
IntCluster *pc = pC + imin;
for (ic = imin; ic < imax; ic++)
{
// X' = X + (X - w2) * demag / 2^DEMAG_SHIFT (same for Y).
pc->X = (((pc->X - w2) * demag) >> DEMAG_SHIFT) + pc->X;
pc->Y = (((pc->Y - h2) * demag) >> DEMAG_SHIFT) + pc->Y;
pc++;
}
}
// Clears the per-shift correlation histogram (deltas2 bins) and, from
// thread 0 only, the best-match accumulator. The commented 0xffffffff
// suggests *pbest once held a sentinel value; it is now reset to 0.
// NOTE: local 'i' is unused.
__global__ void resetcounts_kernel(int *pmapcounts, int deltas2, int *pbest)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int i;
if (ix < deltas2) pmapcounts[ix] = 0;
if (ix == 0) *pbest = 0;//0xffffffff;
}
// Fills pDeltas with a centered grid of candidate shifts: the first deltasx
// entries are X shifts and the next deltasy entries are Y shifts, spaced by
// 'tol' around image img's current shift estimate (pdx[img], pdy[img]).
// NOTE(review): there is no thread-index guard - every launched thread
// writes the whole table, so this is evidently intended for a
// single-thread launch; confirm the launch configuration.
__global__ void makedeltas_kernel(int *pDeltas, int tol, int deltasx, int deltasy, short *pdx, short *pdy, int img)
{
int i;
for (i = 0; i < deltasx; i++)
pDeltas[i] = tol * (i - deltasx / 2) + pdx[img];
for (i = 0; i < deltasy; i++)
pDeltas[i + deltasx] = tol * (i - deltasy / 2) + pdy[img];
}
// Refinement step: decodes the winning shift index from *pBest (low 16 bits
// hold the flattened position in the previous, coarser deltas grid) and
// rebuilds the candidate-shift table centered on that winner with the new,
// finer spacing 'tol'.
// NOTE(review): like makedeltas_kernel, no thread-index guard - evidently
// intended for a single-thread launch; confirm.
__global__ void makedeltas_fromshift_kernel(int *pDeltas, int tol, int deltasx, int deltasy, int *pBestDeltas, int *pBest, int bestdeltasx)
{
int best = *pBest & 0xffff;
int biy = best / bestdeltasx;
int bix = best % bestdeltasx;
int bestdx = pBestDeltas[bix];
int bestdy = pBestDeltas[biy + bestdeltasx];
int i;
for (i = 0; i < deltasx; i++)
pDeltas[i] = tol * (i - deltasx / 2) + bestdx;
for (i = 0; i < deltasy; i++)
pDeltas[i + deltasx] = tol * (i - deltasy / 2) + bestdy;
}
// Commits the winning shift: decodes it from *pBest / pBestDeltas, stores
// it in pDeltas[0..1], updates image img's absolute position (px, py) and
// shift (pdx, pdy) by the correction relative to the previous estimate.
// NOTE: the 'tol' parameter is unused here.
__global__ void makefinaldeltas_fromshift_kernel(int *pDeltas, int tol, int *pBestDeltas, int *pBest, int bestdeltasx, short *px, short *py, short *pdx, short *pdy, int img, int totalimg)
{
int best = *pBest & 0xffff;
int bestdx = pBestDeltas[best % bestdeltasx];
int bestdy = pBestDeltas[best / bestdeltasx + bestdeltasx];
pDeltas[0] = bestdx;
pDeltas[1] = bestdy;
int corrx = bestdx - pdx[img];
int corry = bestdy - pdy[img];
px[img] += corrx;
py[img] += corry;
pdx[img] = bestdx;
pdy[img] = bestdy;
// THIS WAS A BUG: NO COMPENSATION OF CORRECTIONS! DO NOT RESTORE!
// (The commented-out compensation of the next image's shift was removed
// deliberately - see the warning above; do not reintroduce it.)
if (img < totalimg - 1)
{
pdx[img + 1] -= corrx;
pdy[img + 1] -= corry;
}
}
// Re-bases all image positions and shifts so the reference image 'refimg'
// ends up with zero shift. Plain serial loop with no thread indexing -
// meant to run as a single thread.
__global__ void rescaleshifts_kernel(short *px, short *py, short *pdx, short *pdy, int refimg, int totalimg)
{
int dx = pdx[refimg];
int dy = pdy[refimg];
int img;
for (img = 0; img < totalimg; img++)
{
px[img] -= dx;
py[img] -= dy;
pdx[img] -= dx;
pdy[img] -= dy;
}
}
// One step of a pairwise arg-max reduction over the shift histogram: for
// stride 'step', each active slot keeps the larger of the pair
// (idx, idx + step) and records the winner's original index in pBest.
// On the first call (step == 1) pBest is initialized to the identity.
// After log2-many calls with doubling 'step', pBest[0] holds the index of
// the maximum count and pMapCounts[0] the maximum itself.
__global__ void findbest_kernel(int *pMapCounts, int deltas2, int step, int *pBest)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x) * 2 * step;
if (step == 1)
{
if (idx < deltas2) pBest[idx] = idx;
if (idx + step < deltas2) pBest[idx + step] = idx + step;
}
if (idx < deltas2 && idx + step < deltas2)
{
if (pMapCounts[idx] < pMapCounts[idx + step])
{
pBest[idx] = pBest[idx + step];
pMapCounts[idx] = pMapCounts[idx + step];
}
}
}
// One step of a tree reduction that sums per-fragment histograms: for each
// delta bin (y axis of the launch), adds histogram (idx + step) into
// histogram idx. Repeated with doubling 'step' to collapse 'total'
// fragments into fragment 0.
__global__ void sumcounts_kernel(int *pMapCounts, int deltas2, int total, int step)
{
int idelta = threadIdx.y + blockIdx.y * blockDim.y;
int idx = ((threadIdx.x + blockIdx.x * blockDim.x) * step) << 1;
if (idelta < deltas2 && (idx + step) < total)
pMapCounts[idx * deltas2 + idelta] += pMapCounts[(idx + step) * deltas2 + idelta];
}
// Inserts cluster #(idx * clusterblocksize + i) into the 2D spatial hash.
// A slot is claimed with atomicAdd on the cell counter; each cell stores at
// most maxcellcontent cluster pointers (valid slot indices 0..maxcellcontent-1).
// BUG FIX: the overflow test used 'a > maxcellcontent', so the thread that
// observed a == maxcellcontent wrote one slot past the cell's storage
// (corrupting the first slot of the next cell). The test is now
// 'a >= maxcellcontent', matching maphashchain_kernel.
__global__ void maphash_kernel(IntCluster *pC, int nc, int clusterblocksize, int i, Cell *pCell, IntCluster **pCellContents, int cellsize, int maxcellcontent, int nx, int ny)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int ic = idx * clusterblocksize + i;
    if (ic >= nc) return;
    int ix = pC[ic].X / cellsize;
    if (ix < 0 || ix >= nx) return;
    int iy = pC[ic].Y / cellsize;
    if (iy < 0 || iy >= ny) return;
    Cell *qCell = pCell + iy * nx + ix;
    int a = atomicAdd(&qCell->Count, 1);
    if (a >= maxcellcontent)
        atomicExch(&qCell->Count, maxcellcontent); // cell full: clamp counter, drop cluster
    else
    {
        IntCluster **qCellContents = pCellContents + maxcellcontent * (iy * nx + ix);
        qCellContents[a] = pC + ic;
    }
}
// Same as maphash_kernel but skips clusters with Area below minarea.
// BUG FIX: the overflow test used 'a > maxcellcontent', so the thread that
// observed a == maxcellcontent wrote one slot past the cell's storage; now
// 'a >= maxcellcontent' (valid slot indices are 0..maxcellcontent-1).
__global__ void maphash_minarea_kernel(IntCluster *pC, int nc, int clusterblocksize, int i, Cell *pCell, IntCluster **pCellContents, int cellsize, int maxcellcontent, int nx, int ny, int minarea)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int ic = idx * clusterblocksize + i;
    if (ic >= nc) return;
    if (pC[ic].Area < minarea) return; // below area cut: not hashed
    int ix = pC[ic].X / cellsize;
    if (ix < 0 || ix >= nx) return;
    int iy = pC[ic].Y / cellsize;
    if (iy < 0 || iy >= ny) return;
    Cell *qCell = pCell + iy * nx + ix;
    int a = atomicAdd(&qCell->Count, 1);
    if (a >= maxcellcontent)
        atomicExch(&qCell->Count, maxcellcontent); // cell full: clamp counter, drop cluster
    else
    {
        IntCluster **qCellContents = pCellContents + maxcellcontent * (iy * nx + ix);
        qCellContents[a] = pC + ic;
    }
}
// Resets to zero the Count of every hash cell touched by this thread's batch
// of clusters (same cell addressing as maphash_kernel). Plain stores are
// sufficient because all racing writers store the same value (0).
__global__ void clearhash_kernel(IntCluster *pC, int nc, int clusterblocksize, Cell *pCell, int cellsize, int nx, int ny)
{
    const int first = (threadIdx.x + blockIdx.x * blockDim.x) * clusterblocksize;
    for (int k = 0; k < clusterblocksize; k++)
    {
        const int ic = first + k;
        if (ic >= nc) return;
        const int cx = pC[ic].X / cellsize;
        const int cy = pC[ic].Y / cellsize;
        if (cx >= 0 && cx < nx && cy >= 0 && cy < ny)
            pCell[cy * nx + cx].Count = 0;
    }
}
// Copies this thread's chain block into its packed destination position;
// pChainBase gives each block's starting offset in the compacted array and
// pChainCounts the number of valid chains in the block.
// NOTE(review): idx is not bounds-checked — the grid must match the number
// of chain blocks exactly.
__global__ void compactchains_kernel(IntChain *pCompact, int *pChainBase, IntChain *pOriginal, int *pChainCounts, int chainblocksize)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    IntChain *src = pOriginal + tid * chainblocksize;
    IntChain *dst = pCompact + pChainBase[tid];
    const int n = pChainCounts[tid];
    for (int k = 0; k < n; k++)
        dst[k] = src[k];
}
// Computes the bounding window of all per-image stage positions (converted to
// the chain map's integer units), grown by half a field of view on each side,
// then sizes the hash grid that will index chains inside that window.
// 'pChLastV' is currently unused (the commented-out delta recentering).
// NOTE(review): serial over images — appears intended for a single-thread launch.
// BUG FIX: the Y bounding-box updates assigned in the wrong direction
// ("sumy = miny" / "sumy = maxy" were no-ops), so miny/maxy stayed frozen at
// image 0's value; fixed to mirror the X branch.
__global__ void makechainwindow_kernel(ChainMapWindow *pChMapWnd, short *px, short *py, int imgs, int width, int height, float pxmicron, float pymicron, int maxcells, int mincellsize, ChainView *pChV, Cell *pCells, IntCluster **pCellContent, int maxcellcontent, int stagex, int stagey, ChainView *pChLastV)
{
    int sumx = (px[0] * pxmicron);
    int sumy = (py[0] * pymicron);
    int i;
    int minx = sumx;
    int maxx = sumx;
    int miny = sumy;
    int maxy = sumy;
    int w = abs(width * pxmicron);
    int h = abs(height * pymicron);
    for (i = 1; i < imgs; i++)
    {
        sumx = (px[i] * pxmicron);
        if (sumx < minx) minx = sumx;
        if (sumx > maxx) maxx = sumx;
        sumy = (py[i] * pymicron);
        if (sumy < miny) miny = sumy; // was: sumy = miny (no-op)
        if (sumy > maxy) maxy = sumy; // was: sumy = maxy (no-op)
    }
    minx -= (w / 2);
    maxx += (w / 2);
    miny -= (h / 2);
    maxy += (h / 2);
    // grid resolution: bounded by sqrt(available cells) and the minimum cell size
    int cells = 2 * pChV->Count;
    if (cells > sqrt((float)maxcells)) cells = sqrt((float)maxcells);
    if (cells < 1) cells = 1;
    width = maxx - minx;
    height = maxy - miny;
    if (cells > width / mincellsize) cells = width / mincellsize;
    if (cells > height / mincellsize) cells = height / mincellsize;
    pChMapWnd->MinX = minx + stagex;
    pChMapWnd->MaxX = maxx + stagex;
    pChMapWnd->MinY = miny + stagey;
    pChMapWnd->MaxY = maxy + stagey;
    pChMapWnd->Width = width;
    pChMapWnd->Height = height;
    pChMapWnd->CellSize = __max(1, __max(width / cells, height / cells));
    pChMapWnd->MaxCellContent = maxcellcontent;
    pChMapWnd->NXCells = __max(1, width / pChMapWnd->CellSize);
    pChMapWnd->NYCells = __max(1, height / pChMapWnd->CellSize);
    pChMapWnd->pCells = pCells;
    pChMapWnd->pChains = (IntChain **)(void *)pCellContent;
}
// Inserts one chain per thread (chain #(idx * chainblocksize + i)) into the
// 2D spatial hash described by pChMapWnd. A slot is claimed with atomicAdd on
// the cell counter; when the cell is already full the counter is clamped back
// to maxcellcontent and the chain is dropped.
__global__ void maphashchain_kernel(ChainView *pChV, ChainMapWindow *pChMapWnd, int chainblocksize, int i)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int ic, ix, iy;
Cell *qCell = 0;
IntChain *pC = pChV->Chains;
int nc = pChV->Count;
int nx = pChMapWnd->NXCells;
int ny = pChMapWnd->NYCells;
int minx = pChMapWnd->MinX;
int miny = pChMapWnd->MinY;
int cellsize = pChMapWnd->CellSize;
int maxcellcontent = pChMapWnd->MaxCellContent;
Cell *pCell = pChMapWnd->pCells;
IntChain **pCellContents = pChMapWnd->pChains;
//for (i = 0; i < chainblocksize; i++)
{
ic = idx * chainblocksize + i;
if (ic >= nc) return;
// cell coordinates of this chain within the window
ix = (pC[ic].AvgX - minx) / cellsize;
if (ix < 0 || ix >= nx) return; //continue;
iy = (pC[ic].AvgY - miny) / cellsize;
if (iy < 0 || iy >= ny) return; //continue;
qCell = pCell + iy * nx + ix;
int c = atomicAdd(&qCell->Count, 1);
if (c >= maxcellcontent)
atomicExch(&qCell->Count, maxcellcontent);
else
{
IntChain **qCellContents = pCellContents + maxcellcontent * (iy * nx + ix);
qCellContents[c] = pC + ic;
}
}
}
// Builds the X/Y/Z chain-shift search grids plus, for each Z step, the
// slope-induced X/Y corrections (stored after the Z block, rescaled from Z
// to XY fixed-point units via the XY_SCALE_SHIFT - Z_SCALE_SHIFT factor).
// NOTE: plastview is deliberately forced to 0 below, disabling recentering on
// the previous view's deltas; the dead (plastview ? ...) branches remain to
// document the old behavior.
// NOTE(review): serial loops — appears intended for a single-thread launch.
__global__ void makechaindeltas_kernel(int *pDeltas, int xytol, int ztol, int deltasx, int deltasy, int deltasz, ChainView *plastview, int xc, int yc, float xslant, float yslant, float dxdz, float dydz)
{
plastview = 0;
int i;
for (i = 0; i < deltasx; i++)
pDeltas[i] = xytol * (i - deltasx / 2) + (plastview ? plastview->DeltaX : 0);
for (i = 0; i < deltasy; i++)
pDeltas[i + deltasx] = xytol * (i - deltasy / 2) + (plastview ? plastview->DeltaY : 0);
for (i = 0; i < deltasz; i++)
{
pDeltas[i + deltasx + deltasy] = ztol * (i - deltasz / 2) + (plastview ? (plastview->DeltaZ/* + (((int)(xslant * (xc - plastview->PositionX) + yslant * (yc - plastview->PositionY))) >> (XY_SCALE_SHIFT - Z_SCALE_SHIFT))*/) : 0);
pDeltas[i + deltasx + deltasy + deltasz] = (ztol * (i - deltasz / 2) * dxdz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));//(pDeltas[i + deltasx + deltasy] * dxdz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));
pDeltas[i + deltasx + deltasy + 2 * deltasz] = (ztol * (i - deltasz / 2) * dydz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));//(pDeltas[i + deltasx + deltasy] * dxdz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));
}
}
// Recenters the X/Y/Z chain-shift search grids on the best coarse result.
// The packed index in *pBest decodes as x + y*bestdeltasx + z*bestdeltasx*bestdeltasy;
// the X/Y centers also absorb the per-z slope corrections stored after the Z
// block of pBestDeltas.
// NOTE(review): serial loops — appears intended for a single-thread launch.
__global__ void makechaindeltas_fromshift_kernel(int *pDeltas, int xytol, int ztol, int deltasx, int deltasy, int deltasz, int *pBestDeltas, int *pBest, int bestdeltasx, int bestdeltasy, int bestdeltasz)
{
int best = (*pBest) & 0xffff;
int bdxy2 = bestdeltasx * bestdeltasy;
int biz = best / bdxy2;
int biy = (best % bdxy2) / bestdeltasx;
int bix = best % bestdeltasx;
int bestdx = pBestDeltas[bix] + pBestDeltas[biz + bestdeltasx + bestdeltasy + bestdeltasz];
int bestdy = pBestDeltas[biy + bestdeltasx] + pBestDeltas[biz + bestdeltasx + bestdeltasy + 2 * bestdeltasz];
int bestdz = pBestDeltas[biz + bestdeltasx + bestdeltasy];
int i;
for (i = 0; i < deltasx; i++)
pDeltas[i] = xytol * (i - deltasx / 2) + bestdx;
for (i = 0; i < deltasy; i++)
pDeltas[deltasx + i] = xytol * (i - deltasy / 2) + bestdy;
for (i = 0; i < deltasz; i++)
pDeltas[deltasx + deltasy + i] = ztol * (i - deltasz / 2) + bestdz;
}
// Subtracts the decoded best (x,y,z) shift from every chain of the view.
// The chains are stored immediately after the ChainView header; a grid-stride
// loop covers all pview->Count chains regardless of launch size.
__global__ void negshift_viewchains_kernel(ChainView *pview, int *pDeltas, int deltasXY, int deltasZ, int *pD)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int inc = gridDim.x * blockDim.x;
// decode the packed best index: x + y*deltasXY + z*deltasXY*deltasXY
int best = *pD & 0xffff;
int dix = pDeltas[best % deltasXY];
int diy = pDeltas[deltasXY + ((best % (deltasXY * deltasXY)) / deltasXY)];
int diz = pDeltas[2 * deltasXY + (best / (deltasXY * deltasXY))];
IntChain *pC = (IntChain *)(void *)((char *)(void *)pview + sizeof(ChainView));
while (idx < pview->Count)
{
pC[idx].AvgX -= dix;
pC[idx].AvgY -= diy;
pC[idx].AvgZ -= diz;
idx += inc;
}
}
// Stamps the stage position into a ChainView header and zeroes its deltas.
// The remaining parameters are unused: the old behavior (applying the best
// decoded shift to the view deltas and bumping pmaph->Views) was deliberately
// disabled — do not restore without reviewing callers.
__global__ void setchainviewheader_kernel(ChainMapHeader *pmaph, ChainView *pview, int px, int py, int pz, int *pDeltas, int deltasXY, int deltasZ, int *pD)
{
    pview->PositionX = px;
    pview->PositionY = py;
    pview->PositionZ = pz;
    pview->DeltaX = 0;
    pview->DeltaY = 0;
    pview->DeltaZ = 0;
}
/*****************************/
// Gathers every stride-th element of pInt into the dense output array pOut.
__global__ void compact_kernel(int * pInt, int stride, int count, int * pOut)
{
    const int k = blockDim.x * blockIdx.x + threadIdx.x;
    if (k >= count) return;
    pOut[k] = pInt[stride * k];
}
// Guarded max-reduction pass: slot i keeps the larger of itself and its
// partner halftotal slots away, skipping partners beyond the array end.
__global__ void max_check_kernel(int * pInt, int total, int halftotal)
{
    const int slot = blockDim.x * blockIdx.x + threadIdx.x;
    const int partner = slot + halftotal;
    if (partner < total)
    {
        const int a = pInt[slot];
        const int b = pInt[partner];
        pInt[slot] = __max(a, b);
    }
}
// Unguarded max-reduction pass (caller guarantees slot+halftotal is in range):
// slot i keeps the larger of itself and its partner.
__global__ void max_kernel(int * pInt, int halftotal)
{
    const int slot = blockDim.x * blockIdx.x + threadIdx.x;
    const int partner = pInt[slot + halftotal];
    pInt[slot] = __max(pInt[slot], partner);
}
// Guarded sum-reduction pass: slot i absorbs its partner halftotal slots
// away, skipping partners beyond the array end.
__global__ void sum_check_kernel(int * pInt, int total, int halftotal)
{
    const int slot = blockDim.x * blockIdx.x + threadIdx.x;
    if (slot + halftotal >= total) return;
    pInt[slot] = pInt[slot] + pInt[slot + halftotal];
}
// Unguarded sum-reduction pass (caller guarantees slot+halftotal is in range).
__global__ void sum_kernel(int * pInt, int halftotal)
{
    const int slot = blockDim.x * blockIdx.x + threadIdx.x;
    pInt[slot] = pInt[slot] + pInt[slot + halftotal];
}
// Guarded sum-reduction pass over several independent arrays laid out
// back-to-back: blockIdx.y selects the array (each 'total' ints long).
__global__ void sum_check_multiple_kernel(int * pInt, int total, int halftotal)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col + halftotal >= total) return;
    const int slot = blockIdx.y * total + col;
    pInt[slot] += pInt[slot + halftotal];
}
// Unguarded sum-reduction pass over several back-to-back arrays;
// blockIdx.y selects the array (each 'total' ints long).
__global__ void sum_multiple_kernel(int * pInt, int total, int halftotal)
{
    const int slot = blockIdx.y * total + blockDim.x * blockIdx.x + threadIdx.x;
    pInt[slot] += pInt[slot + halftotal];
}
// Packs each source value into the high 16 bits and its own slot index into
// the low 16 bits, so a later max-reduction can recover which slot won.
__global__ void shift_postfixid_kernel(int *pdest, int *psrc, int total)
{
    const unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
    if (k < total)
        pdest[k] = (psrc[k] << 16) | (k & 0xffff);
}
// Converts the flat pair index 'id' into (Index1, Index2) coordinates by
// descending a binary counting tree: paircomputer points at the 2-entry root
// level, and each wider child level (twice as many entries) lies at a lower
// address, reached by stepping paircomputer back by countatlevel per level.
// NOTE(review): the tree layout is produced elsewhere (by the paired
// reduction kernels) — verify against the builder before changing this walk.
__global__ void split_and_index_kernel(int *paircomputer, int depth, IntPair *pairindices, int totalpairs)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= totalpairs) return;
int d;
// res is 1-based within the current subtree; place is the node index
int res = id + 1;
int countatlevel = 2;
int place = 0;
for (d = 1; d < depth; d++)
{
place <<= 1;
if (paircomputer[place] < res)
{
// skip the left subtree and move to the right sibling
res -= paircomputer[place];
place++;
}
paircomputer -= countatlevel;
countatlevel <<= 1;
}
pairindices[id].Index1 = place;
pairindices[id].Index2 = res - 1;
}
// Subsamples the cluster list (one of every 'divider' clusters) and
// precomputes each sampled cluster's demagnification-corrected base position
// (fixed point, DEMAG_SHIFT fractional bits, relative to the image center
// w2/h2). Clusters below mingrainsize are flagged invalid (idoriginal = -1).
__global__ void trymap2_prepare_clusters_kernel(IntCluster *pc, IntMapCluster *pmc, int totalclusters, int divider, int mingrainsize, int w2, int h2, int demag, int *pValidFlag)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    const int idd = id * divider;
    if (idd >= totalclusters) return;
    IntMapCluster *q = pmc + id;
    const IntCluster *c = pc + idd;
    const bool keep = (c->Area >= mingrainsize);
    if (pValidFlag) pValidFlag[id] = keep ? 1 : 0;
    if (!keep)
    {
        q->idoriginal = -1;
        return;
    }
    q->idoriginal = idd;
    q->ibasex = c->X + (((c->X - w2) * demag) >> DEMAG_SHIFT);
    q->ibasey = c->Y + (((c->Y - h2) * demag) >> DEMAG_SHIFT);
}
// Applies the current trial shift to each precomputed base position and
// caches the resulting hash-cell coordinates; invalid clusters keep
// icell == -1 and are skipped.
__global__ void trymap2_shift_kernel(IntMapCluster *pmc, int totalmapclusters, int *pDeltaX, int *pDeltaY, int cellsize)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= totalmapclusters) return;
    IntMapCluster *q = pmc + id;
    q->icell = -1;
    if (q->idoriginal < 0) return;
    const int sx = q->ibasex + *pDeltaX;
    const int sy = q->ibasey + *pDeltaY;
    q->ishiftedx = sx;
    q->icellx = sx / cellsize;
    q->ishiftedy = sy;
    q->icelly = sy / cellsize;
}
// Trial-match pass: for pair id (cluster index in Index1), applies the X/Y
// delta combination selected by (blockIdx.y, blockIdx.z), locates the target
// hash cell and records 1 if any map cluster lies within tol on both axes.
// One result slot per (delta combination, pair).
// NOTE(review): pmc is advanced by Index1 without an idoriginal check —
// assumes the pair list was built from valid clusters only; confirm.
__global__ void trymap2_shiftmatch_kernel(IntMapCluster *pmc, IntPair *pPairs, int totalpairs, int *pDeltas, int cellsize, short nx, short ny, int *pmatchresult, int tol, Cell *pmapcell, IntCluster **pMapCellContent, int maxcellcontent)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
// pDeltas layout: [X deltas (gridDim.y)] then [Y deltas (gridDim.z)]
int ishiftedx = pmc->ibasex + pDeltas[blockIdx.y];
int ishiftedy = pmc->ibasey + pDeltas[gridDim.y + blockIdx.z];
int icellx = ishiftedx / cellsize;
int icelly = ishiftedy / cellsize;
pmatchresult += (blockIdx.z * gridDim.y + blockIdx.y) * totalpairs + id;
int imatchresult = 0;
if (icellx >= 0 && icellx < nx && icelly >= 0 && icelly < ny)
{
int icell = icelly * (int)nx + icellx;
pMapCellContent += icell * maxcellcontent;
short i = pmapcell[icell].Count;
while (--i >= 0)
{
IntCluster *pc2 = pMapCellContent[i];
imatchresult |= (abs(ishiftedx - pc2->X) < tol && abs(ishiftedy - pc2->Y) < tol) ? 1 : 0;
}
}
*pmatchresult = imatchresult;
}
// For each sampled cluster, counts the map clusters contained in the 3x3
// hash-cell neighborhood around its cell; the count is stored both in
// pClustersInCell (to size the candidate pair list) and cached in
// ipairblockcount. Invalid clusters report 0 candidates.
__global__ void finalmap_cell_kernel(IntMapCluster *pmc, int totalmapclusters, Cell *pmapcell, int *pClustersInCell, int nx, int ny)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalmapclusters) return;
pmc += id;
pClustersInCell[id] = 0;
pmc->icell = -1;
if (pmc->idoriginal < 0) return;
int mapclusters = 0;
#pragma unroll 3
for (int iiy = -1; iiy <= 1; iiy++)
{
int icelly = pmc->icelly + iiy;
if (icelly < 0 || icelly >= ny) continue;
#pragma unroll 3
for (int iix = -1; iix <= 1; iix++)
{
int icellx = pmc->icellx + iix;
if (icellx < 0 || icellx >= nx) continue;
mapclusters += pmapcell[icelly * nx + icellx].Count;
}
}
pClustersInCell[id] = pmc->ipairblockcount = mapclusters;
}
// For pair id = (cluster Index1, candidate ordinal Index2): walks the 3x3
// cell neighborhood in the same order as finalmap_cell_kernel to locate the
// Index2-th map cluster, then records its Chebyshev distance and content
// index when within tol, or -1/-1 otherwise. The pair with Index2 == 0 also
// records where the cluster's candidate block starts.
__global__ void finalmap_match_kernel(IntMapCluster *pmc, IntPair *pPairs, int totalpairs, int *pmatchresult, int *pmatchmap, int tol, Cell *pmapcell, IntCluster **pMapCellContent, int maxcellcontent, int nx, int ny)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
if (pmc->idoriginal < 0) return;
if (pPairs[id].Index2 == 0) pmc->ipairblockstart = id;
int mapclusters = 0;
#pragma unroll 3
for (int iiy = -1; iiy <= 1; iiy++)
{
int icelly = pmc->icelly + iiy;
if (icelly < 0 || icelly >= ny) continue;
#pragma unroll 3
for (int iix = -1; iix <= 1; iix++)
{
int icellx = pmc->icellx + iix;
if (icellx < 0 || icellx >= nx) continue;
int inc = pmapcell[icelly * nx + icellx].Count;
if (mapclusters + inc <= pPairs[id].Index2)
{
// the target ordinal lies beyond this cell: skip its whole content
mapclusters += inc;
}
else
{
int idc2 = (icelly * nx + icellx) * maxcellcontent + pPairs[id].Index2 - mapclusters;
IntCluster *pc2 = pMapCellContent[idc2];
// Chebyshev distance between shifted cluster and map candidate
int dist = __max(abs(pmc->ishiftedx - pc2->X), abs(pmc->ishiftedy - pc2->Y));
if (dist < tol)
{
pmatchresult[id] = dist;
pmatchmap[id] = idc2;
}
else
{
pmatchresult[id] = pmatchmap[id] = -1;
}
return;
}
}
}
}
// Scans each cluster's candidate block for the smallest recorded distance.
// The branch that would link the cluster to its best map candidate (adjusting
// its position and marking the candidate used) is deliberately disabled with
// if (0) — see inline comment; every cluster currently gets a null chain link.
__global__ void finalmap_optimize_kernel(IntCluster *pc, IntMapCluster *pmc, int clusteroffset, int totalclusters, int *pmatchresult, int *pmatchmap, IntCluster **pMapCellContent, IntCluster **pClusterChain)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalclusters) return;
pmc += id;
if (pmc->idoriginal < 0) return;
if (pmc->ipairblockcount <= 0) return;
int i = pmc->ipairblockcount - 1;
int ibest = i;
int iblockstart = pmc->ipairblockstart;
int d;
// linear arg-min over the candidate block; -1 entries mean "no match"
int dbest = pmatchresult[iblockstart + i];
while (--i >= 0)
{
d = pmatchresult[iblockstart + i];
if (d >= 0 && (dbest < 0 || dbest > d))
{
ibest = i;
dbest = d;
}
}
if (0/*KRYSS DISABLE CHAIN FORMATION 20140728 dbest >= 0*/)
{
IntCluster *pBest = pMapCellContent[pmatchmap[iblockstart + ibest]];
pBest->Area = -abs(pBest->Area);
pc += pmc->idoriginal;
pc->X += (pBest->X - pmc->ibasex);
pc->Y += (pBest->Y - pmc->ibasey);
pClusterChain[clusteroffset + pmc->idoriginal] = pBest;
}
else pClusterChain[clusteroffset + pmc->idoriginal] = 0;
}
// Collapses each cluster chain (linked through pClusterChains) into one
// IntChain record: area-weighted average position with per-cluster image
// offsets and slant correction, converted to stage-centered fixed-point
// coordinates. Chains failing the minclusters/minvol cuts stay zeroed and
// are flagged invalid in pvalid.
// NOTE: 'pnc = pClusterChains[...]' in the while condition is an intentional
// assignment (follow the chain link), not a comparison.
__global__ void makechain_kernel(IntCluster *pC, int totalclusters, short w2, short h2, short *pClusterXs, short *pClusterYs, short *pClusterZs, int xslant, int yslant, IntCluster **pClusterChains, short minclusters, short minvol, float xtomicron, float ytomicron, int stagex, int stagey, IntChain *pChain, int viewtag, int *pvalid)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalclusters) return;
pvalid[id] = pChain[id].Volume = pChain[id].Clusters = 0;
int avgx = 0;
int avgy = 0;
int avgz = 0;
IntCluster *pnc = pC + id;
// negative Area marks a cluster already absorbed into another chain
if (pnc->Area < 0) return;
short clusters = 1;
int area = abs(pnc->Area);
int volume = area;
avgx = (pnc->X + pClusterXs[id]) * area;
avgy = (pnc->Y + pClusterYs[id]) * area;
avgz = area * (pClusterZs[id] - (( (xslant * (pnc->X - w2) + yslant * (pnc->Y - h2)) >> SLOPE_SHIFT)));
int ip;
while (pnc = pClusterChains[ip = pnc - pC])
{
ip = pnc - pC;
clusters++;
area = abs(pnc->Area);
volume += area;
avgx += area * (pnc->X + pClusterXs[ip]);
avgy += area * (pnc->Y + pClusterYs[ip]);
avgz += area * (pClusterZs[ip] - (( (xslant * (pnc->X - w2) + yslant * (pnc->Y - h2)) >> SLOPE_SHIFT)));
}
if (clusters >= minclusters && volume >= minvol)
{
IntChain *psC = pChain + id;
psC->Clusters = clusters;
avgx /= volume;
avgy /= volume;
avgz /= volume;
psC->Volume = volume;
psC->AvgX = ((avgx - w2) << XY_SCALE_SHIFT) * xtomicron + stagex;
psC->AvgY = ((avgy - h2) << XY_SCALE_SHIFT) * ytomicron + stagey;
psC->AvgZ = avgz;
psC->ViewTag = viewtag;
psC->Reserved = 0;
pvalid[id] = 1;
}
}
// Inserts one of every 'divider' chains of the view into the chain hash grid.
// Slots are claimed with atomicAdd on the cell counter; when the cell is
// already full the counter is clamped back to MaxCellContent and the chain is
// dropped.
__global__ void maphashchain_kernel(ChainView *pChV, ChainMapWindow *pChMapWnd, int divider)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx * divider >= pChV->Count) return;
IntChain *pC = pChV->Chains + idx * divider;
int nx = pChMapWnd->NXCells;
int ny = pChMapWnd->NYCells;
int minx = pChMapWnd->MinX;
int miny = pChMapWnd->MinY;
int cellsize = pChMapWnd->CellSize;
int maxcellcontent = pChMapWnd->MaxCellContent;
int ix, iy;
ix = (pC->AvgX - minx) / cellsize;
if (ix < 0 || ix >= nx) return;
iy = (pC->AvgY - miny) / cellsize;
if (iy < 0 || iy >= ny) return;
Cell *qCell = pChMapWnd->pCells + iy * nx + ix;
int c = atomicAdd(&qCell->Count, 1);
if (c >= maxcellcontent) atomicExch(&qCell->Count, maxcellcontent);
else pChMapWnd->pChains[maxcellcontent * (iy * nx + ix) + c] = pC;
}
// Resets the Count of every hash cell touched by the subsampled chains
// (same addressing as maphashchain_kernel). Plain stores are sufficient
// because all racing writers store the same value (0).
__global__ void clearhashchain2_kernel(ChainView *pChV, ChainMapWindow *pChMapWnd, int divider)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int ichain = tid * divider;
    if (ichain >= pChV->Count) return;
    const IntChain *ch = pChV->Chains + ichain;
    const int cs = pChMapWnd->CellSize;
    const int nx = pChMapWnd->NXCells;
    const int cx = (ch->AvgX - pChMapWnd->MinX) / cs;
    if (cx < 0 || cx >= nx) return;
    const int cy = (ch->AvgY - pChMapWnd->MinY) / cs;
    if (cy < 0 || cy >= pChMapWnd->NYCells) return;
    pChMapWnd->pCells[cy * nx + cx].Count = 0;
}
// Copies each chain's position into its IntMapChain work record; chains with
// Volume below minchainsize are flagged invalid (idoriginal = -1) and, when
// provided, marked 0 in pValidFlag.
__global__ void trymapchain_prepare_chains_kernel(IntChain *pc, IntMapChain *pmc, int totalchains, int minchainsize, int *pValidFlag)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= totalchains) return;
    const IntChain *c = pc + id;
    IntMapChain *m = pmc + id;
    const bool keep = (c->Volume >= minchainsize);
    if (pValidFlag) pValidFlag[id] = keep ? 1 : 0;
    if (!keep)
    {
        m->idoriginal = -1;
        return;
    }
    m->idoriginal = id;
    m->ibasex = c->AvgX;
    m->ibasey = c->AvgY;
    m->ibasez = c->AvgZ;
}
// Trial-match pass for chain alignment: for pair id (chain index in Index1),
// applies the X/Y delta combination selected by (blockIdx.y, blockIdx.z),
// then for each trial Z delta counts whether any chain of the target hash
// cell matches within xytol (X/Y) and ztol (Z).
// pDeltas layout: [X deltas (gridDim.y)][Y deltas (gridDim.z)][Z deltas (zsteps)].
// BUG FIX: the cell-content cursor 'i' was initialized once outside the Z
// loop, so after the first Z step (iz = zsteps-1) it was already exhausted
// and every other Z delta scored zero matches; the scan now restarts for
// each Z step.
__global__ void trymapchain_shiftmatch_kernel(IntMapChain *pmc, IntPair *pPairs, int totalpairs, int *pMapCount, int *pDeltas, ChainMapWindow *pChMapWnd, int xytol, short zsteps, int ztol)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= totalpairs) return;
    pmc += pPairs[id].Index1;
    int ishiftedx = pmc->ibasex + pDeltas[blockIdx.y];
    int ishiftedy = pmc->ibasey + pDeltas[gridDim.y + blockIdx.z];
    int ibasez = pmc->ibasez;
    int icellx = (ishiftedx - pChMapWnd->MinX) / pChMapWnd->CellSize;
    int icelly = (ishiftedy - pChMapWnd->MinY) / pChMapWnd->CellSize;
    pMapCount += blockIdx.z * gridDim.y + blockIdx.y;
    if (icellx >= 0 && icellx < pChMapWnd->NXCells && icelly >= 0 && icelly < pChMapWnd->NYCells)
    {
        int icell = icelly * (int)pChMapWnd->NXCells + icellx;
        IntChain **pCellContent = pChMapWnd->pChains + icell * pChMapWnd->MaxCellContent;
        short count = pChMapWnd->pCells[icell].Count;
        for (int iz = zsteps - 1; iz >= 0; iz--)
        {
            int zshift = pDeltas[gridDim.y + gridDim.z + iz];
            bool hasmatch = false;
            for (short i = 0; i < count; i++) // restart cell scan for every Z step
            {
                IntChain *pc2 = pCellContent[i];
                hasmatch = hasmatch || (abs(ishiftedx - pc2->AvgX) < xytol && abs(ishiftedy - pc2->AvgY) < xytol && abs(ibasez + zshift - pc2->AvgZ) < ztol);
            }
            if (hasmatch) atomicAdd(pMapCount + (iz * gridDim.y * gridDim.z), 1);
        }
    }
}
// Decodes the winning (x,y,z) refinement-grid index packed in the low 16 bits
// of *pBest and stores the corresponding shift triple into pDeltas[0..2].
__global__ void make_finalchainshift_kernel(int *pDeltas, int *pRefineDeltas, int *pBest, int deltasXY)
{
    const int winner = *pBest & 0xffff;
    const int plane = deltasXY * deltasXY;
    pDeltas[0] = pRefineDeltas[winner % deltasXY];
    pDeltas[1] = pRefineDeltas[deltasXY + (winner % plane) / deltasXY];
    pDeltas[2] = pRefineDeltas[2 * deltasXY + winner / plane];
}
// Applies the final shift (pDeltas[0..2]) to each paired chain, caches its
// hash cell index, and reports the cell's chain count in pvalid (0 when the
// shifted position falls outside the grid) for the next pair-building step.
__global__ void finalmapchain_cell_kernel(IntMapChain *pmc, IntPair *pPairs, int totalpairs, int *pDeltas, ChainMapWindow *pChMapWnd, int *pvalid)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
pmc->ishiftedx = pmc->ibasex + pDeltas[0];
pmc->ishiftedy = pmc->ibasey + pDeltas[1];
pmc->ishiftedz = pmc->ibasez + pDeltas[2];
int icellx = (pmc->ishiftedx - pChMapWnd->MinX) / pChMapWnd->CellSize;
int icelly = (pmc->ishiftedy - pChMapWnd->MinY) / pChMapWnd->CellSize;
int nx = pChMapWnd->NXCells;
int ny = pChMapWnd->NYCells;
if (icelly < 0 || icelly >= ny || icellx < 0 || icellx >= nx)
{
pvalid[pPairs[id].Index1] = 0;
return;
}
pvalid[pPairs[id].Index1] = pChMapWnd->pCells[pmc->icell = icelly * (int)nx + icellx].Count;
}
// For pair id = (chain Index1, cell-content ordinal Index2): if the shifted
// chain matches a map chain within xytol/ztol, zero the chain's Volume and
// Clusters so the duplicate is discarded by finalmapchain_filter_kernel.
__global__ void finalmapchain_match_kernel(IntChain *pc, IntMapChain *pmc, IntPair *pPairs, int totalpairs, ChainMapWindow *pChMapWnd, int xytol, int ztol)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
IntChain *pc2 = pChMapWnd->pChains[pmc->icell * pChMapWnd->MaxCellContent + pPairs[id].Index2];
if (abs(pmc->ishiftedx - pc2->AvgX) < xytol && abs(pmc->ishiftedy - pc2->AvgY) < xytol && abs(pmc->ishiftedz - pc2->AvgZ) < ztol)
{
pc[pPairs[id].Index1].Volume = pc[pPairs[id].Index1].Clusters = 0;
}
}
// Marks each surviving chain (positive Volume) with 1 in the validity array;
// chains zeroed by the duplicate-match pass get 0.
__global__ void finalmapchain_filter_kernel(IntChain *pc, int totalchains, int *pvalid)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < totalchains)
        pvalid[id] = (pc[id].Volume > 0) ? 1 : 0;
}
// Gathers the surviving chains (indexed through pPairs[..].Index1) into a
// dense array; thread 0 also stamps the final count into the view header.
__global__ void compactchains_kernel(IntChain *pcmpct, IntChain *pch, IntPair *pPairs, int totalpairs, ChainView *pChV)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid == 0) pChV->Count = totalpairs;
    if (tid < totalpairs)
        pcmpct[tid] = pch[pPairs[tid].Index1];
}
// Subtracts the final alignment shift (pDeltas[0..2]) from every chain of
// the view, one thread per chain; the chains are stored immediately after
// the ChainView header.
__global__ void negshift_viewchains_kernel(ChainView *pview, int *pDeltas)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= pview->Count) return;
    IntChain *ch = (IntChain *)(void *)((char *)(void *)pview + sizeof(ChainView)) + id;
    ch->AvgX -= pDeltas[0];
    ch->AvgY -= pDeltas[1];
    ch->AvgZ -= pDeltas[2];
}
/*****************************/
} } | ea5feb9150b4a8cc6b97ba063b594386bbf80b14.cu | #include "gpu_map_kernels.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
namespace SySal { namespace GPU {
// Fills per-coordinate lookup tables with quadratic-in-distance-from-center
// correction values for XY and Z field curvature.
__global__ void curvaturemap_kernel(int *pXYCurv, int *pZCurv, int span, float xy_curvature, float z_curvature)
{
    const int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k >= span) return;
    const int d = k - (span >> 1);
    const int d2 = d * d;
    pXYCurv[k] = xy_curvature * d2;
    pZCurv[k] = z_curvature * d2;
}
// Applies per-cluster optical corrections in fixed point: field curvature
// (via the pCurv lookup tables), XY magnification gradients, and camera
// rotation for X/Y; pZC receives the Z-curvature correction.
// NOTE(review): pCurvY is unused — X and Y both index pCurv here; confirm
// this is intentional before touching the parameter list.
__global__ void correctcurvature_kernel(IntCluster *pC, short *pZC, int camrotsin, int camrotcos, int *pCurv, int *pCurvY, int *pZCurvX, int *pZCurvY, int dmagdx, int dmagdy, int total, int w2, int h2)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < total)
{
IntCluster *pc = pC + ix;
int x = pc->X;
int y = pc->Y;
// coordinates relative to the image center
int x2 = x - w2;
int y2 = y - h2;
int d = (pCurv[x] + pCurv[y]);
pc->X += (((x2 * d) >> XY_CURVATURE_SHIFT) + ((dmagdy * y2 * x2) >> XY_MAGNIFICATION_SHIFT)) + ((x2 * camrotcos) >> FRACT_RESCALE_SHIFT) - ((y2 * camrotsin) >> FRACT_RESCALE_SHIFT);
pc->Y += (((y2 * d) >> XY_CURVATURE_SHIFT) + ((dmagdx * y2 * x2) >> XY_MAGNIFICATION_SHIFT)) + ((x2 * camrotsin) >> FRACT_RESCALE_SHIFT) + ((y2 * camrotcos) >> FRACT_RESCALE_SHIFT);
pZC[ix] = ((pZCurvX[x] + pZCurvY[y]) >> Z_CURVATURE_SHIFT);
}
}
// Stamps image img's stage X/Y on every cluster record and offsets each
// cluster's Z by the image's z value.
__global__ void setXYZs_kernel(short *pCX, short *pCY, short *pCZ, int total, int img, short *pX, short *pY, short z)
{
    const short x = pX[img];
    const short y = pY[img];
    const int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k >= total) return;
    pCX[k] = x;
    pCY[k] = y;
    pCZ[k] += z;
}
// Applies the demagnification correction to a contiguous batch of cblock
// clusters per thread: each coordinate is displaced in proportion to its
// distance from the image center (fixed point, DEMAG_SHIFT fractional bits).
__global__ void correctdemag_kernel(IntCluster *pC, int cblock, int imgclusters, int demag, int width, int height)
{
    const int w2 = width >> 1;
    const int h2 = height >> 1;
    int first = (threadIdx.x + blockIdx.x * blockDim.x) * cblock;
    int last = first + cblock;
    if (last > imgclusters) last = imgclusters;
    for (IntCluster *pc = pC + first; first < last; first++, pc++)
    {
        pc->X += ((pc->X - w2) * demag) >> DEMAG_SHIFT;
        pc->Y += ((pc->Y - h2) * demag) >> DEMAG_SHIFT;
    }
}
// Zeroes the per-delta match counters and resets the best-match accumulator
// (one thread clears one counter; thread 0 also clears *pbest).
// FIX: removed the unused local 'i' (dead code).
__global__ void resetcounts_kernel(int *pmapcounts, int deltas2, int *pbest)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    if (ix < deltas2) pmapcounts[ix] = 0;
    if (ix == 0) *pbest = 0;
}
// Builds the X/Y shift search grids centered on image img's current shift
// estimate, spaced by tol.
// NOTE(review): serial loops — appears intended for a single-thread launch.
__global__ void makedeltas_kernel(int *pDeltas, int tol, int deltasx, int deltasy, short *pdx, short *pdy, int img)
{
    const int cx = pdx[img];
    const int cy = pdy[img];
    int k;
    for (k = 0; k < deltasx; k++)
        pDeltas[k] = tol * (k - deltasx / 2) + cx;
    for (k = 0; k < deltasy; k++)
        pDeltas[deltasx + k] = tol * (k - deltasy / 2) + cy;
}
// Recenters the X/Y shift search grids on the best coarse-search result
// (packed grid index in the low 16 bits of *pBest; pBestDeltas holds the X
// entries first, then the Y entries).
__global__ void makedeltas_fromshift_kernel(int *pDeltas, int tol, int deltasx, int deltasy, int *pBestDeltas, int *pBest, int bestdeltasx)
{
int best = *pBest & 0xffff;
int biy = best / bestdeltasx;
int bix = best % bestdeltasx;
int bestdx = pBestDeltas[bix];
int bestdy = pBestDeltas[biy + bestdeltasx];
int i;
for (i = 0; i < deltasx; i++)
pDeltas[i] = tol * (i - deltasx / 2) + bestdx;
for (i = 0; i < deltasy; i++)
pDeltas[i + deltasx] = tol * (i - deltasy / 2) + bestdy;
}
// Stores image img's final X/Y shift (decoded from *pBest) and propagates
// the correction into the per-image position/shift tables; the next image's
// predicted shift absorbs the correction. 'tol' is unused here.
__global__ void makefinaldeltas_fromshift_kernel(int *pDeltas, int tol, int *pBestDeltas, int *pBest, int bestdeltasx, short *px, short *py, short *pdx, short *pdy, int img, int totalimg)
{
int best = *pBest & 0xffff;
int bestdx = pBestDeltas[best % bestdeltasx];
int bestdy = pBestDeltas[best / bestdeltasx + bestdeltasx];
pDeltas[0] = bestdx;
pDeltas[1] = bestdy;
int corrx = bestdx - pdx[img];
int corry = bestdy - pdy[img];
px[img] += corrx;
py[img] += corry;
pdx[img] = bestdx;
pdy[img] = bestdy;
// THIS WAS A BUG: NO COMPENSATION OF CORRECTIONS! DO NOT RESTORE!
if (img < totalimg - 1)
{
pdx[img + 1] -= corrx;
pdy[img + 1] -= corry;
}
}
// Makes refimg the zero-shift reference by subtracting its shift from every
// image's position and shift (serial single-thread logic).
__global__ void rescaleshifts_kernel(short *px, short *py, short *pdx, short *pdy, int refimg, int totalimg)
{
int dx = pdx[refimg];
int dy = pdy[refimg];
int img;
for (img = 0; img < totalimg; img++)
{
px[img] -= dx;
py[img] -= dy;
pdx[img] -= dx;
pdy[img] -= dy;
}
}
// One arg-max reduction pass over per-delta match counts: slot idx absorbs
// slot idx+step and pBest tracks the winning index (seeded on the step == 1
// pass; each pair is owned by one thread, so seed and compare do not race).
__global__ void findbest_kernel(int *pMapCounts, int deltas2, int step, int *pBest)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x) * 2 * step;
if (step == 1)
{
if (idx < deltas2) pBest[idx] = idx;
if (idx + step < deltas2) pBest[idx + step] = idx + step;
}
if (idx < deltas2 && idx + step < deltas2)
{
if (pMapCounts[idx] < pMapCounts[idx + step])
{
pBest[idx] = pBest[idx + step];
pMapCounts[idx] = pMapCounts[idx + step];
}
}
}
// One tree-reduction pass summing per-image counter rows column-wise:
// row idx absorbs row idx+step for every delta column (y dimension).
__global__ void sumcounts_kernel(int *pMapCounts, int deltas2, int total, int step)
{
int idelta = threadIdx.y + blockIdx.y * blockDim.y;
int idx = ((threadIdx.x + blockIdx.x * blockDim.x) * step) << 1;
if (idelta < deltas2 && (idx + step) < total)
pMapCounts[idx * deltas2 + idelta] += pMapCounts[(idx + step) * deltas2 + idelta];
}
// Inserts cluster #(idx * clusterblocksize + i) into the 2D spatial hash.
// A slot is claimed with atomicAdd on the cell counter; each cell stores at
// most maxcellcontent cluster pointers (valid slot indices 0..maxcellcontent-1).
// BUG FIX: the overflow test used 'a > maxcellcontent', so the thread that
// observed a == maxcellcontent wrote one slot past the cell's storage
// (corrupting the first slot of the next cell). The test is now
// 'a >= maxcellcontent', matching maphashchain_kernel.
__global__ void maphash_kernel(IntCluster *pC, int nc, int clusterblocksize, int i, Cell *pCell, IntCluster **pCellContents, int cellsize, int maxcellcontent, int nx, int ny)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int ic = idx * clusterblocksize + i;
    if (ic >= nc) return;
    int ix = pC[ic].X / cellsize;
    if (ix < 0 || ix >= nx) return;
    int iy = pC[ic].Y / cellsize;
    if (iy < 0 || iy >= ny) return;
    Cell *qCell = pCell + iy * nx + ix;
    int a = atomicAdd(&qCell->Count, 1);
    if (a >= maxcellcontent)
        atomicExch(&qCell->Count, maxcellcontent); // cell full: clamp counter, drop cluster
    else
    {
        IntCluster **qCellContents = pCellContents + maxcellcontent * (iy * nx + ix);
        qCellContents[a] = pC + ic;
    }
}
// Same as maphash_kernel but skips clusters with Area below minarea.
// BUG FIX: the overflow test used 'a > maxcellcontent', so the thread that
// observed a == maxcellcontent wrote one slot past the cell's storage; now
// 'a >= maxcellcontent' (valid slot indices are 0..maxcellcontent-1).
__global__ void maphash_minarea_kernel(IntCluster *pC, int nc, int clusterblocksize, int i, Cell *pCell, IntCluster **pCellContents, int cellsize, int maxcellcontent, int nx, int ny, int minarea)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int ic = idx * clusterblocksize + i;
    if (ic >= nc) return;
    if (pC[ic].Area < minarea) return; // below area cut: not hashed
    int ix = pC[ic].X / cellsize;
    if (ix < 0 || ix >= nx) return;
    int iy = pC[ic].Y / cellsize;
    if (iy < 0 || iy >= ny) return;
    Cell *qCell = pCell + iy * nx + ix;
    int a = atomicAdd(&qCell->Count, 1);
    if (a >= maxcellcontent)
        atomicExch(&qCell->Count, maxcellcontent); // cell full: clamp counter, drop cluster
    else
    {
        IntCluster **qCellContents = pCellContents + maxcellcontent * (iy * nx + ix);
        qCellContents[a] = pC + ic;
    }
}
// Resets to zero the Count of every hash cell touched by this thread's batch
// of clusters (plain stores: all racing writers store the same value, 0).
__global__ void clearhash_kernel(IntCluster *pC, int nc, int clusterblocksize, Cell *pCell, int cellsize, int nx, int ny)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int i, ic, ix, iy;
for (i = 0; i < clusterblocksize; i++)
{
ic = idx * clusterblocksize + i;
if (ic >= nc) return;
ix = pC[ic].X / cellsize;
if (ix < 0 || ix >= nx) continue;
iy = pC[ic].Y / cellsize;
if (iy < 0 || iy >= ny) continue;
pCell[iy * nx + ix].Count = 0;
}
}
// Packs each thread's chain block at its pChainBase offset in the compacted
// array; pChainCounts gives the valid chains per block.
// NOTE(review): idx is not bounds-checked — the grid must match the number
// of chain blocks exactly.
__global__ void compactchains_kernel(IntChain *pCompact, int *pChainBase, IntChain *pOriginal, int *pChainCounts, int chainblocksize)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int i;
int base = pChainBase[idx];
for (i = 0; i < pChainCounts[idx]; i++)
pCompact[base + i] = pOriginal[idx * chainblocksize + i];
}
// Computes the bounding window of all per-image stage positions (converted to
// the chain map's integer units), grown by half a field of view on each side,
// then sizes the hash grid that will index chains inside that window.
// 'pChLastV' is currently unused (the commented-out delta recentering).
// NOTE(review): serial over images — appears intended for a single-thread launch.
// BUG FIX: the Y bounding-box updates assigned in the wrong direction
// ("sumy = miny" / "sumy = maxy" were no-ops), so miny/maxy stayed frozen at
// image 0's value; fixed to mirror the X branch.
__global__ void makechainwindow_kernel(ChainMapWindow *pChMapWnd, short *px, short *py, int imgs, int width, int height, float pxmicron, float pymicron, int maxcells, int mincellsize, ChainView *pChV, Cell *pCells, IntCluster **pCellContent, int maxcellcontent, int stagex, int stagey, ChainView *pChLastV)
{
    int sumx = (px[0] * pxmicron);
    int sumy = (py[0] * pymicron);
    int i;
    int minx = sumx;
    int maxx = sumx;
    int miny = sumy;
    int maxy = sumy;
    int w = abs(width * pxmicron);
    int h = abs(height * pymicron);
    for (i = 1; i < imgs; i++)
    {
        sumx = (px[i] * pxmicron);
        if (sumx < minx) minx = sumx;
        if (sumx > maxx) maxx = sumx;
        sumy = (py[i] * pymicron);
        if (sumy < miny) miny = sumy; // was: sumy = miny (no-op)
        if (sumy > maxy) maxy = sumy; // was: sumy = maxy (no-op)
    }
    minx -= (w / 2);
    maxx += (w / 2);
    miny -= (h / 2);
    maxy += (h / 2);
    // grid resolution: bounded by sqrt(available cells) and the minimum cell size
    int cells = 2 * pChV->Count;
    if (cells > sqrt((float)maxcells)) cells = sqrt((float)maxcells);
    if (cells < 1) cells = 1;
    width = maxx - minx;
    height = maxy - miny;
    if (cells > width / mincellsize) cells = width / mincellsize;
    if (cells > height / mincellsize) cells = height / mincellsize;
    pChMapWnd->MinX = minx + stagex;
    pChMapWnd->MaxX = maxx + stagex;
    pChMapWnd->MinY = miny + stagey;
    pChMapWnd->MaxY = maxy + stagey;
    pChMapWnd->Width = width;
    pChMapWnd->Height = height;
    pChMapWnd->CellSize = __max(1, __max(width / cells, height / cells));
    pChMapWnd->MaxCellContent = maxcellcontent;
    pChMapWnd->NXCells = __max(1, width / pChMapWnd->CellSize);
    pChMapWnd->NYCells = __max(1, height / pChMapWnd->CellSize);
    pChMapWnd->pCells = pCells;
    pChMapWnd->pChains = (IntChain **)(void *)pCellContent;
}
// Inserts one chain per thread (chain #(idx * chainblocksize + i)) into the
// 2D spatial hash described by pChMapWnd; slots are claimed with atomicAdd,
// and full cells clamp their counter back to maxcellcontent.
__global__ void maphashchain_kernel(ChainView *pChV, ChainMapWindow *pChMapWnd, int chainblocksize, int i)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int ic, ix, iy;
Cell *qCell = 0;
IntChain *pC = pChV->Chains;
int nc = pChV->Count;
int nx = pChMapWnd->NXCells;
int ny = pChMapWnd->NYCells;
int minx = pChMapWnd->MinX;
int miny = pChMapWnd->MinY;
int cellsize = pChMapWnd->CellSize;
int maxcellcontent = pChMapWnd->MaxCellContent;
Cell *pCell = pChMapWnd->pCells;
IntChain **pCellContents = pChMapWnd->pChains;
//for (i = 0; i < chainblocksize; i++)
{
ic = idx * chainblocksize + i;
if (ic >= nc) return;
ix = (pC[ic].AvgX - minx) / cellsize;
if (ix < 0 || ix >= nx) return; //continue;
iy = (pC[ic].AvgY - miny) / cellsize;
if (iy < 0 || iy >= ny) return; //continue;
qCell = pCell + iy * nx + ix;
int c = atomicAdd(&qCell->Count, 1);
if (c >= maxcellcontent)
atomicExch(&qCell->Count, maxcellcontent);
else
{
IntChain **qCellContents = pCellContents + maxcellcontent * (iy * nx + ix);
qCellContents[c] = pC + ic;
}
}
}
// Fills pDeltas with a centered grid of trial shifts: deltasx X values,
// deltasy Y values, deltasz Z values, followed by per-Z-step slope
// corrections (dxdz, dydz) rescaled from Z to XY fixed-point units.
// NOTE: plastview is forced to 0 on entry, so the previous view's shift is
// deliberately NOT carried over (consistent with the commented-out terms).
__global__ void makechaindeltas_kernel(int *pDeltas, int xytol, int ztol, int deltasx, int deltasy, int deltasz, ChainView *plastview, int xc, int yc, float xslant, float yslant, float dxdz, float dydz)
{
plastview = 0;
int i;
for (i = 0; i < deltasx; i++)
pDeltas[i] = xytol * (i - deltasx / 2) + (plastview ? plastview->DeltaX : 0);
for (i = 0; i < deltasy; i++)
pDeltas[i + deltasx] = xytol * (i - deltasy / 2) + (plastview ? plastview->DeltaY : 0);
for (i = 0; i < deltasz; i++)
{
pDeltas[i + deltasx + deltasy] = ztol * (i - deltasz / 2) + (plastview ? (plastview->DeltaZ/* + (((int)(xslant * (xc - plastview->PositionX) + yslant * (yc - plastview->PositionY))) >> (XY_SCALE_SHIFT - Z_SCALE_SHIFT))*/) : 0);
pDeltas[i + deltasx + deltasy + deltasz] = (ztol * (i - deltasz / 2) * dxdz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));//(pDeltas[i + deltasx + deltasy] * dxdz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));
pDeltas[i + deltasx + deltasy + 2 * deltasz] = (ztol * (i - deltasz / 2) * dydz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));//(pDeltas[i + deltasx + deltasy] * dxdz) * (1 << (XY_SCALE_SHIFT - Z_SCALE_SHIFT));
}
}
// Recenters a new (x, y, z) delta search grid around the best shift found by
// a previous, coarser pass. The low 16 bits of *pBest encode the winning
// linear index into the bestdeltas grid; the last two rows of pBestDeltas
// hold the per-Z slope corrections that are folded into the x/y centers.
__global__ void makechaindeltas_fromshift_kernel(int *pDeltas, int xytol, int ztol, int deltasx, int deltasy, int deltasz, int *pBestDeltas, int *pBest, int bestdeltasx, int bestdeltasy, int bestdeltasz)
{
    const int best = (*pBest) & 0xffff;
    const int planesize = bestdeltasx * bestdeltasy;
    const int bix = best % bestdeltasx;
    const int biy = (best % planesize) / bestdeltasx;
    const int biz = best / planesize;
    const int zrow = bestdeltasx + bestdeltasy;
    const int centerx = pBestDeltas[bix] + pBestDeltas[zrow + bestdeltasz + biz];
    const int centery = pBestDeltas[bestdeltasx + biy] + pBestDeltas[zrow + 2 * bestdeltasz + biz];
    const int centerz = pBestDeltas[zrow + biz];
    for (int i = 0; i < deltasx; i++)
        pDeltas[i] = xytol * (i - deltasx / 2) + centerx;
    for (int i = 0; i < deltasy; i++)
        pDeltas[deltasx + i] = xytol * (i - deltasy / 2) + centery;
    for (int i = 0; i < deltasz; i++)
        pDeltas[deltasx + deltasy + i] = ztol * (i - deltasz / 2) + centerz;
}
// Subtracts the winning shift from every chain of the view. The low 16 bits
// of *pD index a deltasXY x deltasXY x deltasZ grid laid out in pDeltas as
// [X row][Y row][Z row]. Grid-stride loop over pview->Count chains.
__global__ void negshift_viewchains_kernel(ChainView *pview, int *pDeltas, int deltasXY, int deltasZ, int *pD)
{
    const int best = (*pD) & 0xffff;
    const int dx = pDeltas[best % deltasXY];
    const int dy = pDeltas[deltasXY + ((best % (deltasXY * deltasXY)) / deltasXY)];
    const int dz = pDeltas[2 * deltasXY + (best / (deltasXY * deltasXY))];
    // Chains are stored immediately after the ChainView header.
    IntChain *chains = (IntChain *)(void *)((char *)(void *)pview + sizeof(ChainView));
    const int stride = gridDim.x * blockDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < pview->Count; i += stride)
    {
        chains[i].AvgX -= dx;
        chains[i].AvgY -= dy;
        chains[i].AvgZ -= dz;
    }
}
// Stamps the stage position into the view header and zeroes its deltas.
// NOTE: the delta-combination logic is intentionally kept below but disabled,
// so pDeltas/deltasXY/deltasZ/pD are currently unused parameters.
__global__ void setchainviewheader_kernel(ChainMapHeader *pmaph, ChainView *pview, int px, int py, int pz, int *pDeltas, int deltasXY, int deltasZ, int *pD)
{
pview->PositionX = px;
pview->PositionY = py;
pview->PositionZ = pz;
pview->DeltaX = pview->DeltaY = pview->DeltaZ = 0;
/*
if (pDeltas == 0)
{
pview->DeltaX = pview->DeltaY = pview->DeltaZ = 0;
pmaph->Views = 1;
}
else
{
int best = (*pD) & 0xffff;
pview->DeltaX = pDeltas[best % deltasXY];
pview->DeltaY = pDeltas[deltasXY + ((best % (deltasXY * deltasXY)) / deltasXY)];
pview->DeltaZ = pDeltas[2 * deltasXY + (best / (deltasXY * deltasXY))];
pmaph->Views++;
}
*/
}
/*****************************/
// Gathers every stride-th int from pInt into the dense output array pOut.
__global__ void compact_kernel(int * pInt, int stride, int count, int * pOut)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= count) return;
    pOut[idx] = pInt[idx * stride];
}
// One step of a halving max-reduction, with a bounds check for the ragged
// upper half (total need not be a power of two).
__global__ void max_check_kernel(int * pInt, int total, int halftotal)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int partner = idx + halftotal;
    if (partner < total)
        pInt[idx] = __max(pInt[idx], pInt[partner]);
}
// One step of a halving max-reduction (caller guarantees both halves exist).
__global__ void max_kernel(int * pInt, int halftotal)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int other = pInt[idx + halftotal];
    if (other > pInt[idx]) pInt[idx] = other;
}
// One step of a halving sum-reduction, with a bounds check for the ragged
// upper half (total need not be a power of two).
__global__ void sum_check_kernel(int * pInt, int total, int halftotal)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int partner = idx + halftotal;
    if (partner < total)
        pInt[idx] += pInt[partner];
}
// One step of a halving sum-reduction (caller guarantees both halves exist).
__global__ void sum_kernel(int * pInt, int halftotal)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    pInt[idx] = pInt[idx] + pInt[idx + halftotal];
}
// Halving sum-reduction step over several independent arrays of length
// 'total' laid out back to back; blockIdx.y selects the array instance.
// Bounds-checked for a ragged upper half.
__global__ void sum_check_multiple_kernel(int * pInt, int total, int halftotal)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx + halftotal >= total) return;
    idx += blockIdx.y * total;  // jump to this block-row's array instance
    pInt[idx] += pInt[idx + halftotal];
}
// Halving sum-reduction step over several independent arrays of length
// 'total'; blockIdx.y selects the array instance. No bounds check.
__global__ void sum_multiple_kernel(int * pInt, int total, int halftotal)
{
    const int idx = (blockIdx.y * total) + blockIdx.x * blockDim.x + threadIdx.x;
    pInt[idx] += pInt[idx + halftotal];
}
// Packs each source value into the high 16 bits of the destination and the
// element's own index into the low 16 bits (used to recover the argmax/argmin
// position after a reduction).
__global__ void shift_postfixid_kernel(int *pdest, int *psrc, int total)
{
    const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total) return;
    pdest[idx] = (psrc[idx] << 16) | (idx & 0xffff);
}
// Converts a flat pair index into (Index1 = owner id, Index2 = pair rank
// within that owner) by descending the binary "pair computer" tree whose
// levels store cumulative counts; paircomputer points at the root level and
// each deeper level lies countatlevel entries below the current one.
__global__ void split_and_index_kernel(int *paircomputer, int depth, IntPair *pairindices, int totalpairs)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= totalpairs) return;
int d;
int res = id + 1;
int countatlevel = 2;
int place = 0;
for (d = 1; d < depth; d++)
{
place <<= 1;
// Go right when the left subtree holds fewer than res pairs.
if (paircomputer[place] < res)
{
res -= paircomputer[place];
place++;
}
paircomputer -= countatlevel;
countatlevel <<= 1;
}
pairindices[id].Index1 = place;
pairindices[id].Index2 = res - 1;
}
// Subsamples every divider-th cluster and precomputes its demagnification-
// corrected base position; clusters below mingrainsize are marked invalid
// (idoriginal = -1) and flagged 0 in pValidFlag when it is provided.
__global__ void trymap2_prepare_clusters_kernel(IntCluster *pc, IntMapCluster *pmc, int totalclusters, int divider, int mingrainsize, int w2, int h2, int demag, int *pValidFlag)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int idd = id * divider;
if (idd >= totalclusters) return;
pmc += id;
pc += idd;
if (pc->Area < mingrainsize)
{
pmc->idoriginal = -1;
if (pValidFlag) pValidFlag[id] = 0;
return;
}
pmc->idoriginal = idd;
// Apply a radial demagnification correction around the image center (w2, h2).
pmc->ibasex = (((pc->X - w2) * demag) >> DEMAG_SHIFT) + pc->X;
pmc->ibasey = (((pc->Y - h2) * demag) >> DEMAG_SHIFT) + pc->Y;
if (pValidFlag) pValidFlag[id] = 1;
}
// Applies the current trial shift to each prepared map cluster and caches the
// hash-cell coordinates the shifted position falls into. Rejected clusters
// (idoriginal < 0) only get their cell reset.
__global__ void trymap2_shift_kernel(IntMapCluster *pmc, int totalmapclusters, int *pDeltaX, int *pDeltaY, int cellsize)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= totalmapclusters) return;
    IntMapCluster *mc = pmc + idx;
    mc->icell = -1;
    if (mc->idoriginal < 0) return;
    mc->ishiftedx = mc->ibasex + *pDeltaX;
    mc->ishiftedy = mc->ibasey + *pDeltaY;
    mc->icellx = mc->ishiftedx / cellsize;
    mc->icelly = mc->ishiftedy / cellsize;
}
// For each (cluster, x-delta, y-delta) combination — blockIdx.y selects the
// x-delta, blockIdx.z the y-delta — writes 1 to pmatchresult if any map
// cluster in the target cell lies within tol (both axes) of the shifted
// position, else 0. Results are laid out [y-delta][x-delta][pair].
__global__ void trymap2_shiftmatch_kernel(IntMapCluster *pmc, IntPair *pPairs, int totalpairs, int *pDeltas, int cellsize, short nx, short ny, int *pmatchresult, int tol, Cell *pmapcell, IntCluster **pMapCellContent, int maxcellcontent)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
int ishiftedx = pmc->ibasex + pDeltas[blockIdx.y];
int ishiftedy = pmc->ibasey + pDeltas[gridDim.y + blockIdx.z];
int icellx = ishiftedx / cellsize;
int icelly = ishiftedy / cellsize;
pmatchresult += (blockIdx.z * gridDim.y + blockIdx.y) * totalpairs + id;
int imatchresult = 0;
if (icellx >= 0 && icellx < nx && icelly >= 0 && icelly < ny)
{
int icell = icelly * (int)nx + icellx;
pMapCellContent += icell * maxcellcontent;
short i = pmapcell[icell].Count;
// OR together the match predicate over the whole cell content.
while (--i >= 0)
{
IntCluster *pc2 = pMapCellContent[i];
imatchresult |= (abs(ishiftedx - pc2->X) < tol && abs(ishiftedy - pc2->Y) < tol) ? 1 : 0;
}
}
*pmatchresult = imatchresult;
}
// Counts the map clusters in the 3x3 cell neighborhood around each shifted
// cluster and stores the count both in pClustersInCell (input to the prefix
// sum that sizes the candidate-pair list) and in the cluster's ipairblockcount.
__global__ void finalmap_cell_kernel(IntMapCluster *pmc, int totalmapclusters, Cell *pmapcell, int *pClustersInCell, int nx, int ny)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalmapclusters) return;
pmc += id;
pClustersInCell[id] = 0;
pmc->icell = -1;
if (pmc->idoriginal < 0) return;
int mapclusters = 0;
#pragma unroll 3
for (int iiy = -1; iiy <= 1; iiy++)
{
int icelly = pmc->icelly + iiy;
if (icelly < 0 || icelly >= ny) continue;
#pragma unroll 3
for (int iix = -1; iix <= 1; iix++)
{
int icellx = pmc->icellx + iix;
if (icellx < 0 || icellx >= nx) continue;
mapclusters += pmapcell[icelly * nx + icellx].Count;
}
}
pClustersInCell[id] = pmc->ipairblockcount = mapclusters;
}
// For candidate pair id = (cluster Index1, neighbor rank Index2): walks the
// same 3x3 cell neighborhood as finalmap_cell_kernel until the Index2-th map
// cluster is reached, then stores its Chebyshev distance in pmatchresult and
// its cell-content index in pmatchmap (both -1 when beyond tol). The rank-0
// pair of each cluster also records the start of its pair block.
__global__ void finalmap_match_kernel(IntMapCluster *pmc, IntPair *pPairs, int totalpairs, int *pmatchresult, int *pmatchmap, int tol, Cell *pmapcell, IntCluster **pMapCellContent, int maxcellcontent, int nx, int ny)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
if (pmc->idoriginal < 0) return;
if (pPairs[id].Index2 == 0) pmc->ipairblockstart = id;
int mapclusters = 0;
#pragma unroll 3
for (int iiy = -1; iiy <= 1; iiy++)
{
int icelly = pmc->icelly + iiy;
if (icelly < 0 || icelly >= ny) continue;
#pragma unroll 3
for (int iix = -1; iix <= 1; iix++)
{
int icellx = pmc->icellx + iix;
if (icellx < 0 || icellx >= nx) continue;
int inc = pmapcell[icelly * nx + icellx].Count;
// Skip whole cells until the cell containing the Index2-th neighbor.
if (mapclusters + inc <= pPairs[id].Index2)
{
mapclusters += inc;
}
else
{
int idc2 = (icelly * nx + icellx) * maxcellcontent + pPairs[id].Index2 - mapclusters;
IntCluster *pc2 = pMapCellContent[idc2];
// Chebyshev (max-axis) distance between shifted cluster and candidate.
int dist = __max(abs(pmc->ishiftedx - pc2->X), abs(pmc->ishiftedy - pc2->Y));
if (dist < tol)
{
pmatchresult[id] = dist;
pmatchmap[id] = idc2;
}
else
{
pmatchresult[id] = pmatchmap[id] = -1;
}
return;
}
}
}
}
// Scans each cluster's candidate block for the smallest non-negative match
// distance. NOTE: the chain-formation branch is hard-disabled (if (0), dated
// 20140728), so every cluster currently receives a null pClusterChain entry;
// the disabled code is kept for reference.
__global__ void finalmap_optimize_kernel(IntCluster *pc, IntMapCluster *pmc, int clusteroffset, int totalclusters, int *pmatchresult, int *pmatchmap, IntCluster **pMapCellContent, IntCluster **pClusterChain)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalclusters) return;
pmc += id;
if (pmc->idoriginal < 0) return;
if (pmc->ipairblockcount <= 0) return;
int i = pmc->ipairblockcount - 1;
int ibest = i;
int iblockstart = pmc->ipairblockstart;
int d;
int dbest = pmatchresult[iblockstart + i];
// Linear scan for the best (smallest valid) distance in the block.
while (--i >= 0)
{
d = pmatchresult[iblockstart + i];
if (d >= 0 && (dbest < 0 || dbest > d))
{
ibest = i;
dbest = d;
}
}
if (0/*KRYSS DISABLE CHAIN FORMATION 20140728 dbest >= 0*/)
{
IntCluster *pBest = pMapCellContent[pmatchmap[iblockstart + ibest]];
pBest->Area = -abs(pBest->Area);
pc += pmc->idoriginal;
pc->X += (pBest->X - pmc->ibasex);
pc->Y += (pBest->Y - pmc->ibasey);
pClusterChain[clusteroffset + pmc->idoriginal] = pBest;
}
else pClusterChain[clusteroffset + pmc->idoriginal] = 0;
}
// Collapses each cluster chain (linked through pClusterChains) into a single
// IntChain: volume-weighted average position with slant-corrected Z, then
// micron scaling and stage offset. Chains failing minclusters/minvol stay
// zeroed and pvalid[id] stays 0. Clusters with negative Area are skipped
// (they were absorbed into another chain).
__global__ void makechain_kernel(IntCluster *pC, int totalclusters, short w2, short h2, short *pClusterXs, short *pClusterYs, short *pClusterZs, int xslant, int yslant, IntCluster **pClusterChains, short minclusters, short minvol, float xtomicron, float ytomicron, int stagex, int stagey, IntChain *pChain, int viewtag, int *pvalid)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalclusters) return;
pvalid[id] = pChain[id].Volume = pChain[id].Clusters = 0;
int avgx = 0;
int avgy = 0;
int avgz = 0;
IntCluster *pnc = pC + id;
if (pnc->Area < 0) return;
short clusters = 1;
int area = abs(pnc->Area);
int volume = area;
avgx = (pnc->X + pClusterXs[id]) * area;
avgy = (pnc->Y + pClusterYs[id]) * area;
avgz = area * (pClusterZs[id] - (( (xslant * (pnc->X - w2) + yslant * (pnc->Y - h2)) >> SLOPE_SHIFT)));
int ip;
// NOTE(review): the 'ip = pnc - pC' embedded in the condition is computed
// from the pre-advance pnc and immediately overwritten on the next line;
// it is redundant but harmless.
while (pnc = pClusterChains[ip = pnc - pC])
{
ip = pnc - pC;
clusters++;
area = abs(pnc->Area);
volume += area;
avgx += area * (pnc->X + pClusterXs[ip]);
avgy += area * (pnc->Y + pClusterYs[ip]);
avgz += area * (pClusterZs[ip] - (( (xslant * (pnc->X - w2) + yslant * (pnc->Y - h2)) >> SLOPE_SHIFT)));
}
if (clusters >= minclusters && volume >= minvol)
{
IntChain *psC = pChain + id;
psC->Clusters = clusters;
avgx /= volume;
avgy /= volume;
avgz /= volume;
psC->Volume = volume;
// Convert to fixed-point microns relative to image center, plus stage offset.
psC->AvgX = ((avgx - w2) << XY_SCALE_SHIFT) * xtomicron + stagex;
psC->AvgY = ((avgy - h2) << XY_SCALE_SHIFT) * ytomicron + stagey;
psC->AvgZ = avgz;
psC->ViewTag = viewtag;
psC->Reserved = 0;
pvalid[id] = 1;
}
}
// Subsampled variant: hashes every divider-th chain of pChV into the cell
// grid of pChMapWnd. Same overflow policy as the other overload: the counter
// is clamped back and the overflowing chain is discarded.
__global__ void maphashchain_kernel(ChainView *pChV, ChainMapWindow *pChMapWnd, int divider)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx * divider >= pChV->Count) return;
IntChain *pC = pChV->Chains + idx * divider;
int nx = pChMapWnd->NXCells;
int ny = pChMapWnd->NYCells;
int minx = pChMapWnd->MinX;
int miny = pChMapWnd->MinY;
int cellsize = pChMapWnd->CellSize;
int maxcellcontent = pChMapWnd->MaxCellContent;
int ix, iy;
ix = (pC->AvgX - minx) / cellsize;
if (ix < 0 || ix >= nx) return;
iy = (pC->AvgY - miny) / cellsize;
if (iy < 0 || iy >= ny) return;
Cell *qCell = pChMapWnd->pCells + iy * nx + ix;
int c = atomicAdd(&qCell->Count, 1);
if (c >= maxcellcontent) atomicExch(&qCell->Count, maxcellcontent);
else pChMapWnd->pChains[maxcellcontent * (iy * nx + ix) + c] = pC;
}
// Resets the occupancy counter of every cell that a (subsampled) chain of
// pChV hashes into — the inverse bookkeeping of maphashchain_kernel.
__global__ void clearhashchain2_kernel(ChainView *pChV, ChainMapWindow *pChMapWnd, int divider)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx * divider >= pChV->Count) return;
    const IntChain *chain = pChV->Chains + idx * divider;
    const int nx = pChMapWnd->NXCells;
    const int ny = pChMapWnd->NYCells;
    const int cellsize = pChMapWnd->CellSize;
    const int cx = (chain->AvgX - pChMapWnd->MinX) / cellsize;
    if (cx < 0 || cx >= nx) return;
    const int cy = (chain->AvgY - pChMapWnd->MinY) / cellsize;
    if (cy < 0 || cy >= ny) return;
    pChMapWnd->pCells[cy * nx + cx].Count = 0;
}
// Copies each chain's position into its IntMapChain slot; chains below
// minchainsize are marked invalid (idoriginal = -1) and flagged 0 in
// pValidFlag when that array is provided.
__global__ void trymapchain_prepare_chains_kernel(IntChain *pc, IntMapChain *pmc, int totalchains, int minchainsize, int *pValidFlag)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= totalchains) return;
    IntChain *chain = pc + idx;
    IntMapChain *mc = pmc + idx;
    const bool keep = chain->Volume >= minchainsize;
    if (pValidFlag) pValidFlag[idx] = keep ? 1 : 0;
    if (!keep)
    {
        mc->idoriginal = -1;
        return;
    }
    mc->idoriginal = idx;
    mc->ibasex = chain->AvgX;
    mc->ibasey = chain->AvgY;
    mc->ibasez = chain->AvgZ;
}
// For each (chain, x-delta, y-delta) combination — blockIdx.y/z select the
// deltas — counts (via atomicAdd into pMapCount) how many chains match an
// existing map chain within xytol/ztol for every trial z-delta; counters are
// laid out [z-step][y-delta][x-delta].
__global__ void trymapchain_shiftmatch_kernel(IntMapChain *pmc, IntPair *pPairs, int totalpairs, int *pMapCount, int *pDeltas, ChainMapWindow *pChMapWnd, int xytol, short zsteps, int ztol)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
int ishiftedx = pmc->ibasex + pDeltas[blockIdx.y];
int ishiftedy = pmc->ibasey + pDeltas[gridDim.y + blockIdx.z];
int ibasez = pmc->ibasez;
int icellx = (ishiftedx - pChMapWnd->MinX) / pChMapWnd->CellSize;
int icelly = (ishiftedy - pChMapWnd->MinY) / pChMapWnd->CellSize;
pMapCount += blockIdx.z * gridDim.y + blockIdx.y;
if (icellx >= 0 && icellx < pChMapWnd->NXCells && icelly >= 0 && icelly < pChMapWnd->NYCells)
{
int icell = icelly * (int)pChMapWnd->NXCells + icellx;
IntChain **pCellContent = pChMapWnd->pChains + icell * pChMapWnd->MaxCellContent;
short cellcount = pChMapWnd->pCells[icell].Count;
for (int iz = zsteps - 1; iz >= 0; iz--)
{
bool hasmatch = 0;
/* BUG FIX: restart the cell scan for every z-step. The original
   declared the cursor once outside the iz loop and only decremented
   it, so only the first iteration (iz == zsteps-1) ever inspected the
   cell content and every other z-delta counted zero matches. */
short i = cellcount;
while (--i >= 0)
{
IntChain *pc2 = pCellContent[i];
hasmatch = hasmatch || (abs(ishiftedx - pc2->AvgX) < xytol && abs(ishiftedy - pc2->AvgY) < xytol && abs(ibasez + pDeltas[gridDim.y + gridDim.z + iz] - pc2->AvgZ) < ztol);
}
if (hasmatch) atomicAdd(pMapCount + (iz * gridDim.y * gridDim.z), 1);
}
}
}
// Unpacks the winning refinement shift (low 16 bits of *pBest index a
// deltasXY x deltasXY x deltasXY-plane grid) into pDeltas[0..2] = (x, y, z).
__global__ void make_finalchainshift_kernel(int *pDeltas, int *pRefineDeltas, int *pBest, int deltasXY)
{
    const int best = (*pBest) & 0xffff;
    const int plane = deltasXY * deltasXY;
    pDeltas[0] = pRefineDeltas[best % deltasXY];
    pDeltas[1] = pRefineDeltas[deltasXY + ((best % plane) / deltasXY)];
    pDeltas[2] = pRefineDeltas[2 * deltasXY + (best / plane)];
}
// Applies the final shift (pDeltas[0..2]) to each mapped chain, locates its
// hash cell and stores the cell's occupancy count in pvalid (0 when the
// shifted chain falls outside the grid) — input to the pair enumeration.
__global__ void finalmapchain_cell_kernel(IntMapChain *pmc, IntPair *pPairs, int totalpairs, int *pDeltas, ChainMapWindow *pChMapWnd, int *pvalid)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
pmc->ishiftedx = pmc->ibasex + pDeltas[0];
pmc->ishiftedy = pmc->ibasey + pDeltas[1];
pmc->ishiftedz = pmc->ibasez + pDeltas[2];
int icellx = (pmc->ishiftedx - pChMapWnd->MinX) / pChMapWnd->CellSize;
int icelly = (pmc->ishiftedy - pChMapWnd->MinY) / pChMapWnd->CellSize;
int nx = pChMapWnd->NXCells;
int ny = pChMapWnd->NYCells;
if (icelly < 0 || icelly >= ny || icellx < 0 || icellx >= nx)
{
pvalid[pPairs[id].Index1] = 0;
return;
}
// Stores the cell index on the chain and its content count in pvalid.
pvalid[pPairs[id].Index1] = pChMapWnd->pCells[pmc->icell = icelly * (int)nx + icellx].Count;
}
// Marks a new chain as a duplicate of an existing map chain: if the
// Index2-th chain of its cell lies within xytol/ztol of the shifted
// position, its Volume and Clusters are zeroed so that
// finalmapchain_filter_kernel drops it.
__global__ void finalmapchain_match_kernel(IntChain *pc, IntMapChain *pmc, IntPair *pPairs, int totalpairs, ChainMapWindow *pChMapWnd, int xytol, int ztol)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= totalpairs) return;
pmc += pPairs[id].Index1;
IntChain *pc2 = pChMapWnd->pChains[pmc->icell * pChMapWnd->MaxCellContent + pPairs[id].Index2];
if (abs(pmc->ishiftedx - pc2->AvgX) < xytol && abs(pmc->ishiftedy - pc2->AvgY) < xytol && abs(pmc->ishiftedz - pc2->AvgZ) < ztol)
{
pc[pPairs[id].Index1].Volume = pc[pPairs[id].Index1].Clusters = 0;
}
}
// Flags surviving chains: pvalid[id] = 1 when the chain's Volume is still
// positive (not zeroed as a duplicate), else 0.
__global__ void finalmapchain_filter_kernel(IntChain *pc, int totalchains, int *pvalid)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < totalchains)
        pvalid[idx] = (pc[idx].Volume > 0) ? 1 : 0;
}
// Scatters the surviving chains (selected through pPairs[..].Index1) into a
// dense array; thread 0 also publishes the compacted count on the view.
__global__ void compactchains_kernel(IntChain *pcmpct, IntChain *pch, IntPair *pPairs, int totalpairs, ChainView *pChV)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx == 0) pChV->Count = totalpairs;
    if (idx < totalpairs)
        pcmpct[idx] = pch[pPairs[idx].Index1];
}
// Overload taking a precomputed (x, y, z) shift in pDeltas[0..2]: subtracts
// it from one chain per thread.
__global__ void negshift_viewchains_kernel(ChainView *pview, int *pDeltas)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= pview->Count) return;
    // Chains are stored immediately after the ChainView header.
    IntChain *chain = (IntChain *)(void *)((char *)(void *)pview + sizeof(ChainView)) + idx;
    chain->AvgX -= pDeltas[0];
    chain->AvgY -= pDeltas[1];
    chain->AvgZ -= pDeltas[2];
}
/*****************************/
} } |
c6bf3138a310411c8833aba4d457ce9d6b038311.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "numMayor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// 20 candidate launch configurations {BLOCKX, BLOCKY} swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// 7 candidate matrix sizes {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_v = NULL;
hipMalloc(&d_v, XSIZE*YSIZE);
float *d_pos = NULL;
hipMalloc(&d_pos, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
numMayor), dim3(gridBlock),dim3(threadBlock), 0, 0, d_v,d_pos);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
numMayor), dim3(gridBlock),dim3(threadBlock), 0, 0, d_v,d_pos);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
numMayor), dim3(gridBlock),dim3(threadBlock), 0, 0, d_v,d_pos);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c6bf3138a310411c8833aba4d457ce9d6b038311.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "numMayor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// 20 candidate launch configurations {BLOCKX, BLOCKY} swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// 7 candidate matrix sizes {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver: times 1000 launches of the numMayor
// kernel for every matrix size x launch configuration and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per combination.
// argv[1] = number of matrix sizes to sweep (max 7).
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_v = NULL;
// BUG FIX: cudaMalloc takes a byte count; the original allocated only
// XSIZE*YSIZE bytes for XSIZE*YSIZE floats (4x undersized).
cudaMalloc(&d_v, XSIZE*YSIZE*sizeof(float));
float *d_pos = NULL;
cudaMalloc(&d_pos, XSIZE*YSIZE*sizeof(float));
// Round the grid up so it covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
numMayor<<<gridBlock,threadBlock>>>(d_v,d_pos);
cudaDeviceSynchronize();
// Warm-up launches before timing.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
numMayor<<<gridBlock,threadBlock>>>(d_v,d_pos);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
numMayor<<<gridBlock,threadBlock>>>(d_v,d_pos);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: release the per-configuration buffers; the original leaked
// them on every one of the up-to-140 iterations.
cudaFree(d_v);
cudaFree(d_pos);
}
}}
86690d82ffbf9e0e4078d4722fb85455654f6abd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <linalg/eltwise2d.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Reference kernel for validation: for every element (x, y) of a rows x cols
// matrix, d[tid] = alpha * (a[y] + b[x] + d[tid]), plus beta * c[tid] when
// beta != 0. aPtr is a per-row vector, bPtr a per-column vector.
template <typename Type>
__global__ void naiveEltwise2DAddKernel(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < cols * rows) {
const auto x = tid % cols;
const auto y = tid / cols;
const auto d = dPtr[tid];
const auto a = aPtr[y];
const auto b = bPtr[x];
Type accm = alpha * (a + b + d);
if (beta) { accm += beta * cPtr[tid]; }
dPtr[tid] = accm;
}
}
// Host launcher for the reference kernel: 1-D grid of 64-thread blocks.
template <typename Type>
void naiveEltwise2DAdd(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta,
hipStream_t stream)
{
static const int TPB = 64;
int nblks = raft::ceildiv(rows * cols, TPB);
hipLaunchKernelGGL(( naiveEltwise2DAddKernel<Type>)
, dim3(nblks), dim3(TPB), 0, stream, rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta);
CUDA_CHECK(hipPeekAtLastError());
}
// Test parameters: comparison tolerance, matrix extent (w x h) and RNG seed.
template <typename T>
struct Eltwise2dInputs {
T tolerance;
int w;
int h;
unsigned long long int seed;
};
// NOTE(review): deliberately(?) prints nothing for the parameter struct;
// gtest only requires the operator to exist. Confirm before "fixing".
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const Eltwise2dInputs<T>& dims)
{
return os;
}
// Runs the primitive under test: eltwise2D with a plain a + b + c lambda,
// on the default stream.
template <typename Type>
void WrapperEltwise2d(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; };
eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0);
}
// Fixture: generates random row/column vectors, then fills out_ref with the
// naive result and out with the primitive's result for comparison in TEST_P.
template <typename T>
class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
auto w = params.w;
auto h = params.h;
auto len = w * h;
raft::allocate(in1, h);
raft::allocate(in2, w);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(in1, h, T(-1.0), T(1.0), stream);
r.uniform(in2, w, T(-1.0), T(1.0), stream);
// NOTE(review): out_ref and out are read (as the c/d inputs) before
// ever being written; this relies on freshly allocated device memory
// holding identical contents in both buffers — verify raft::allocate
// zero-fill semantics.
naiveEltwise2DAdd(h, w, in1, in2, out_ref, out_ref, (T)1, (T)1, stream);
WrapperEltwise2d<T>(h, w, in1, in2, out, out, (T)1, (T)1);
CUDA_CHECK(hipStreamDestroy(stream));
}
// Releases all device buffers allocated in SetUp.
void TearDown() override
{
CUDA_CHECK(hipFree(in1));
CUDA_CHECK(hipFree(in2));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
Eltwise2dInputs<T> params;
T *in1, *in2, *out_ref, *out;
};
// One float and one double case; 1024x1024 with type-appropriate tolerances.
const std::vector<Eltwise2dInputs<float>> inputsf2 = {{0.000001f, 1024, 1024, 1234ULL}};
const std::vector<Eltwise2dInputs<double>> inputsd2 = {{0.00000001, 1024, 1024, 1234ULL}};
typedef Eltwise2dTest<float> Eltwise2dTestF;
TEST_P(Eltwise2dTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(
out_ref, out, params.w * params.h, raft::CompareApprox<float>(params.tolerance)));
}
typedef Eltwise2dTest<double> Eltwise2dTestD;
TEST_P(Eltwise2dTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(
out_ref, out, params.w * params.h, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
| 86690d82ffbf9e0e4078d4722fb85455654f6abd.cu | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <linalg/eltwise2d.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
// Reference kernel for validation: for every element (x, y) of a rows x cols
// matrix, d[tid] = alpha * (a[y] + b[x] + d[tid]), plus beta * c[tid] when
// beta != 0. aPtr is a per-row vector, bPtr a per-column vector.
template <typename Type>
__global__ void naiveEltwise2DAddKernel(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < cols * rows) {
const auto x = tid % cols;
const auto y = tid / cols;
const auto d = dPtr[tid];
const auto a = aPtr[y];
const auto b = bPtr[x];
Type accm = alpha * (a + b + d);
if (beta) { accm += beta * cPtr[tid]; }
dPtr[tid] = accm;
}
}
// Host launcher for the reference kernel: 1-D grid of 64-thread blocks.
template <typename Type>
void naiveEltwise2DAdd(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta,
cudaStream_t stream)
{
static const int TPB = 64;
int nblks = raft::ceildiv(rows * cols, TPB);
naiveEltwise2DAddKernel<Type>
<<<nblks, TPB, 0, stream>>>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta);
CUDA_CHECK(cudaPeekAtLastError());
}
// Test parameters: comparison tolerance, matrix extent (w x h) and RNG seed.
template <typename T>
struct Eltwise2dInputs {
T tolerance;
int w;
int h;
unsigned long long int seed;
};
// NOTE(review): deliberately(?) prints nothing for the parameter struct;
// gtest only requires the operator to exist. Confirm before "fixing".
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const Eltwise2dInputs<T>& dims)
{
return os;
}
// Runs the primitive under test: eltwise2D with a plain a + b + c lambda,
// on the default stream.
template <typename Type>
void WrapperEltwise2d(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; };
eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0);
}
// Fixture: generates random row/column vectors, then fills out_ref with the
// naive result and out with the primitive's result for comparison in TEST_P.
template <typename T>
class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
auto w = params.w;
auto h = params.h;
auto len = w * h;
raft::allocate(in1, h);
raft::allocate(in2, w);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(in1, h, T(-1.0), T(1.0), stream);
r.uniform(in2, w, T(-1.0), T(1.0), stream);
// NOTE(review): out_ref and out are read (as the c/d inputs) before
// ever being written; this relies on freshly allocated device memory
// holding identical contents in both buffers — verify raft::allocate
// zero-fill semantics.
naiveEltwise2DAdd(h, w, in1, in2, out_ref, out_ref, (T)1, (T)1, stream);
WrapperEltwise2d<T>(h, w, in1, in2, out, out, (T)1, (T)1);
CUDA_CHECK(cudaStreamDestroy(stream));
}
// Releases all device buffers allocated in SetUp.
void TearDown() override
{
CUDA_CHECK(cudaFree(in1));
CUDA_CHECK(cudaFree(in2));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
Eltwise2dInputs<T> params;
T *in1, *in2, *out_ref, *out;
};
// One float and one double case; 1024x1024 with type-appropriate tolerances.
const std::vector<Eltwise2dInputs<float>> inputsf2 = {{0.000001f, 1024, 1024, 1234ULL}};
const std::vector<Eltwise2dInputs<double>> inputsd2 = {{0.00000001, 1024, 1024, 1234ULL}};
typedef Eltwise2dTest<float> Eltwise2dTestF;
TEST_P(Eltwise2dTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(
out_ref, out, params.w * params.h, raft::CompareApprox<float>(params.tolerance)));
}
typedef Eltwise2dTest<double> Eltwise2dTestD;
TEST_P(Eltwise2dTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(
out_ref, out, params.w * params.h, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
804fa23bae02b74b19fd32e087b9e30aac58a178.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
// B[i] = A[i] + alpha for every element i < M.
__global__ void kernelScaVecAdd(const float* A, const float alpha, float* B, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    B[idx] = A[idx] + alpha;
}
// C[i] = A[i] - B[i] for every element i < M.
__global__ void kernelVecVecSubtract(const float* A, float* B, float* C, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    C[idx] = A[idx] - B[idx];
}
// C[i] = A[i] * B[i] (Hadamard product) for every element i < M.
__global__ void kernelVecVecElementMultiply(const float* A, float* B, float* C, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    C[idx] = B[idx] * A[idx];
}
// B[i] = |A[i]| for every element i < M.
__global__ void kernelAbsVec(const float* A, float* B, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    B[idx] = abs(A[idx]);
}
// B[i] = sigmoid(A[i]) = 1 / (1 + e^-A[i]) for every element i < M.
__global__ void kernelSigmoidVec(const float* A, float* B, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    B[idx] = 1 / (1 + exp(-A[idx]));
}
// B[i] = sigmoid'(A[i]) = s * (1 - s) with s = sigmoid(A[i]).
__global__ void kernelSigmoidGradVec(const float* A, float* B, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    const float s = 1 / (1 + exp(-A[idx]));
    B[idx] = s * (1 - s);
}
// Fills the first M entries of A with 1.0f (bias row/column initialization).
__global__ void kernelAddBiasMat(float* A, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= M) return;
    A[idx] = 1.0f;
}
// Host wrapper: launches kernelScaVecAdd over M elements on a 1-D grid.
void scaVecAddGPU(const float* A, const float alpha, float* B, int M)
{
hipLaunchKernelGGL(( kernelScaVecAdd), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, alpha, B, M);
}
// Host wrapper: launches kernelVecVecSubtract over M elements on a 1-D grid.
void vecVecSubtractGPU(const float* A, float* B, float* C, int M)
{
hipLaunchKernelGGL(( kernelVecVecSubtract), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, B, C, M);
}
// Host wrapper: launches kernelVecVecElementMultiply over M elements.
void vecVecElementMultiplyGPU(const float* A, float* B, float* C, int M)
{
hipLaunchKernelGGL(( kernelVecVecElementMultiply), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, B, C, M);
}
// Host wrapper: launches kernelAbsVec over M elements on a 1-D grid.
void absVecGPU(const float* A, float* B, int M)
{
hipLaunchKernelGGL(( kernelAbsVec), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, B, M);
}
// Host wrapper: launches kernelSigmoidVec over M elements on a 1-D grid.
void sigmoidVecGPU(const float* A, float* B, int M)
{
hipLaunchKernelGGL(( kernelSigmoidVec), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, B, M);
}
// Host wrapper: launches kernelSigmoidGradVec over M elements on a 1-D grid.
void sigmoidGradVecGPU(const float* A, float* B, int M)
{
hipLaunchKernelGGL(( kernelSigmoidGradVec), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, B, M);
}
// Host launcher for kernelAddBiasMat: sets A[0..M) to 1.0f (default stream).
void addBiasMatGPU(float* A, int M)
{
hipLaunchKernelGGL(( kernelAddBiasMat), dim3(NUM_BLOCKS(M)), dim3(BLOCK_THREADS), 0, 0, A, M);
} | 804fa23bae02b74b19fd32e087b9e30aac58a178.cu | #include "kernels.h"
// Scalar-plus-vector: B[i] = A[i] + alpha, one thread per element.
__global__ void kernelScaVecAdd(const float* A, const float alpha, float* B, int M)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= M)
        return;
    B[idx] = A[idx] + alpha;
}
// Element-wise vector subtraction: C[i] = A[i] - B[i], one thread per element.
__global__ void kernelVecVecSubtract(const float* A, float* B, float* C, int M)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= M)
        return;
    C[idx] = A[idx] - B[idx];
}
// Element-wise (Hadamard) product: C[i] = B[i] * A[i], one thread per element.
__global__ void kernelVecVecElementMultiply(const float* A, float* B, float* C, int M)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= M)
        return;
    C[idx] = B[idx] * A[idx];
}
// Element-wise absolute value: B[i] = |A[i]|.
// Fix: use the single-precision fabsf() explicitly; a bare abs() is the
// integer overload in C and, depending on which headers are in scope, can
// silently truncate the float argument.
__global__ void kernelAbsVec(const float* A, float* B, int M)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < M)
        B[i] = fabsf(A[i]);
}
// Element-wise logistic sigmoid: B[i] = 1 / (1 + e^(-A[i])).
// Fix: use expf() and float literals; the original exp() with integer
// literals promoted the whole expression to double, which is much slower
// on most GPUs and unnecessary for a float output.
__global__ void kernelSigmoidVec(const float* A, float* B, int M)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < M)
        B[i] = 1.0f / (1.0f + expf(-A[i]));
}
// Element-wise sigmoid derivative: B[i] = s * (1 - s), where s = sigmoid(A[i]).
// Fix: use expf() and float literals to avoid the silent double promotion in
// the original (exp() + integer literals) inside a float kernel.
__global__ void kernelSigmoidGradVec(const float* A, float* B, int M)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < M)
    {
        float s = 1.0f / (1.0f + expf(-A[i]));  // sigmoid(A[i])
        B[i] = s * (1.0f - s);                  // sigmoid'(x) = s(x) * (1 - s(x))
    }
}
// Fills the first M entries of A with 1.0f (used to stamp in a bias row).
__global__ void kernelAddBiasMat(float* A, int M)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= M)
        return;
    A[idx] = 1.0f;
}
// Host launcher for kernelScaVecAdd: B[i] = A[i] + alpha for i in [0, M).
// Default stream; geometry from NUM_BLOCKS/BLOCK_THREADS (declared elsewhere).
void scaVecAddGPU(const float* A, const float alpha, float* B, int M)
{
kernelScaVecAdd<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, alpha, B, M);
}
// Host launcher for kernelVecVecSubtract: C = A - B element-wise (default stream).
void vecVecSubtractGPU(const float* A, float* B, float* C, int M)
{
kernelVecVecSubtract<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, B, C, M);
}
// Host launcher for kernelVecVecElementMultiply: C = A * B element-wise (default stream).
void vecVecElementMultiplyGPU(const float* A, float* B, float* C, int M)
{
kernelVecVecElementMultiply<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, B, C, M);
}
// Host launcher for kernelAbsVec: B[i] = |A[i]| (default stream).
void absVecGPU(const float* A, float* B, int M)
{
kernelAbsVec<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, B, M);
}
// Host launcher for kernelSigmoidVec: B[i] = sigmoid(A[i]) (default stream).
void sigmoidVecGPU(const float* A, float* B, int M)
{
kernelSigmoidVec<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, B, M);
}
// Host launcher for kernelSigmoidGradVec: B[i] = sigmoid'(A[i]) (default stream).
void sigmoidGradVecGPU(const float* A, float* B, int M)
{
kernelSigmoidGradVec<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, B, M);
}
// Host launcher for kernelAddBiasMat: sets A[0..M) to 1.0f (default stream).
void addBiasMatGPU(float* A, int M)
{
kernelAddBiasMat<<<NUM_BLOCKS(M), BLOCK_THREADS>>>(A, M);
}
0e1f97ec1f9b5e7791072c48d932d23d201bf0d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <device_launch_parameters.h>
// Gaussian/weighted-average blur of a single color channel.
// One thread per output pixel (2D launch). `filter` is a row-major
// filterWidth x filterWidth weight array with filterWidth odd; neighbor
// reads that fall outside the image are clamped to the nearest edge pixel,
// matching the sequential reference solution's clamping semantics.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// (An earlier commented-out draft that indexed the image transposed and
// accumulated directly into outputChannel was removed for readability.)
// Map this thread to its pixel coordinates and flat output index.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// Grid is rounded up, so threads mapped past the image edge do nothing.
if (thread_2D_pos.y >= numRows || thread_2D_pos.x >= numCols)
return;
int current_row = thread_2D_pos.y;
int current_col = thread_2D_pos.x;
// Accumulate the weighted sum in float before narrowing to unsigned char.
float result = 0.f;
for (int filter_row = -filterWidth / 2; filter_row <= filterWidth / 2; filter_row++)
{
for (int filter_col = -filterWidth / 2; filter_col <= filterWidth / 2; filter_col++)
{
// Clamp neighbor coordinates into [0, numRows) x [0, numCols).
int image_row = min(numRows - 1, max(current_row + filter_row, 0));
int image_col = min(numCols - 1, max(current_col + filter_col, 0));
float image_value = static_cast<float>(inputChannel[image_row * numCols + image_col]);
float filter_value = filter[(filter_row + filterWidth / 2) * filterWidth + filter_col + filterWidth / 2];
result += image_value * filter_value;
}
}
// Implicit float -> unsigned char conversion on store.
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
// Splits an interleaved RGBA image (array-of-structures) into three planar
// channel arrays (structure-of-arrays). One thread per pixel; the alpha
// component is discarded.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// Bounds check: grid is rounded up, so edge threads must not write.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < numCols && y < numRows)
{
redChannel[y * numCols + x] = inputImageRGBA[y * numCols + x].x;
greenChannel[y * numCols + x] = inputImageRGBA[y * numCols + x].y;
blueChannel[y * numCols + x] = inputImageRGBA[y * numCols + x].z;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
// Recombines three planar channels back into one interleaved RGBA image.
// Alpha is forced to 255 (fully opaque). One thread per pixel.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates device buffers for the three color channels (module globals
// d_red/d_green/d_blue) and for the square filter, then uploads the filter
// weights. All calls are wrapped in checkCudaErrors; buffers are released
// in cleanup().
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// The filter is square: filterWidth * filterWidth float weights.
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// Upload the host-side filter weights into the device buffer.
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
// Drives the full blur pipeline: split the RGBA image into planar channels,
// blur each channel with the uploaded filter, then recombine into RGBA
// (alpha = 255). Requires allocateMemoryAndCopyToGPU() to have run first.
// Fix: all four launches now use hipLaunchKernelGGL consistently -- the
// hipified original mixed raw <<<>>> launches with the HIP launch macro.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// 16x16 = 256 threads per block; grid rounded up to cover the whole image.
const dim3 blockSize(16, 16);
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y);
// 1) Split the interleaved image into planar R/G/B channels.
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// 2) Blur each channel; launches are serialized on the default stream.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// 3) Recombine the blurred channels into the RGBA output image.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
// Releases every device allocation made in allocateMemoryAndCopyToGPU().
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 0e1f97ec1f9b5e7791072c48d932d23d201bf0d4.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <device_launch_parameters.h>
// Gaussian/weighted-average blur of a single color channel.
// One thread per output pixel (2D launch). `filter` is a row-major
// filterWidth x filterWidth weight array with filterWidth odd; neighbor
// reads that fall outside the image are clamped to the nearest edge pixel,
// matching the sequential reference solution's clamping semantics.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// (An earlier commented-out draft that indexed the image transposed and
// accumulated directly into outputChannel was removed for readability.)
// Map this thread to its pixel coordinates and flat output index.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// Grid is rounded up, so threads mapped past the image edge do nothing.
if (thread_2D_pos.y >= numRows || thread_2D_pos.x >= numCols)
return;
int current_row = thread_2D_pos.y;
int current_col = thread_2D_pos.x;
// Accumulate the weighted sum in float before narrowing to unsigned char.
float result = 0.f;
for (int filter_row = -filterWidth / 2; filter_row <= filterWidth / 2; filter_row++)
{
for (int filter_col = -filterWidth / 2; filter_col <= filterWidth / 2; filter_col++)
{
// Clamp neighbor coordinates into [0, numRows) x [0, numCols).
int image_row = min(numRows - 1, max(current_row + filter_row, 0));
int image_col = min(numCols - 1, max(current_col + filter_col, 0));
float image_value = static_cast<float>(inputChannel[image_row * numCols + image_col]);
float filter_value = filter[(filter_row + filterWidth / 2) * filterWidth + filter_col + filterWidth / 2];
result += image_value * filter_value;
}
}
// Implicit float -> unsigned char conversion on store.
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
// Splits an interleaved RGBA image (array-of-structures) into three planar
// channel arrays (structure-of-arrays). One thread per pixel; the alpha
// component is discarded.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// Bounds check: grid is rounded up, so edge threads must not write.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < numCols && y < numRows)
{
redChannel[y * numCols + x] = inputImageRGBA[y * numCols + x].x;
greenChannel[y * numCols + x] = inputImageRGBA[y * numCols + x].y;
blueChannel[y * numCols + x] = inputImageRGBA[y * numCols + x].z;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
// Recombines three planar channels back into one interleaved RGBA image.
// Alpha is forced to 255 (fully opaque). One thread per pixel.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates device buffers for the three color channels (module globals
// d_red/d_green/d_blue) and for the square filter, then uploads the filter
// weights. All calls are wrapped in checkCudaErrors; buffers are released
// in cleanup().
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// The filter is square: filterWidth * filterWidth float weights.
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// Upload the host-side filter weights into the device buffer.
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
// Drives the full blur pipeline: split the RGBA image into planar channels,
// blur each channel with the uploaded filter, then recombine into RGBA
// (alpha = 255). Requires allocateMemoryAndCopyToGPU() to have run first.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// 16x16 = 256 threads per block; grid rounded up to cover the whole image.
const dim3 blockSize(16, 16);
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y);
// 1) Split the interleaved image into planar R/G/B channels.
separateChannels << <gridSize, blockSize >> > (d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Synchronize + error check to surface launch/execution failures early.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// 2) Blur each channel; launches are serialized on the default stream.
gaussian_blur << <gridSize, blockSize >> > (d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur << <gridSize, blockSize >> > (d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur << <gridSize, blockSize >> > (d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// 3) Recombine the blurred channels into the RGBA output image.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
// Releases every device allocation made in allocateMemoryAndCopyToGPU().
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
caa7c76b3b3be7ae7be1b5cd6595ca9a77669c18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "output_handler_variance.hpp"
#include "core/cuda/error.hpp"
#include <device_launch_parameters.h>
namespace mufflon { namespace renderer {
// Per-pixel kernel: folds the current iteration's render target into the
// cumulative and variance targets via UpdateIter<PixelType, ReduceMoments>::f
// (defined elsewhere in the project). 2D launch, one thread per pixel;
// threads outside width x height exit early.
// NOTE(review): the __CUDA_ARCH__ guard compiles the body only in the device
// pass; the host pass of this template gets an empty function body.
template < class PixelType, bool ReduceMoments >
__global__ void update_iter_kernel(ConstRenderTargetBuffer<Device::CUDA, PixelType> iterTarget,
RenderTargetBuffer<Device::CUDA, float> cumTarget,
RenderTargetBuffer<Device::CUDA, float> varTarget,
int numChannels,
int width, int height,
float iteration) {
#ifdef __CUDA_ARCH__
int x = int(blockIdx.x * blockDim.x + threadIdx.x);
int y = int(blockIdx.y * blockDim.y + threadIdx.y);
if(x < width && y < height)
output_handler_details::UpdateIter<PixelType, ReduceMoments>::f(iterTarget,
cumTarget, varTarget, x, y, numChannels, width, iteration);
#endif
}
namespace output_handler_details {
// Host launcher for update_iter_kernel: 16x16 thread blocks, grid rounded up
// to cover width x height, default stream. The integer iteration count is
// converted to float for the per-pixel update math.
template < class PixelType, bool ReduceMoments >
void update_iter_cuda(ConstRenderTargetBuffer<Device::CUDA, PixelType> iterTarget,
RenderTargetBuffer<Device::CUDA, float> cumTarget,
RenderTargetBuffer<Device::CUDA, float> varTarget,
int numChannels, int width, int height, int iteration) {
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
(height + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( update_iter_kernel<PixelType, ReduceMoments>), dim3(dimGrid), dim3(dimBlock), 0, 0,
iterTarget, cumTarget, varTarget, numChannels, width, height, float(iteration));
}
template void update_iter_cuda<float, true>(ConstRenderTargetBuffer<Device::CUDA, float>,
RenderTargetBuffer<Device::CUDA, float>,
RenderTargetBuffer<Device::CUDA, float>,
int, int, int, int);
template void update_iter_cuda<float, false>(ConstRenderTargetBuffer<Device::CUDA, float>,
RenderTargetBuffer<Device::CUDA, float>,
RenderTargetBuffer<Device::CUDA, float>,
int, int, int, int);
template void update_iter_cuda<i32, true>(ConstRenderTargetBuffer<Device::CUDA, i32>,
RenderTargetBuffer<Device::CUDA, float>,
RenderTargetBuffer<Device::CUDA, float>,
int, int, int, int);
template void update_iter_cuda<i32, false>(ConstRenderTargetBuffer<Device::CUDA, i32>,
RenderTargetBuffer<Device::CUDA, float>,
RenderTargetBuffer<Device::CUDA, float>,
int, int, int, int);
__device__ u32 *s_nan_counter;
// Resets the device-side NaN counter (s_nan_counter) to zero and returns its
// device address for use by kernels.
// Fix: use the synchronous hipMemcpyToSymbol -- the previous *Async variant
// copied from the stack local `zero` with no synchronization before the
// function returned, which is only safe by accident for pageable memory.
u32* get_cuda_nan_counter_ptr_and_set_zero() {
	constexpr u32 zero = 0;
	void* ptr = nullptr;
	cuda::check_error(::hipGetSymbolAddress(&ptr, s_nan_counter));
	cuda::check_error(::hipMemcpyToSymbol(s_nan_counter, &zero, sizeof(zero),
										  0u, ::hipMemcpyHostToDevice));
	return reinterpret_cast<u32*>(ptr);
}
// Reads the current device-side NaN counter back to the host.
// Fix: use the synchronous hipMemcpyFromSymbol -- the previous *Async variant
// returned `counter` without waiting for the device-to-host copy to finish,
// relying on the implicit blocking behavior for pageable destinations.
u32 get_cuda_nan_counter_value() {
	u32 counter = 0;
	cuda::check_error(::hipMemcpyFromSymbol(&counter, s_nan_counter, sizeof(counter),
											0u, ::hipMemcpyDeviceToHost));
	return counter;
}
} // namespace output_handler_details
}} // namespace mufflon::renderer | caa7c76b3b3be7ae7be1b5cd6595ca9a77669c18.cu | #include "output_handler_variance.hpp"
#include "core/cuda/error.hpp"
#include <device_launch_parameters.h>
namespace mufflon { namespace renderer {
// Per-pixel kernel: folds the current iteration's render target into the
// cumulative and variance targets via UpdateIter<PixelType, ReduceMoments>::f
// (defined elsewhere in the project). 2D launch, one thread per pixel;
// threads outside width x height exit early.
// NOTE(review): the __CUDA_ARCH__ guard compiles the body only in the device
// pass; the host pass of this template gets an empty function body.
template < class PixelType, bool ReduceMoments >
__global__ void update_iter_kernel(ConstRenderTargetBuffer<Device::CUDA, PixelType> iterTarget,
RenderTargetBuffer<Device::CUDA, float> cumTarget,
RenderTargetBuffer<Device::CUDA, float> varTarget,
int numChannels,
int width, int height,
float iteration) {
#ifdef __CUDA_ARCH__
int x = int(blockIdx.x * blockDim.x + threadIdx.x);
int y = int(blockIdx.y * blockDim.y + threadIdx.y);
if(x < width && y < height)
output_handler_details::UpdateIter<PixelType, ReduceMoments>::f(iterTarget,
cumTarget, varTarget, x, y, numChannels, width, iteration);
#endif
}
namespace output_handler_details {
// Host launcher for update_iter_kernel: 16x16 thread blocks, grid rounded up
// to cover width x height, default stream. The integer iteration count is
// converted to float for the per-pixel update math.
template < class PixelType, bool ReduceMoments >
void update_iter_cuda(ConstRenderTargetBuffer<Device::CUDA, PixelType> iterTarget,
RenderTargetBuffer<Device::CUDA, float> cumTarget,
RenderTargetBuffer<Device::CUDA, float> varTarget,
int numChannels, int width, int height, int iteration) {
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
(height + dimBlock.y - 1) / dimBlock.y);
update_iter_kernel<PixelType, ReduceMoments><<<dimGrid, dimBlock>>>(
iterTarget, cumTarget, varTarget, numChannels, width, height, float(iteration));
}
// Explicit instantiations for every (pixel type, reduce-moments) pair used
// by the output handler.
template void update_iter_cuda<float, true>(ConstRenderTargetBuffer<Device::CUDA, float>,
											RenderTargetBuffer<Device::CUDA, float>,
											RenderTargetBuffer<Device::CUDA, float>,
											int, int, int, int);
template void update_iter_cuda<float, false>(ConstRenderTargetBuffer<Device::CUDA, float>,
											 RenderTargetBuffer<Device::CUDA, float>,
											 RenderTargetBuffer<Device::CUDA, float>,
											 int, int, int, int);
template void update_iter_cuda<i32, true>(ConstRenderTargetBuffer<Device::CUDA, i32>,
										  RenderTargetBuffer<Device::CUDA, float>,
										  RenderTargetBuffer<Device::CUDA, float>,
										  int, int, int, int);
template void update_iter_cuda<i32, false>(ConstRenderTargetBuffer<Device::CUDA, i32>,
										   RenderTargetBuffer<Device::CUDA, float>,
										   RenderTargetBuffer<Device::CUDA, float>,
										   int, int, int, int);
// Device-global NaN counter storage.
// NOTE(review): the symbol is declared as a *pointer* (u32*), yet the code
// below copies a u32 into the symbol's own storage and reads a u32 back —
// i.e. the pointer variable's bytes are being used as the counter itself.
// Presumably the declaration was meant to be `__device__ u32 s_nan_counter;`
// — confirm against the device code that increments it.
__device__ u32 *s_nan_counter;
// Returns the device address of the counter symbol and resets the counter
// to zero (async copy on the default stream).
u32* get_cuda_nan_counter_ptr_and_set_zero() {
	constexpr u32 zero = 0;
	void* ptr = nullptr;
	// Address of the symbol's storage in device memory.
	cuda::check_error(::cudaGetSymbolAddress(&ptr, s_nan_counter));
	// Overwrite the first sizeof(u32) bytes of the symbol with zero.
	cuda::check_error(::cudaMemcpyToSymbolAsync(s_nan_counter, &zero, sizeof(zero),
												0u, ::cudaMemcpyHostToDevice));
	return reinterpret_cast<u32*>(ptr);
}
// Reads the current NaN counter value back from the device symbol.
// NOTE(review): this relies on the async symbol copy completing before
// `counter` is read — for device-to-pageable-host copies the runtime
// documents synchronous-with-host behavior, but confirm for the targeted
// CUDA version; otherwise a stream synchronize is needed here.
u32 get_cuda_nan_counter_value() {
	u32 counter = 0;
	cuda::check_error(::cudaMemcpyFromSymbolAsync(&counter, s_nan_counter, sizeof(counter),
												  0u, ::cudaMemcpyDeviceToHost));
	return counter;
}
} // namespace output_handler_details
}} // namespace mufflon::renderer |
52a6c5e2165daca53f381e1b0700684e1daa2af8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-08-03
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "../shape/IsSameShaped.h"
#include "Clip.h"
#include "Clip.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
set each entry to its clip value (CUDA Kernel)
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
__global__
void KernelClip(DTYPE * a, DTYPE * b, DTYPE lower, DTYPE upper, int size)
{
    /* clamp each element of a into [lower, upper] and store the result in b;
       one thread per element, excess threads exit early */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    DTYPE v = a[idx];
    if (v > upper)
        v = upper;
    else if (v < lower)
        v = lower;
    b[idx] = v;
}
/*
set each entry to its clip value with float16 data type value (CUDA Kernel)
This is for float16 computation
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
// FP16 overload is a stub: it returns immediately and leaves `b` untouched.
// Callers reach it through the X_FLOAT16 branch of _CudaClip below, so that
// branch currently produces no output — presumably a placeholder awaiting a
// real half-precision implementation; confirm before relying on it.
__global__
void KernelClip(__half * a, __half * b, DTYPE lower, DTYPE upper, int size)
{
    return;
}
/*
set each entry to its clip value
>> a - input tensor we are processing
>> b - output tensor we are processing
>> lower - the lower border
>> upper - the upper border
*/
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
{
    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
    CheckNTErrors((a->isSparse == false), "TODO!");
    // 1D launch geometry picked by the device manager for a->unitNum elements.
    int gridSize[3];
    int blockSize[3];
    GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
    dim3 blocks(gridSize[0]);
    dim3 threads(blockSize[0]);
    // Make a->devID the current device for the launch; restored below.
    int devIDBackup;
    ProtectCudaDev(a->devID, devIDBackup);
    if (a->dataType == DEFAULT_DTYPE) {
        KernelClip << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, lower, upper, a->unitNum);
    }
    else if (a->dataType == X_FLOAT16) {
        // NOTE: the __half overload of KernelClip is currently an empty stub.
        KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower, upper, a->unitNum);
    }
    else {
        ShowNTErrors("TODO!");
    }
    BacktoCudaDev(a->devID, devIDBackup);
}
/*
clip backward computation of dE/dx (Cuda kernel)
dy/dx = 1 if lower <= x <= upper
0 otherwise
>> dedy - dE/dy
>> dedx - dE/dx
>> y - y of the function
>> x - x of the function
>> lower
>> upper
*/
__global__
void KernelClipBackward(DTYPE * dedy, DTYPE * dedx, DTYPE * y, DTYPE * x, DTYPE lower, DTYPE upper, int size)
{
    /* pass the incoming gradient through where lower <= x <= upper and
       zero it elsewhere (dy/dx of the clip is 1 inside the range, 0 outside);
       `y` is accepted for signature symmetry but not read */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    DTYPE v = x[idx];
    dedx[idx] = (v > upper || v < lower) ? (DTYPE)0 : dedy[idx];
}
/*
backward computation (Cuda version)
dE/dx = dE/dy * dy/dx
hard tanh: y = upper if x > upper
x if lower <= x <= upper
lower if x< lower
and dy/dx = 1 if lower <= x <= upper
0 otherwise
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> dedx - dE/dx
>> lossName - type of loss function, e.g., cross entropy
*/
void _CudaClipBackward(XTensor * y, XTensor * x, XTensor * dedy, XTensor * dedx, DTYPE lower, DTYPE upper)
{
    // Only the default (float) dtype path is implemented; anything else traps.
    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        int gridSize[3], blockSize[3];
        GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
        // Switch to x's device for the launch; restored below.
        int devIDBackup;
        ProtectCudaDev(x->devID, devIDBackup);
        /* dE/dx = dE/dy * dy/dx */
        hipLaunchKernelGGL(( KernelClipBackward) , dim3(dim3(gridSize[0])), dim3(dim3(blockSize[0])), 0, 0,
                                                              (DTYPE*)dedy->data,
                                                              (DTYPE*)dedx->data,
                                                              (DTYPE*)y->data, (DTYPE*)x->data,
                                                              lower, upper,
                                                              x->unitNum);
        BacktoCudaDev(x->devID, devIDBackup);
    }
    else
        ShowNTErrors("TODO!");
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor)
| 52a6c5e2165daca53f381e1b0700684e1daa2af8.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-08-03
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "../shape/IsSameShaped.h"
#include "Clip.h"
#include "Clip.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
set each entry to its clip value (CUDA Kernel)
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
__global__
void KernelClip(DTYPE * a, DTYPE * b, DTYPE lower, DTYPE upper, int size)
{
    /* clamp each element of a into [lower, upper] and store the result in b;
       one thread per element, excess threads exit early */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    DTYPE v = a[idx];
    if (v > upper)
        v = upper;
    else if (v < lower)
        v = lower;
    b[idx] = v;
}
/*
set each entry to its clip value with float16 data type value (CUDA Kernel)
This is for float16 computation
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
// FP16 overload is a stub: it returns immediately and leaves `b` untouched.
// Callers reach it through the X_FLOAT16 branch of _CudaClip below, so that
// branch currently produces no output — presumably a placeholder awaiting a
// real half-precision implementation; confirm before relying on it.
__global__
void KernelClip(__half * a, __half * b, DTYPE lower, DTYPE upper, int size)
{
    return;
}
/*
set each entry to its clip value
>> a - input tensor we are processing
>> b - output tensor we are processing
>> lower - the lower border
>> upper - the upper border
*/
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
{
    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
    CheckNTErrors((a->isSparse == false), "TODO!");
    // 1D launch geometry picked by the device manager for a->unitNum elements.
    int gridSize[3];
    int blockSize[3];
    GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
    dim3 blocks(gridSize[0]);
    dim3 threads(blockSize[0]);
    // Make a->devID the current device for the launch; restored below.
    int devIDBackup;
    ProtectCudaDev(a->devID, devIDBackup);
    if (a->dataType == DEFAULT_DTYPE) {
        KernelClip << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, lower, upper, a->unitNum);
    }
    else if (a->dataType == X_FLOAT16) {
        // NOTE: the __half overload of KernelClip is currently an empty stub.
        KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower, upper, a->unitNum);
    }
    else {
        ShowNTErrors("TODO!");
    }
    BacktoCudaDev(a->devID, devIDBackup);
}
/*
clip backward computation of dE/dx (Cuda kernel)
dy/dx = 1 if lower <= x <= upper
0 otherwise
>> dedy - dE/dy
>> dedx - dE/dx
>> y - y of the function
>> x - x of the function
>> lower
>> upper
*/
__global__
void KernelClipBackward(DTYPE * dedy, DTYPE * dedx, DTYPE * y, DTYPE * x, DTYPE lower, DTYPE upper, int size)
{
    /* pass the incoming gradient through where lower <= x <= upper and
       zero it elsewhere (dy/dx of the clip is 1 inside the range, 0 outside);
       `y` is accepted for signature symmetry but not read */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    DTYPE v = x[idx];
    dedx[idx] = (v > upper || v < lower) ? (DTYPE)0 : dedy[idx];
}
/*
backward computation (Cuda version)
dE/dx = dE/dy * dy/dx
hard tanh: y = upper if x > upper
x if lower <= x <= upper
lower if x< lower
and dy/dx = 1 if lower <= x <= upper
0 otherwise
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> dedx - dE/dx
>> lossName - type of loss function, e.g., cross entropy
*/
void _CudaClipBackward(XTensor * y, XTensor * x, XTensor * dedy, XTensor * dedx, DTYPE lower, DTYPE upper)
{
    // Only the default (float) dtype path is implemented; anything else traps.
    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        int gridSize[3], blockSize[3];
        GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
        // Switch to x's device for the launch; restored below.
        int devIDBackup;
        ProtectCudaDev(x->devID, devIDBackup);
        /* dE/dx = dE/dy * dy/dx */
        KernelClipBackward <<<dim3(gridSize[0]), dim3(blockSize[0])>>>
                           ((DTYPE*)dedy->data,
                            (DTYPE*)dedx->data,
                            (DTYPE*)y->data, (DTYPE*)x->data,
                            lower, upper,
                            x->unitNum);
        BacktoCudaDev(x->devID, devIDBackup);
    }
    else
        ShowNTErrors("TODO!");
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
9ac09a256abbc5ebffeeb26918204a1949755955.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 4
#define t_per_block 2
using namespace std;
void random_ints(int *vec, int size){
    // Despite the name, fills vec with the deterministic ramp 0, 1, ..., size-1.
    int i = 0;
    while (i < size) {
        vec[i] = i;
        ++i;
    }
}
void random_ints_mat(int *mat, int size){
    // Fills a size x size row-major matrix with the ramp 0 .. size*size-1
    // (element (i, j) receives i*size + j).
    for (int i = 0; i < size; i++)
        for (int j = 0; j < size; j++)
            mat[i * size + j] = i * size + j;
}
__global__ void addV(int *a,int *b,int *c)
{
    // Element-wise c = a + b.  There is no length parameter, so the launch
    // must cover the vectors exactly (no bounds check is possible here).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = b[idx] + a[idx];
}
__global__ void MulMatVec(int *vec,int *mat,int *out_vec, int n)
{
    // out_vec = vec (1 x n) times mat (n x n, row-major): one thread per
    // output column.  No bounds check — the launch must cover exactly n threads.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int acc = 0;
    for (int row = 0; row < n; ++row)
        acc += vec[row] * mat[row * n + col];
    out_vec[col] = acc;
}
__global__ void MulMatMat(int *a, int *b, int *c, int n)
{
    // c = a * b for n x n row-major matrices: one thread per output element
    // (y picks the row, x picks the column).  No bounds check — the 2D launch
    // must cover the matrix exactly.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
int main()
{
    // Demo driver: vector add, vector*matrix, and matrix*matrix on the GPU.
    // NOTE(review): none of the hip* calls below check return codes, so
    // allocation/copy/launch failures would go unnoticed.
    // ----------- Vector Addition ----------------
    int *a, *b, *c; //Vectors on host
    int *d_a, *d_b, *d_c; //Vectors on device
    int v_size = N*sizeof(int);
    a = (int *)malloc(v_size);
    b = (int *)malloc(v_size);
    c = (int *)malloc(v_size);
    hipMalloc((void **)&d_a, v_size);
    hipMalloc((void **)&d_b, v_size);
    hipMalloc((void **)&d_c, v_size);
    random_ints(a,N);
    random_ints(b,N);
    hipMemcpy(d_a, a, v_size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, v_size, hipMemcpyHostToDevice);
    // N must be divisible by t_per_block: the kernels have no bounds checks.
    hipLaunchKernelGGL(( addV), dim3(N/t_per_block), dim3(t_per_block), 0, 0, d_a, d_b, d_c);
    hipMemcpy(c, d_c, v_size, hipMemcpyDeviceToHost);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    free(a); free(b); free(c);
    // ----------- Vector Matrix Multiplication -----------
    int *vec, *mat, *out_vec;
    vec = (int *)malloc(N*sizeof(int));
    mat = (int *)malloc(N*N*sizeof(int));
    out_vec = (int *)malloc(N*sizeof(int));
    int *d_vec, *d_mat, *d_out_vec;
    hipMalloc((void **)&d_vec, N*sizeof(int));
    hipMalloc((void **)&d_mat, N*N*sizeof(int));
    hipMalloc((void **)&d_out_vec, N*sizeof(int));
    random_ints(vec, N);
    // random_ints with N*N fills the same sequential values as
    // random_ints_mat(mat, N) would.
    random_ints(mat, N*N);
    hipMemcpy(d_vec, vec, N*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_mat, mat, N*N*sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( MulMatVec), dim3(N/t_per_block), dim3(t_per_block), 0, 0, d_vec, d_mat, d_out_vec, N);
    hipMemcpy(out_vec, d_out_vec, N*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_vec); hipFree(d_mat); hipFree(d_out_vec);
    free(vec); free(mat); free(out_vec);
    // ----------- Matrix Multiplication -----------
    int *mat_a, *mat_b, *mat_c;
    int *d_mat_a, *d_mat_b, *d_mat_c;
    mat_a = (int *)malloc(N*N*sizeof(int));
    mat_b = (int *)malloc(N*N*sizeof(int));
    mat_c = (int *)malloc(N*N*sizeof(int));
    hipMalloc((void **)&d_mat_a, N*N*sizeof(int));
    hipMalloc((void **)&d_mat_b, N*N*sizeof(int));
    hipMalloc((void **)&d_mat_c, N*N*sizeof(int));
    random_ints_mat(mat_a, N);
    random_ints_mat(mat_b, N);
    hipMemcpy(d_mat_a, mat_a, N*N*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_mat_b, mat_b, N*N*sizeof(int), hipMemcpyHostToDevice);
    // 2x2 thread tiles; N must be divisible by 2 for full coverage.
    dim3 threadsPerBlock(2,2);
    dim3 blocksPerGrid(N/threadsPerBlock.x,N/threadsPerBlock.y);
    hipLaunchKernelGGL(( MulMatMat), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_mat_a, d_mat_b, d_mat_c, N);
    hipMemcpy(mat_c, d_mat_c, N*N*sizeof(int), hipMemcpyDeviceToHost);
    // Only the matrix product is printed; the first two results are discarded.
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            printf("%d ", mat_c[i*N + j]);
        }
        // NOTE(review): "\\n" prints a literal backslash-n rather than a
        // newline — possibly an escaping artifact; confirm the intent.
        printf("\\n");
    }
    hipFree(d_mat_a); hipFree(d_mat_b); hipFree(d_mat_c);
    free(mat_a); free(mat_b); free(mat_c);
    return 0;
}
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 4
#define t_per_block 2
using namespace std;
void random_ints(int *vec, int size){
    // Despite the name, fills vec with the deterministic ramp 0, 1, ..., size-1.
    int i = 0;
    while (i < size) {
        vec[i] = i;
        ++i;
    }
}
void random_ints_mat(int *mat, int size){
    // Fills a size x size row-major matrix with the ramp 0 .. size*size-1
    // (element (i, j) receives i*size + j).
    for (int i = 0; i < size; i++)
        for (int j = 0; j < size; j++)
            mat[i * size + j] = i * size + j;
}
__global__ void addV(int *a,int *b,int *c)
{
    // Element-wise c = a + b.  There is no length parameter, so the launch
    // must cover the vectors exactly (no bounds check is possible here).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = b[idx] + a[idx];
}
__global__ void MulMatVec(int *vec,int *mat,int *out_vec, int n)
{
    // out_vec = vec (1 x n) times mat (n x n, row-major): one thread per
    // output column.  No bounds check — the launch must cover exactly n threads.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int acc = 0;
    for (int row = 0; row < n; ++row)
        acc += vec[row] * mat[row * n + col];
    out_vec[col] = acc;
}
__global__ void MulMatMat(int *a, int *b, int *c, int n)
{
    // c = a * b for n x n row-major matrices: one thread per output element
    // (y picks the row, x picks the column).  No bounds check — the 2D launch
    // must cover the matrix exactly.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
int main()
{
    // Demo driver: vector add, vector*matrix, and matrix*matrix on the GPU.
    // NOTE(review): none of the cuda* calls below check return codes, so
    // allocation/copy/launch failures would go unnoticed.
    // ----------- Vector Addition ----------------
    int *a, *b, *c; //Vectors on host
    int *d_a, *d_b, *d_c; //Vectors on device
    int v_size = N*sizeof(int);
    a = (int *)malloc(v_size);
    b = (int *)malloc(v_size);
    c = (int *)malloc(v_size);
    cudaMalloc((void **)&d_a, v_size);
    cudaMalloc((void **)&d_b, v_size);
    cudaMalloc((void **)&d_c, v_size);
    random_ints(a,N);
    random_ints(b,N);
    cudaMemcpy(d_a, a, v_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, v_size, cudaMemcpyHostToDevice);
    // N must be divisible by t_per_block: the kernels have no bounds checks.
    addV<<<N/t_per_block, t_per_block>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, v_size, cudaMemcpyDeviceToHost);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(a); free(b); free(c);
    // ----------- Vector Matrix Multiplication -----------
    int *vec, *mat, *out_vec;
    vec = (int *)malloc(N*sizeof(int));
    mat = (int *)malloc(N*N*sizeof(int));
    out_vec = (int *)malloc(N*sizeof(int));
    int *d_vec, *d_mat, *d_out_vec;
    cudaMalloc((void **)&d_vec, N*sizeof(int));
    cudaMalloc((void **)&d_mat, N*N*sizeof(int));
    cudaMalloc((void **)&d_out_vec, N*sizeof(int));
    random_ints(vec, N);
    // random_ints with N*N fills the same sequential values as
    // random_ints_mat(mat, N) would.
    random_ints(mat, N*N);
    cudaMemcpy(d_vec, vec, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat, mat, N*N*sizeof(int), cudaMemcpyHostToDevice);
    MulMatVec<<<N/t_per_block, t_per_block>>>(d_vec, d_mat, d_out_vec, N);
    cudaMemcpy(out_vec, d_out_vec, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_vec); cudaFree(d_mat); cudaFree(d_out_vec);
    free(vec); free(mat); free(out_vec);
    // ----------- Matrix Multiplication -----------
    int *mat_a, *mat_b, *mat_c;
    int *d_mat_a, *d_mat_b, *d_mat_c;
    mat_a = (int *)malloc(N*N*sizeof(int));
    mat_b = (int *)malloc(N*N*sizeof(int));
    mat_c = (int *)malloc(N*N*sizeof(int));
    cudaMalloc((void **)&d_mat_a, N*N*sizeof(int));
    cudaMalloc((void **)&d_mat_b, N*N*sizeof(int));
    cudaMalloc((void **)&d_mat_c, N*N*sizeof(int));
    random_ints_mat(mat_a, N);
    random_ints_mat(mat_b, N);
    cudaMemcpy(d_mat_a, mat_a, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat_b, mat_b, N*N*sizeof(int), cudaMemcpyHostToDevice);
    // 2x2 thread tiles; N must be divisible by 2 for full coverage.
    dim3 threadsPerBlock(2,2);
    dim3 blocksPerGrid(N/threadsPerBlock.x,N/threadsPerBlock.y);
    MulMatMat<<<blocksPerGrid,threadsPerBlock>>>(d_mat_a, d_mat_b, d_mat_c, N);
    cudaMemcpy(mat_c, d_mat_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    // Only the matrix product is printed; the first two results are discarded.
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            printf("%d ", mat_c[i*N + j]);
        }
        // NOTE(review): "\\n" prints a literal backslash-n rather than a
        // newline — possibly an escaping artifact; confirm the intent.
        printf("\\n");
    }
    cudaFree(d_mat_a); cudaFree(d_mat_b); cudaFree(d_mat_c);
    free(mat_a); free(mat_b); free(mat_c);
    return 0;
}
515fc4781fec09805554abb6dd67f1ab9968dc8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
// Negates *v for every thread except thread 0 of the block (thread 0 leaves
// it unchanged).
// NOTE(review): all threads of a launch read and write the same single
// double — with more than one thread this is a data race; presumably meant
// to be launched with a specific tiny configuration.  Confirm at the call
// site.
__global__ void my_copysign(double* v)
{
    int i = threadIdx.x;
    *v = (i == 0 ? 1 : -1) * (*v);
}
| 515fc4781fec09805554abb6dd67f1ab9968dc8e.cu | #include <cmath>
// Negates *v for every thread except thread 0 of the block (thread 0 leaves
// it unchanged).
// NOTE(review): all threads of a launch read and write the same single
// double — with more than one thread this is a data race; presumably meant
// to be launched with a specific tiny configuration.  Confirm at the call
// site.
__global__ void my_copysign(double* v)
{
    int i = threadIdx.x;
    *v = (i == 0 ? 1 : -1) * (*v);
}
|
86e19fdac34c23c35be73c1515bb069bad0edeb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// output0[0..1023] = input0 (1 x 3008) * input1 (3008 x 1024, row-major).
// Each warp accumulates a 94-element slice of k (94 * 32 warps = 3008) and
// the warps of a block cooperate on a column via atomicAdd; warp 0 zeroes
// the accumulator first.
extern "C" __global__ void manual_dot_nn_op_float_m1_k3008_n1024_kernel0(float* input0, float* input1, float* output0)
{
    int warp_id = threadIdx.x >> 5;
    int lane_id = threadIdx.x & 31;
    int col_id = blockIdx.x * blockDim.x / 32 + lane_id;
    if (col_id < 1024)
    {
        float val = 0;
        int k_start = warp_id * 94;
        int k_end = (warp_id + 1) * 94;
        for (int i = k_start; i < k_end; i++)
        {
            val = fma(input0[i], input1[i * 1024 + col_id], val);
        }
        if (warp_id == 0)
        {
            // Zero the accumulator before any warp of this block adds to it.
            output0[col_id]=0;
        }
        // NOTE(review): this barrier sits inside `if (col_id < 1024)`.  It is
        // only safe if every thread of the block passes that test (which
        // presumably holds for the intended launch configuration) —
        // otherwise it is a divergent barrier, i.e. undefined behavior.
        __syncthreads();
        atomicAdd(output0 + col_id, val);
    }
}
86e19fdac34c23c35be73c1515bb069bad0edeb4.cu | extern "C" __global__ void manual_dot_nn_op_float_m1_k3008_n1024_kernel0(float* input0, float* input1, float* output0)
{
    // output0[0..1023] = input0 (1 x 3008) * input1 (3008 x 1024, row-major).
    // Each warp accumulates a 94-element slice of k (94 * 32 warps = 3008)
    // and the warps of a block cooperate on a column via atomicAdd; warp 0
    // zeroes the accumulator first.
    int warp_id = threadIdx.x >> 5;
    int lane_id = threadIdx.x & 31;
    int col_id = blockIdx.x * blockDim.x / 32 + lane_id;
    if (col_id < 1024)
    {
        float val = 0;
        int k_start = warp_id * 94;
        int k_end = (warp_id + 1) * 94;
        for (int i = k_start; i < k_end; i++)
        {
            val = fma(input0[i], input1[i * 1024 + col_id], val);
        }
        if (warp_id == 0)
        {
            // Zero the accumulator before any warp of this block adds to it.
            output0[col_id]=0;
        }
        // NOTE(review): this barrier sits inside `if (col_id < 1024)`.  It is
        // only safe if every thread of the block passes that test (which
        // presumably holds for the intended launch configuration) —
        // otherwise it is a divergent barrier, i.e. undefined behavior.
        __syncthreads();
        atomicAdd(output0 + col_id, val);
    }
}
|
8a947d735044383b5410f0a50222ef460eef9994.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <nih/bvh/cuda/sah_builder.h>
#include <nih/sampling/random.h>
#include <nih/time/timer.h>
#include <nih/basic/cuda_domains.h>
namespace nih {
// Micro-benchmark for cuda::Sah_builder: builds a BVH over n_objs random
// point bounding boxes n_tests times (plus one untimed warm-up run) and
// prints averaged timings and builder statistics to stderr.
void sah_bvh_test()
{
    fprintf(stderr, "sah bvh test... started\n");
    const uint32 n_objs = 1024*1024;
    const uint32 n_tests = 100;
    // Degenerate (point) bboxes with uniformly random positions in [0,1)^3.
    thrust::host_vector<Bbox4f> h_bboxes( n_objs );
    Random random;
    for (uint32 i = 0; i < n_objs; ++i)
        h_bboxes[i] = Bbox4f( Vector4f( random.next(), random.next(), random.next(), 1.0f ) );
    thrust::device_vector<Bbox4f> d_bboxes( h_bboxes );
    thrust::device_vector<Bvh_node> bvh_nodes;
    thrust::device_vector<uint2> bvh_leaves;
    thrust::device_vector<uint32> bvh_index;
    cuda::Sah_builder builder( bvh_nodes, bvh_leaves, bvh_index );
    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );
    float time = 0.0f;
    // n_tests + 1 iterations: iteration 0 is a warm-up, excluded below.
    for (uint32 i = 0; i <= n_tests; ++i)
    {
        float dtime;
        hipEventRecord( start, 0 );
        builder.build(
            Bbox3f( Vector3f(0.0f), Vector3f(1.0f) ),
            d_bboxes.begin(),
            d_bboxes.begin() + n_objs,
            4u,
            1.8f );
        hipEventRecord( stop, 0 );
        hipEventSynchronize( stop );
        hipEventElapsedTime( &dtime, start, stop );
        if (i) // skip the first run
            time += dtime;
    }
    // Convert ms -> s and average over the timed runs.
    time /= 1000.0f * float(n_tests);
    hipEventDestroy( start );
    hipEventDestroy( stop );
    fprintf(stderr, "sah bvh test... done\n");
    fprintf(stderr, " time : %f ms\n", time * 1000.0f );
    fprintf(stderr, " objs/sec : %f M\n", (n_objs / time) / 1.0e6f );
    fprintf(stderr, " nodes : %u\n", builder.m_node_count );
    fprintf(stderr, " leaves : %u\n", builder.m_leaf_count );
    fprintf(stderr, " levels : %u\n", builder.m_level_count );
    fprintf(stderr, " sorting : %f ms\n", builder.m_sorting_time / float(n_tests) );
    fprintf(stderr, " compression : %f ms\n", builder.m_compression_time / float(n_tests) );
    fprintf(stderr, " sah split : %f ms\n", builder.m_sah_split_time / float(n_tests) );
    fprintf(stderr, " distribute objects : %f ms\n", builder.m_distribute_objects_time / float(n_tests) );
    fprintf(stderr, " temp storage : %.1f MB\n", float(builder.m_temp_storage) / 1.0e6f );
}
} // namespace nih
| 8a947d735044383b5410f0a50222ef460eef9994.cu | /*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <nih/bvh/cuda/sah_builder.h>
#include <nih/sampling/random.h>
#include <nih/time/timer.h>
#include <nih/basic/cuda_domains.h>
namespace nih {
// Micro-benchmark for cuda::Sah_builder: builds a BVH over n_objs random
// point bounding boxes n_tests times (plus one untimed warm-up run) and
// prints averaged timings and builder statistics to stderr.
void sah_bvh_test()
{
    fprintf(stderr, "sah bvh test... started\n");
    const uint32 n_objs = 1024*1024;
    const uint32 n_tests = 100;
    // Degenerate (point) bboxes with uniformly random positions in [0,1)^3.
    thrust::host_vector<Bbox4f> h_bboxes( n_objs );
    Random random;
    for (uint32 i = 0; i < n_objs; ++i)
        h_bboxes[i] = Bbox4f( Vector4f( random.next(), random.next(), random.next(), 1.0f ) );
    thrust::device_vector<Bbox4f> d_bboxes( h_bboxes );
    thrust::device_vector<Bvh_node> bvh_nodes;
    thrust::device_vector<uint2> bvh_leaves;
    thrust::device_vector<uint32> bvh_index;
    cuda::Sah_builder builder( bvh_nodes, bvh_leaves, bvh_index );
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    float time = 0.0f;
    // n_tests + 1 iterations: iteration 0 is a warm-up, excluded below.
    for (uint32 i = 0; i <= n_tests; ++i)
    {
        float dtime;
        cudaEventRecord( start, 0 );
        builder.build(
            Bbox3f( Vector3f(0.0f), Vector3f(1.0f) ),
            d_bboxes.begin(),
            d_bboxes.begin() + n_objs,
            4u,
            1.8f );
        cudaEventRecord( stop, 0 );
        cudaEventSynchronize( stop );
        cudaEventElapsedTime( &dtime, start, stop );
        if (i) // skip the first run
            time += dtime;
    }
    // Convert ms -> s and average over the timed runs.
    time /= 1000.0f * float(n_tests);
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    fprintf(stderr, "sah bvh test... done\n");
    fprintf(stderr, " time : %f ms\n", time * 1000.0f );
    fprintf(stderr, " objs/sec : %f M\n", (n_objs / time) / 1.0e6f );
    fprintf(stderr, " nodes : %u\n", builder.m_node_count );
    fprintf(stderr, " leaves : %u\n", builder.m_leaf_count );
    fprintf(stderr, " levels : %u\n", builder.m_level_count );
    fprintf(stderr, " sorting : %f ms\n", builder.m_sorting_time / float(n_tests) );
    fprintf(stderr, " compression : %f ms\n", builder.m_compression_time / float(n_tests) );
    fprintf(stderr, " sah split : %f ms\n", builder.m_sah_split_time / float(n_tests) );
    fprintf(stderr, " distribute objects : %f ms\n", builder.m_distribute_objects_time / float(n_tests) );
    fprintf(stderr, " temp storage : %.1f MB\n", float(builder.m_temp_storage) / 1.0e6f );
}
} // namespace nih
|
e3bfa0d77e2ee174d89302a2218c1e164e4ca9e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 5 Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
#include <iterator>
#include <iostream>
#include <wb.h>
#define BLOCK_SIZE 1024 //@@ You can change this
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
template < typename Operator >
__global__ void scan(float * input, float * output, int len, Operator op )
{
    // Kogge-Stone inclusive scan over one block's slice of `input` using `op`.
    // Requires blockDim.x <= BLOCK_SIZE.  NOTE: results are per-block prefix
    // sums only; block partials must still be combined (additional kernels)
    // for a global scan when len > blockDim.x.
    //
    // Fix vs. the original: __syncthreads() must be reached by every thread
    // of the block.  The original placed the barriers inside `if (i < len)`
    // and inside a loop whose trip count depended on threadIdx.x
    // (`stride <= t`), i.e. divergent barriers -> undefined behavior /
    // deadlock.  Here all threads run the full loop and the combine step is
    // guarded instead.
    __shared__ float tmp[ BLOCK_SIZE ];
    int t = threadIdx.x;
    int i = blockIdx.x * blockDim.x + t;
    // Out-of-range lanes load a dummy value; in-range lanes only ever read
    // lower-indexed (in-range) slots, but this keeps shared memory defined.
    tmp[t] = (i < len) ? input[i] : 0.0f;
    for( unsigned int stride = 1; stride < blockDim.x; stride *= 2 )
    {
        __syncthreads();                 // previous round's writes visible
        float partial = tmp[t];
        if( t >= (int)stride )
            partial = op( partial, tmp[t - stride] );
        __syncthreads();                 // all reads done before overwriting
        tmp[t] = partial;
    }
    if( i < len )
        output[i] = tmp[t];
}
struct Adder
{
    // Binary functor handed to scan(): plain float addition.
    __device__ float operator()( float lhs, float rhs ) const
    {
        return lhs + rhs;
    }
};
int main(int argc, char ** argv) {
    wbArg_t args;
    float * hostInput; // The input 1D list
    float * hostOutput; // The output list
    float * deviceInput;
    float * deviceOutput;
    int numElements; // number of elements in the list
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
    hostOutput = (float*) malloc(numElements * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The number of input elements in the input is ", numElements);
    wbTime_start(GPU, "Allocating GPU memory.");
    wbCheck(hipMalloc((void**)&deviceInput, numElements*sizeof(float)));
    wbCheck(hipMalloc((void**)&deviceOutput, numElements*sizeof(float)));
    wbTime_stop(GPU, "Allocating GPU memory.");
    wbTime_start(GPU, "Clearing output memory.");
    wbCheck(hipMemset(deviceOutput, 0, numElements*sizeof(float)));
    wbTime_stop(GPU, "Clearing output memory.");
    wbTime_start(GPU, "Copying input memory to the GPU.");
    wbCheck(hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice));
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    //@@ Initialize the grid and block dimensions here
    dim3 dimGrid( ceil((double)numElements/BLOCK_SIZE), 1, 1 );
    dim3 dimBlock( BLOCK_SIZE, 1, 1 );
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Modify this to complete the functionality of the scan
    //@@ on the deivce
    // NOTE(review): a single scan launch only produces per-block prefix
    // sums; for numElements > BLOCK_SIZE the block partials still need to
    // be propagated (additional kernel passes) for a correct global scan.
    hipLaunchKernelGGL(( scan), dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceInput
                                          , deviceOutput
                                          , numElements
                                          , Adder() );
    hipDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), hipMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying output memory to the CPU");
    // Debug dump of the result to stdout.
    std::copy( hostOutput, hostOutput+numElements
             , std::ostream_iterator<float>(std::cout,"\n") );
    wbTime_start(GPU, "Freeing GPU Memory");
    hipFree(deviceInput);
    hipFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");
    wbSolution(args, hostOutput, numElements);
    free(hostInput);
    free(hostOutput);
    return 0;
}
| e3bfa0d77e2ee174d89302a2218c1e164e4ca9e3.cu | // MP 5 Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
#include <iterator>
#include <iostream>
#include <wb.h>
#define BLOCK_SIZE 1024 //@@ You can change this
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
template < typename Operator >
__global__ void scan(float * input, float * output, int len, Operator op )
{
    // Kogge-Stone inclusive scan over one block's slice of `input` using `op`.
    // Requires blockDim.x <= BLOCK_SIZE.  NOTE: results are per-block prefix
    // sums only; block partials must still be combined (additional kernels)
    // for a global scan when len > blockDim.x.
    //
    // Fix vs. the original: __syncthreads() must be reached by every thread
    // of the block.  The original placed the barriers inside `if (i < len)`
    // and inside a loop whose trip count depended on threadIdx.x
    // (`stride <= t`), i.e. divergent barriers -> undefined behavior /
    // deadlock.  Here all threads run the full loop and the combine step is
    // guarded instead.
    __shared__ float tmp[ BLOCK_SIZE ];
    int t = threadIdx.x;
    int i = blockIdx.x * blockDim.x + t;
    // Out-of-range lanes load a dummy value; in-range lanes only ever read
    // lower-indexed (in-range) slots, but this keeps shared memory defined.
    tmp[t] = (i < len) ? input[i] : 0.0f;
    for( unsigned int stride = 1; stride < blockDim.x; stride *= 2 )
    {
        __syncthreads();                 // previous round's writes visible
        float partial = tmp[t];
        if( t >= (int)stride )
            partial = op( partial, tmp[t - stride] );
        __syncthreads();                 // all reads done before overwriting
        tmp[t] = partial;
    }
    if( i < len )
        output[i] = tmp[t];
}
struct Adder
{
    // Binary functor handed to scan(): plain float addition.
    __device__ float operator()( float lhs, float rhs ) const
    {
        return lhs + rhs;
    }
};
int main(int argc, char ** argv) {
    wbArg_t args;
    float * hostInput; // The input 1D list
    float * hostOutput; // The output list
    float * deviceInput;
    float * deviceOutput;
    int numElements; // number of elements in the list
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
    hostOutput = (float*) malloc(numElements * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The number of input elements in the input is ", numElements);
    wbTime_start(GPU, "Allocating GPU memory.");
    wbCheck(cudaMalloc((void**)&deviceInput, numElements*sizeof(float)));
    wbCheck(cudaMalloc((void**)&deviceOutput, numElements*sizeof(float)));
    wbTime_stop(GPU, "Allocating GPU memory.");
    wbTime_start(GPU, "Clearing output memory.");
    wbCheck(cudaMemset(deviceOutput, 0, numElements*sizeof(float)));
    wbTime_stop(GPU, "Clearing output memory.");
    wbTime_start(GPU, "Copying input memory to the GPU.");
    wbCheck(cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice));
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    //@@ Initialize the grid and block dimensions here
    dim3 dimGrid( ceil((double)numElements/BLOCK_SIZE), 1, 1 );
    dim3 dimBlock( BLOCK_SIZE, 1, 1 );
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Modify this to complete the functionality of the scan
    //@@ on the deivce
    // NOTE(review): a single scan launch only produces per-block prefix
    // sums; for numElements > BLOCK_SIZE the block partials still need to
    // be propagated (additional kernel passes) for a correct global scan.
    scan<<< dimGrid, dimBlock >>>( deviceInput
                                 , deviceOutput
                                 , numElements
                                 , Adder() );
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost));
    wbTime_stop(Copy, "Copying output memory to the CPU");
    // Debug dump of the result to stdout.
    std::copy( hostOutput, hostOutput+numElements
             , std::ostream_iterator<float>(std::cout,"\n") );
    wbTime_start(GPU, "Freeing GPU Memory");
    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");
    wbSolution(args, hostOutput, numElements);
    free(hostInput);
    free(hostOutput);
    return 0;
}
|
a2afde8a3e11faf643ab01bfeb7946cb634d2444.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Zhenyu Ye
* z.ye@tue.nl
* Eindhoven University of Technology
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_TILING_H_
#define _MATRIXMUL_TILING_H_
#include <stdio.h>
#include "matrixMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
// Tiled matrix multiplication C = A * B using BLOCK_SIZE x BLOCK_SIZE shared
// memory tiles. wA is A's width, wB is B's width. Launch with a
// (BLOCK_SIZE, BLOCK_SIZE) block and one block per output tile.
__global__ void
matrixMul_tiling( float* C, float* A, float* B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep  = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep  = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
             a <= aEnd;
             a += aStep, b += bStep) {
        // Load the matrices from device memory to shared memory;
        // each thread loads one element of each tile.
        AS(ty, tx) = A[a + wA * ty + tx];
        // Perf fix: load B with tx varying along the row so the warp's
        // global reads are coalesced (the original read B[b + wB*tx + ty],
        // a wB-strided access). Bs[r][c] still holds tile element (row r,
        // col c), so BS(k, tx) below reads exactly the same values.
        BS(ty, tx) = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += AS(ty, k) * BS(k, tx);
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| a2afde8a3e11faf643ab01bfeb7946cb634d2444.cu | /*
* Zhenyu Ye
* z.ye@tue.nl
* Eindhoven University of Technology
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_TILING_H_
#define _MATRIXMUL_TILING_H_
#include <stdio.h>
#include "matrixMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
// Tiled matrix multiplication C = A * B using BLOCK_SIZE x BLOCK_SIZE shared
// memory tiles. wA is A's width, wB is B's width. Launch with a
// (BLOCK_SIZE, BLOCK_SIZE) block and one block per output tile.
__global__ void
matrixMul_tiling( float* C, float* A, float* B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep  = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep  = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
             a <= aEnd;
             a += aStep, b += bStep) {
        // Load the matrices from device memory to shared memory;
        // each thread loads one element of each tile.
        AS(ty, tx) = A[a + wA * ty + tx];
        // Perf fix: load B with tx varying along the row so the warp's
        // global reads are coalesced (the original read B[b + wB*tx + ty],
        // a wB-strided access). Bs[r][c] still holds tile element (row r,
        // col c), so BS(k, tx) below reads exactly the same values.
        BS(ty, tx) = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += AS(ty, k) * BS(k, tx);
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
7234133ffa2c5c449e82421179829ed0cef83b8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Concurrent access on an atimoc counter. Intra Region.
*/
#include <stdio.h>
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
hipError_t cuErr = call; \
if(hipSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
// Incrementations per thread
#define B 100
// Block dimension
#define T 512
// Kernel
// Each thread performs B block-scoped atomic increments on the shared
// counter (atomicAdd_block orders the update within this block only).
__global__ void count(int *d_countervar){
  int done = 0;
  while (done < B) {
    atomicAdd_block(d_countervar, 1);
    ++done;
  }
}
// Main program
// Host driver: one block of T threads, each adding B to a device counter via
// block-scoped atomics; verifies the final value against T*B.
int main(){
  // Device pointer for counter variable
  int *d_count;
  // Allocation of host counter variable
  int *countervar = (int *) malloc(sizeof(int));
  // Initialization of the counter variable
  countervar[0] = 0;
  // Allocation of GPU memory
  cudaErrorCheck( hipMalloc(&d_count, sizeof(int)));
  // Copying the counter variable from the host to the device
  cudaErrorCheck( hipMemcpy(d_count,countervar,sizeof(int),hipMemcpyHostToDevice));
  //Launch Kernel
  // Single block: atomicAdd_block in the kernel is only valid intra-block.
  hipLaunchKernelGGL(( count), dim3(1),dim3(T), 0, 0, d_count);
  // Check for errors in kernel launch (e.g. invalid execution configuration paramters)
  cudaErrorCheck( hipGetLastError());
  // Check for errors on the GPU after control is returned to CPU
  cudaErrorCheck( hipDeviceSynchronize());
  // Copying the counter variable from the device to the host
  cudaErrorCheck( hipMemcpy(countervar,d_count,sizeof(int),hipMemcpyDeviceToHost));
  // Verifying result
  printf("counter: %i expected: %i \n ", countervar[0], T*B);
  // Freeing GPU memory
  cudaErrorCheck( hipFree(d_count));
  // Freeing CPU memory
  free(countervar);
  return 0;
} | 7234133ffa2c5c449e82421179829ed0cef83b8f.cu | /*
Concurrent access on an atimoc counter. Intra Region.
*/
#include <stdio.h>
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
// Incrementations per thread
#define B 100
// Block dimension
#define T 512
// Kernel
// Each thread performs B block-scoped atomic increments on the shared
// counter (atomicAdd_block orders the update within this block only).
__global__ void count(int *d_countervar){
  int done = 0;
  while (done < B) {
    atomicAdd_block(d_countervar, 1);
    ++done;
  }
}
// Main program
// Host driver: one block of T threads, each adding B to a device counter via
// block-scoped atomics; verifies the final value against T*B.
int main(){
  // Device pointer for counter variable
  int *d_count;
  // Allocation of host counter variable
  int *countervar = (int *) malloc(sizeof(int));
  // Initialization of the counter variable
  countervar[0] = 0;
  // Allocation of GPU memory
  cudaErrorCheck( cudaMalloc(&d_count, sizeof(int)));
  // Copying the counter variable from the host to the device
  cudaErrorCheck( cudaMemcpy(d_count,countervar,sizeof(int),cudaMemcpyHostToDevice));
  //Launch Kernel
  // Single block: atomicAdd_block in the kernel is only valid intra-block.
  count<<<1,T>>>(d_count);
  // Check for errors in kernel launch (e.g. invalid execution configuration paramters)
  cudaErrorCheck( cudaGetLastError());
  // Check for errors on the GPU after control is returned to CPU
  cudaErrorCheck( cudaDeviceSynchronize());
  // Copying the counter variable from the device to the host
  cudaErrorCheck( cudaMemcpy(countervar,d_count,sizeof(int),cudaMemcpyDeviceToHost));
  // Verifying result
  printf("counter: %i expected: %i \n ", countervar[0], T*B);
  // Freeing GPU memory
  cudaErrorCheck( cudaFree(d_count));
  // Freeing CPU memory
  free(countervar);
  return 0;
} |
79014f3d899f2d423d04fcd5a3051566169528cb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "freshman.h"
// In-place block-level sum reduction, neighbored-pair addressing: at each
// step, threads whose index is a multiple of 2*stride add their right
// neighbor. One partial sum per block is written to g_odata[blockIdx.x].
// The modulo test leaves active threads scattered across warps (divergent).
// NOTE(review): the early return before the barrier assumes n is a multiple
// of blockDim.x; otherwise the tail block hits a divergent __syncthreads.
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // data pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // thread id out of range
    if (idx >= n) return;
    for (int stride = 1; stride < blockDim.x; stride *= 2){
        if (threadIdx.x % (stride*2) == 0){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// In-place block-level sum reduction, "less divergence" variant: thread tid
// works on element 2*stride*tid of this block's slice, so active threads
// stay packed in the low warps. One partial sum per block goes to
// g_odata[blockIdx.x].
__global__ void reduceNeighboredLess(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // data pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // thread id out of range
    if (threadIdx.x >= n) return;
    for (int stride = 1; stride < blockDim.x; stride *= 2){
        // First data index of this thread, LOCAL to the block's slice.
        // Bug fix: the original used the global index (2 * idx * stride),
        // which is >= blockDim.x for every block after the first, so those
        // blocks accumulated nothing and published an unreduced element.
        int index = 2 * threadIdx.x * stride;
        // data add
        if (index < blockDim.x){
            idata[index] += idata[index + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// In-place block-level sum reduction, interleaved (sequential) addressing:
// each step folds the upper half of the block's slice onto the lower half,
// halving the stride until 1. Contiguous active threads give coalesced
// accesses and minimal divergence. Partial sum -> g_odata[blockIdx.x].
// NOTE(review): assumes blockDim.x is a power of two and n a multiple of
// blockDim.x — confirm against the launch configuration.
__global__ void reduceInterleave(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // data pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // thread id out of range
    if (idx >= n) return;
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1){
        if (threadIdx.x < stride){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Block-level sum reduction with 2x unrolling: each block first folds a
// second blockDim.x-wide chunk onto its own, then reduces its slice with
// interleaved addressing. Launch with half as many blocks as the plain
// reduction. Partial sum per block -> g_odata[blockIdx.x].
__global__ void reduceUnroll2(int *g_idata, int *g_odata, unsigned int n){
    // Global index of this thread's element.
    // Bug fix: the original computed blockIdx.x * blockDim.x + threadIdx.x,
    // which does not match the 2x-strided block offset of idata below, so
    // every block after the first folded the wrong pair of chunks.
    int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // data pointer of this block (each block owns 2*blockDim.x elements)
    int *idata = g_idata + blockIdx.x * blockDim.x * 2;
    // Fold the second chunk onto the first.
    if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
    __syncthreads();
    // thread id out of range
    if (idx >= n) return;
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1){
        if (threadIdx.x < stride){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Block-level sum reduction with 8x unrolling plus an unrolled last warp:
// each block folds eight blockDim.x-wide chunks into its first chunk, does
// the interleaved reduction down to 64 elements, then the first warp
// finishes through a volatile pointer (relies on implicit warp synchrony —
// pre-Volta idiom). Partial sum per block -> g_odata[blockIdx.x].
__global__ void reduceUnrollWarp8(int *g_idata, int *g_odata, unsigned int n){
    // Global index of this thread's first element.
    // Bug fix: the original computed blockIdx.x * blockDim.x + threadIdx.x,
    // which does not match the 8x-strided idata offset below (compare the
    // correct reduceCompleteUnrollWarp8), so blocks after the first folded
    // the wrong chunks.
    int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    // data pointer of this block (each block owns 8*blockDim.x elements)
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // Fold chunks 1..7 onto chunk 0.
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // thread id out of range
    if (idx >= n) return;
    for (int stride = blockDim.x/2; stride > 32; stride >>= 1){
        if (threadIdx.x < stride){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Unrolled final warp (stride <= 32); volatile prevents caching in
    // registers between the dependent adds.
    if (threadIdx.x < 32){
        volatile int *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32];
        vmem[threadIdx.x] += vmem[threadIdx.x + 16];
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Block-level sum reduction: 8x chunk unrolling, fully unrolled in-block
// tree (guarded by runtime blockDim.x tests), and an unrolled volatile last
// warp. One partial sum per block -> g_odata[blockIdx.x].
__global__ void reduceCompleteUnrollWarp8(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
    // data pointer of this block(s)
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // unrolling blocks: fold chunks 1..7 onto chunk 0
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // Fully unrolled in-block reduction.
    // This requires blockDim.x to be a power of two, at most 1024.
    if (blockDim.x >= 1024 && threadIdx.x < 512) idata[threadIdx.x] += idata[threadIdx.x + 512];
    __syncthreads();
    if (blockDim.x >= 512 && threadIdx.x < 256) idata[threadIdx.x] += idata[threadIdx.x + 256];
    __syncthreads();
    if (blockDim.x >= 256 && threadIdx.x < 128) idata[threadIdx.x] += idata[threadIdx.x + 128];
    __syncthreads();
    if (blockDim.x >= 128 && threadIdx.x < 64) idata[threadIdx.x] += idata[threadIdx.x + 64];
    __syncthreads();
    // Unrolled final warp (stride <= 32); volatile forces memory accesses
    // between the dependent adds (pre-Volta implicit-warp-sync idiom).
    if (threadIdx.x < 32){
        volatile int *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32];
        vmem[threadIdx.x] += vmem[threadIdx.x + 16];
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Same as reduceCompleteUnrollWarp8, but the block size is a template
// parameter so the tree guards below are resolved at compile time (dead
// branches are removed). Instantiate with iBlockSize == blockDim.x.
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
    // data pointer of this block(s)
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // unrolling blocks: fold chunks 1..7 onto chunk 0
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // Compile-time-pruned in-block reduction (reduces warp divergence).
    if (iBlockSize >= 1024 && threadIdx.x < 512) idata[threadIdx.x] += idata[threadIdx.x + 512];
    __syncthreads();
    if (iBlockSize >= 512 && threadIdx.x < 256) idata[threadIdx.x] += idata[threadIdx.x + 256];
    __syncthreads();
    if (iBlockSize >= 256 && threadIdx.x < 128) idata[threadIdx.x] += idata[threadIdx.x + 128];
    __syncthreads();
    if (iBlockSize >= 128 && threadIdx.x < 64) idata[threadIdx.x] += idata[threadIdx.x + 64];
    __syncthreads();
    // Unrolled final warp (stride <= 32); volatile forces memory accesses
    // between the dependent adds (pre-Volta implicit-warp-sync idiom).
    if (threadIdx.x < 32){
        volatile int *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32];
        vmem[threadIdx.x] += vmem[threadIdx.x + 16];
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }
    if (threadIdx.x == 0) g_odata[blockIdx.x] = idata[0];
}
// CPU reference reduction: repeatedly halves the array in place, pairwise
// summing, until one value remains. Requires size >= 1; mutates `data`.
// Odd lengths carry the unpaired last element forward.
int recursiveReduce(int *data, int const size){
    int remaining = size;
    while (remaining > 1) {
        int const half = remaining / 2;
        for (int j = 0; j < half; ++j) {
            data[j] += data[j + half];
        }
        if ((remaining % 2) != 0) {
            // Odd: move the leftover element down and keep it in play.
            data[half] = data[remaining - 1];
            remaining = half + 1;
        } else {
            remaining = half;
        }
    }
    return data[0];
}
// Driver: reduces 1<<14 random ints on the GPU with the templated
// fully-unrolled kernel, prints per-block partials and the total, and
// compares against the CPU recursiveReduce reference.
int main(int argc, char **argv) {
    // set up device
    initDevice(0);
    // set up data
    int size = 1<<14;
    // set up threads (block size may be overridden on the command line)
    int blocksize = 512;
    if (argc > 1){
        blocksize = atoi(argv[1]);
    }
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1)/block.x, 1);
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *)malloc(bytes);
    int *h_odata = (int *)malloc(grid.x * sizeof(int));
    initialData_int(h_idata, size);
    int *d_idata, *d_odata;
    hipMalloc((void **)&d_idata, bytes);
    hipMalloc((void **)&d_odata, grid.x*sizeof(int));
    double iStart, iElaps;
    int gpu_sum;
    hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    iStart = cpuSecond();
    // reduceCompleteUnrollWarp8<<<grid, block>>>(d_idata, d_odata, size);
    switch (blocksize){ // template arguments must be compile-time constants
    case 1024:
        hipLaunchKernelGGL(( reduceCompleteUnroll<1024>), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
        break;
    case 512:
        hipLaunchKernelGGL(( reduceCompleteUnroll<512>), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
        break;
    case 256:
        hipLaunchKernelGGL(( reduceCompleteUnroll<256>), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
        break;
    case 128:
        hipLaunchKernelGGL(( reduceCompleteUnroll<128>), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
        break;
    }
    hipDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("GPU Execution configuration<<<(%d,%d),(%d,%d)>>> Time elapsed %f sec\n",
            grid.x, grid.y, block.x, block.y, iElaps);
    hipMemcpy(h_odata, d_odata, grid.x*sizeof(int), hipMemcpyDeviceToHost);
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++){
        printf(" %d ", h_odata[i]);
        gpu_sum += h_odata[i];
    }
    printf("reduceNeighbored %d\n", gpu_sum);
    // CPU reference runs on a copy because recursiveReduce mutates its input.
    int *tmp = (int *)malloc(bytes);
    memcpy(tmp, h_idata, bytes);
    int result = recursiveReduce(tmp, size);
    printf("%d\n", result);
    free(tmp);  // fix: this buffer was previously leaked
    free(h_idata);
    free(h_odata);
    hipFree(d_idata);
    hipFree(d_odata);
    hipDeviceReset();
    return EXIT_SUCCESS;
} | 79014f3d899f2d423d04fcd5a3051566169528cb.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include "freshman.h"
// In-place block-level sum reduction, neighbored-pair addressing: at each
// step, threads whose index is a multiple of 2*stride add their right
// neighbor. One partial sum per block is written to g_odata[blockIdx.x].
// The modulo test leaves active threads scattered across warps (divergent).
// NOTE(review): the early return before the barrier assumes n is a multiple
// of blockDim.x; otherwise the tail block hits a divergent __syncthreads.
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // data pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // thread id out of range
    if (idx >= n) return;
    for (int stride = 1; stride < blockDim.x; stride *= 2){
        if (threadIdx.x % (stride*2) == 0){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// In-place block-level sum reduction, "less divergence" variant: thread tid
// works on element 2*stride*tid of this block's slice, so active threads
// stay packed in the low warps. One partial sum per block goes to
// g_odata[blockIdx.x].
__global__ void reduceNeighboredLess(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // data pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // thread id out of range
    if (threadIdx.x >= n) return;
    for (int stride = 1; stride < blockDim.x; stride *= 2){
        // First data index of this thread, LOCAL to the block's slice.
        // Bug fix: the original used the global index (2 * idx * stride),
        // which is >= blockDim.x for every block after the first, so those
        // blocks accumulated nothing and published an unreduced element.
        int index = 2 * threadIdx.x * stride;
        // data add
        if (index < blockDim.x){
            idata[index] += idata[index + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// In-place block-level sum reduction, interleaved (sequential) addressing:
// each step folds the upper half of the block's slice onto the lower half,
// halving the stride until 1. Contiguous active threads give coalesced
// accesses and minimal divergence. Partial sum -> g_odata[blockIdx.x].
// NOTE(review): assumes blockDim.x is a power of two and n a multiple of
// blockDim.x — confirm against the launch configuration.
__global__ void reduceInterleave(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // data pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // thread id out of range
    if (idx >= n) return;
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1){
        if (threadIdx.x < stride){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Block-level sum reduction with 2x unrolling: each block first folds a
// second blockDim.x-wide chunk onto its own, then reduces its slice with
// interleaved addressing. Launch with half as many blocks as the plain
// reduction. Partial sum per block -> g_odata[blockIdx.x].
__global__ void reduceUnroll2(int *g_idata, int *g_odata, unsigned int n){
    // Global index of this thread's element.
    // Bug fix: the original computed blockIdx.x * blockDim.x + threadIdx.x,
    // which does not match the 2x-strided block offset of idata below, so
    // every block after the first folded the wrong pair of chunks.
    int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // data pointer of this block (each block owns 2*blockDim.x elements)
    int *idata = g_idata + blockIdx.x * blockDim.x * 2;
    // Fold the second chunk onto the first.
    if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
    __syncthreads();
    // thread id out of range
    if (idx >= n) return;
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1){
        if (threadIdx.x < stride){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Block-level sum reduction with 8x unrolling plus an unrolled last warp:
// each block folds eight blockDim.x-wide chunks into its first chunk, does
// the interleaved reduction down to 64 elements, then the first warp
// finishes through a volatile pointer (relies on implicit warp synchrony —
// pre-Volta idiom). Partial sum per block -> g_odata[blockIdx.x].
__global__ void reduceUnrollWarp8(int *g_idata, int *g_odata, unsigned int n){
    // Global index of this thread's first element.
    // Bug fix: the original computed blockIdx.x * blockDim.x + threadIdx.x,
    // which does not match the 8x-strided idata offset below (compare the
    // correct reduceCompleteUnrollWarp8), so blocks after the first folded
    // the wrong chunks.
    int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    // data pointer of this block (each block owns 8*blockDim.x elements)
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // Fold chunks 1..7 onto chunk 0.
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // thread id out of range
    if (idx >= n) return;
    for (int stride = blockDim.x/2; stride > 32; stride >>= 1){
        if (threadIdx.x < stride){
            idata[threadIdx.x] += idata[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Unrolled final warp (stride <= 32); volatile prevents caching in
    // registers between the dependent adds.
    if (threadIdx.x < 32){
        volatile int *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32];
        vmem[threadIdx.x] += vmem[threadIdx.x + 16];
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Block-level sum reduction: 8x chunk unrolling, fully unrolled in-block
// tree (guarded by runtime blockDim.x tests), and an unrolled volatile last
// warp. One partial sum per block -> g_odata[blockIdx.x].
__global__ void reduceCompleteUnrollWarp8(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
    // data pointer of this block(s)
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // unrolling blocks: fold chunks 1..7 onto chunk 0
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // Fully unrolled in-block reduction.
    // This requires blockDim.x to be a power of two, at most 1024.
    if (blockDim.x >= 1024 && threadIdx.x < 512) idata[threadIdx.x] += idata[threadIdx.x + 512];
    __syncthreads();
    if (blockDim.x >= 512 && threadIdx.x < 256) idata[threadIdx.x] += idata[threadIdx.x + 256];
    __syncthreads();
    if (blockDim.x >= 256 && threadIdx.x < 128) idata[threadIdx.x] += idata[threadIdx.x + 128];
    __syncthreads();
    if (blockDim.x >= 128 && threadIdx.x < 64) idata[threadIdx.x] += idata[threadIdx.x + 64];
    __syncthreads();
    // Unrolled final warp (stride <= 32); volatile forces memory accesses
    // between the dependent adds (pre-Volta implicit-warp-sync idiom).
    if (threadIdx.x < 32){
        volatile int *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32];
        vmem[threadIdx.x] += vmem[threadIdx.x + 16];
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }
    if (threadIdx.x == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
// Same as reduceCompleteUnrollWarp8, but the block size is a template
// parameter so the tree guards below are resolved at compile time (dead
// branches are removed). Instantiate with iBlockSize == blockDim.x.
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n){
    // thread id
    int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
    // data pointer of this block(s)
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // unrolling blocks: fold chunks 1..7 onto chunk 0
    if (idx + 7 * blockDim.x < n) {
        int el0 = g_idata[idx];
        int el1 = g_idata[idx + blockDim.x];
        int el2 = g_idata[idx + 2*blockDim.x];
        int el3 = g_idata[idx + 3*blockDim.x];
        int el4 = g_idata[idx + 4*blockDim.x];
        int el5 = g_idata[idx + 5*blockDim.x];
        int el6 = g_idata[idx + 6*blockDim.x];
        int el7 = g_idata[idx + 7*blockDim.x];
        g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
    }
    __syncthreads();
    // Compile-time-pruned in-block reduction (reduces warp divergence).
    if (iBlockSize >= 1024 && threadIdx.x < 512) idata[threadIdx.x] += idata[threadIdx.x + 512];
    __syncthreads();
    if (iBlockSize >= 512 && threadIdx.x < 256) idata[threadIdx.x] += idata[threadIdx.x + 256];
    __syncthreads();
    if (iBlockSize >= 256 && threadIdx.x < 128) idata[threadIdx.x] += idata[threadIdx.x + 128];
    __syncthreads();
    if (iBlockSize >= 128 && threadIdx.x < 64) idata[threadIdx.x] += idata[threadIdx.x + 64];
    __syncthreads();
    // Unrolled final warp (stride <= 32); volatile forces memory accesses
    // between the dependent adds (pre-Volta implicit-warp-sync idiom).
    if (threadIdx.x < 32){
        volatile int *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32];
        vmem[threadIdx.x] += vmem[threadIdx.x + 16];
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }
    if (threadIdx.x == 0) g_odata[blockIdx.x] = idata[0];
}
// CPU reference reduction: repeatedly halves the array in place, pairwise
// summing, until one value remains. Requires size >= 1; mutates `data`.
// Odd lengths carry the unpaired last element forward.
int recursiveReduce(int *data, int const size){
    int remaining = size;
    while (remaining > 1) {
        int const half = remaining / 2;
        for (int j = 0; j < half; ++j) {
            data[j] += data[j + half];
        }
        if ((remaining % 2) != 0) {
            // Odd: move the leftover element down and keep it in play.
            data[half] = data[remaining - 1];
            remaining = half + 1;
        } else {
            remaining = half;
        }
    }
    return data[0];
}
// Driver: reduces 1<<14 random ints on the GPU with the templated
// fully-unrolled kernel, prints per-block partials and the total, and
// compares against the CPU recursiveReduce reference.
int main(int argc, char **argv) {
    // set up device
    initDevice(0);
    // set up data
    int size = 1<<14;
    // set up threads (block size may be overridden on the command line)
    int blocksize = 512;
    if (argc > 1){
        blocksize = atoi(argv[1]);
    }
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1)/block.x, 1);
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *)malloc(bytes);
    int *h_odata = (int *)malloc(grid.x * sizeof(int));
    initialData_int(h_idata, size);
    int *d_idata, *d_odata;
    cudaMalloc((void **)&d_idata, bytes);
    cudaMalloc((void **)&d_odata, grid.x*sizeof(int));
    double iStart, iElaps;
    int gpu_sum;
    cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    iStart = cpuSecond();
    // reduceCompleteUnrollWarp8<<<grid, block>>>(d_idata, d_odata, size);
    switch (blocksize){ // template arguments must be compile-time constants
    case 1024:
        reduceCompleteUnroll<1024><<<grid, block>>>(d_idata, d_odata, size);
        break;
    case 512:
        reduceCompleteUnroll<512><<<grid, block>>>(d_idata, d_odata, size);
        break;
    case 256:
        reduceCompleteUnroll<256><<<grid, block>>>(d_idata, d_odata, size);
        break;
    case 128:
        reduceCompleteUnroll<128><<<grid, block>>>(d_idata, d_odata, size);
        break;
    }
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("GPU Execution configuration<<<(%d,%d),(%d,%d)>>> Time elapsed %f sec\n",
            grid.x, grid.y, block.x, block.y, iElaps);
    cudaMemcpy(h_odata, d_odata, grid.x*sizeof(int), cudaMemcpyDeviceToHost);
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++){
        printf(" %d ", h_odata[i]);
        gpu_sum += h_odata[i];
    }
    printf("reduceNeighbored %d\n", gpu_sum);
    // CPU reference runs on a copy because recursiveReduce mutates its input.
    int *tmp = (int *)malloc(bytes);
    memcpy(tmp, h_idata, bytes);
    int result = recursiveReduce(tmp, size);
    printf("%d\n", result);
    free(tmp);  // fix: this buffer was previously leaked
    free(h_idata);
    free(h_odata);
    cudaFree(d_idata);
    cudaFree(d_odata);
    cudaDeviceReset();
    return EXIT_SUCCESS;
} |
fa5d5ad9f7438a8e36233a616fa76f0352d9ab8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved.
#include <stdio.h>
#include <vector>
#include "common_cuda_helper.hpp"
#include "trt_plugin_helper.hpp"
using mmdeploy::TensorDesc;
// ONNX ScatterND: each of the n work items reads one index tuple from
// `indices`, converts it to a flat offset via the output strides, and copies
// one contiguous slice of `update` into `output` at that offset.
template <typename T>
__global__ void onnx_scatternd_kernel(const int n, const int* indices, const T* update, T* output,
                                      TensorDesc tensor_desc, TensorDesc indice_desc) {
  // Number of index components per tuple (last dim of the indices tensor).
  const int indice_cols = indice_desc.shape[indice_desc.dim - 1];
  // Elements per copied slice: stride of the deepest indexed axis.
  const int copy_stride = tensor_desc.stride[indice_cols - 1];
  const int* stride = &(tensor_desc.stride[0]);
  CUDA_1D_KERNEL_LOOP(index, n) {
    int output_offset = 0;
    const int* indices_current = indices + index * indice_cols;
    // Dot-product of the index tuple with the output strides.
    for (int i = 0; i < indice_cols; ++i) {
      output_offset += stride[i] * indices_current[i];
    }
    // memcpy is valid in device code; copies one slice per work item.
    memcpy(output + output_offset, update + index * copy_stride, copy_stride * sizeof(T));
  }
}
// Host launcher for ONNX ScatterND: builds shape/stride descriptors for the
// output and indices tensors, copies `data` into `output`, then scatters the
// update slices on `stream`.
template <typename T>
void TRTONNXScatterNDKernelLauncher(const T* data, const int* indices, const T* update,
                                    const int* dims, int nbDims, const int* indices_dims,
                                    int indice_nbDims, T* output, hipStream_t stream) {
  // Fill the output tensor descriptor with row-major strides.
  TensorDesc tensor_desc;
  memset((void*)&tensor_desc, 0, sizeof(TensorDesc));
  tensor_desc.dim = nbDims;
  tensor_desc.shape[nbDims - 1] = dims[nbDims - 1];
  tensor_desc.stride[nbDims - 1] = 1;
  for (int i = nbDims - 2; i >= 0; --i) {
    tensor_desc.shape[i] = dims[i];
    tensor_desc.stride[i] = dims[i + 1] * tensor_desc.stride[i + 1];
  }
  const int data_size = tensor_desc.stride[0] * tensor_desc.shape[0];
  // Same for the indices tensor.
  TensorDesc indice_desc;
  memset((void*)&indice_desc, 0, sizeof(TensorDesc));
  indice_desc.dim = indice_nbDims;
  indice_desc.shape[indice_nbDims - 1] = indices_dims[indice_nbDims - 1];
  indice_desc.stride[indice_nbDims - 1] = 1;
  for (int i = indice_nbDims - 2; i >= 0; --i) {
    indice_desc.shape[i] = indices_dims[i];
    indice_desc.stride[i] = indices_dims[i + 1] * indice_desc.stride[i + 1];
  }
  // output = np.copy(data)
  // Bug fix: issue the copy on the caller's stream so it is ordered before
  // the scatter kernel below; the original omitted the stream argument and
  // ran the copy on the default stream.
  hipMemcpyAsync(output, data, data_size * sizeof(T), hipMemcpyDeviceToDevice, stream);
  int num_update_indice = 1;
  for (int i = 0; i < indice_nbDims - 1; ++i) {
    num_update_indice *= indice_desc.shape[i];
  }
  // scatter: one work item per index tuple.
  const int col_block = DIVUP(num_update_indice, THREADS_PER_BLOCK);
  hipLaunchKernelGGL(( onnx_scatternd_kernel), dim3(col_block), dim3(THREADS_PER_BLOCK), 0, stream,
      num_update_indice, indices, update, output, tensor_desc, indice_desc);
}
template void TRTONNXScatterNDKernelLauncher<float>(const float* data, const int* indices,
const float* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, float* output,
hipStream_t stream);
template void TRTONNXScatterNDKernelLauncher<int>(const int* data, const int* indices,
const int* update, const int* dims, int nbDims,
const int* indices_dims, int indice_nbDims,
int* output, hipStream_t stream);
| fa5d5ad9f7438a8e36233a616fa76f0352d9ab8e.cu | // Copyright (c) OpenMMLab. All rights reserved.
#include <stdio.h>
#include <vector>
#include "common_cuda_helper.hpp"
#include "trt_plugin_helper.hpp"
using mmdeploy::TensorDesc;
// ONNX ScatterND: each of the n work items reads one index tuple from
// `indices`, converts it to a flat offset via the output strides, and copies
// one contiguous slice of `update` into `output` at that offset.
template <typename T>
__global__ void onnx_scatternd_kernel(const int n, const int* indices, const T* update, T* output,
                                      TensorDesc tensor_desc, TensorDesc indice_desc) {
  // Number of index components per tuple (last dim of the indices tensor).
  const int indice_cols = indice_desc.shape[indice_desc.dim - 1];
  // Elements per copied slice: stride of the deepest indexed axis.
  const int copy_stride = tensor_desc.stride[indice_cols - 1];
  const int* stride = &(tensor_desc.stride[0]);
  CUDA_1D_KERNEL_LOOP(index, n) {
    int output_offset = 0;
    const int* indices_current = indices + index * indice_cols;
    // Dot-product of the index tuple with the output strides.
    for (int i = 0; i < indice_cols; ++i) {
      output_offset += stride[i] * indices_current[i];
    }
    // memcpy is valid in device code; copies one slice per work item.
    memcpy(output + output_offset, update + index * copy_stride, copy_stride * sizeof(T));
  }
}
// Host launcher for ONNX ScatterND: builds shape/stride descriptors for the
// output and indices tensors, copies `data` into `output`, then scatters the
// update slices on `stream`.
template <typename T>
void TRTONNXScatterNDKernelLauncher(const T* data, const int* indices, const T* update,
                                    const int* dims, int nbDims, const int* indices_dims,
                                    int indice_nbDims, T* output, cudaStream_t stream) {
  // Fill the output tensor descriptor with row-major strides.
  TensorDesc tensor_desc;
  memset((void*)&tensor_desc, 0, sizeof(TensorDesc));
  tensor_desc.dim = nbDims;
  tensor_desc.shape[nbDims - 1] = dims[nbDims - 1];
  tensor_desc.stride[nbDims - 1] = 1;
  for (int i = nbDims - 2; i >= 0; --i) {
    tensor_desc.shape[i] = dims[i];
    tensor_desc.stride[i] = dims[i + 1] * tensor_desc.stride[i + 1];
  }
  const int data_size = tensor_desc.stride[0] * tensor_desc.shape[0];
  // Same for the indices tensor.
  TensorDesc indice_desc;
  memset((void*)&indice_desc, 0, sizeof(TensorDesc));
  indice_desc.dim = indice_nbDims;
  indice_desc.shape[indice_nbDims - 1] = indices_dims[indice_nbDims - 1];
  indice_desc.stride[indice_nbDims - 1] = 1;
  for (int i = indice_nbDims - 2; i >= 0; --i) {
    indice_desc.shape[i] = indices_dims[i];
    indice_desc.stride[i] = indices_dims[i + 1] * indice_desc.stride[i + 1];
  }
  // output = np.copy(data)
  // Bug fix: issue the copy on the caller's stream so it is ordered before
  // the scatter kernel below; the original omitted the stream argument and
  // ran the copy on the default stream.
  cudaMemcpyAsync(output, data, data_size * sizeof(T), cudaMemcpyDeviceToDevice, stream);
  int num_update_indice = 1;
  for (int i = 0; i < indice_nbDims - 1; ++i) {
    num_update_indice *= indice_desc.shape[i];
  }
  // scatter: one work item per index tuple.
  const int col_block = DIVUP(num_update_indice, THREADS_PER_BLOCK);
  onnx_scatternd_kernel<<<col_block, THREADS_PER_BLOCK, 0, stream>>>(
      num_update_indice, indices, update, output, tensor_desc, indice_desc);
}
template void TRTONNXScatterNDKernelLauncher<float>(const float* data, const int* indices,
const float* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, float* output,
cudaStream_t stream);
template void TRTONNXScatterNDKernelLauncher<int>(const int* data, const int* indices,
const int* update, const int* dims, int nbDims,
const int* indices_dims, int indice_nbDims,
int* output, cudaStream_t stream);
|
f21080bd19d4244fd5235fcb34bbc85176f63349.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_runtime.h>
#include "orttraining/training_ops/cuda/activation/activations_grad_impl.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
struct OP_GeluGrad : public CtxGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Default{});
}
};
template <typename T>
struct OP_FastGeluGrad : public CtxGeluGrad {
__device__ __inline__ T operator()(const T& dy, const T& x) const {
return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Approximation{});
}
};
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseNoBroadcastImpl(lhs_data, rhs_data, \
output_data, \
*reinterpret_cast<const OP_##name<T>*>(func_ctx), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(name, T) \
template void Impl_##name<T>(const T* lhs_data, const T* rhs_data, T* output_data, const Ctx##name* func_ctx, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define ACTIVATION_GRAD_OP_NAME(name) \
BINARY_ELEMENTWISE_IMPL(name); \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(name)
ACTIVATION_GRAD_OPS()
#undef ACTIVATION_GRAD_OP_NAME
} // namespace cuda
} // namespace onnxruntime
| f21080bd19d4244fd5235fcb34bbc85176f63349.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include "orttraining/training_ops/cuda/activation/activations_grad_impl.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
// Element-wise functor for the GELU backward pass (exact formulation).
// dy: upstream gradient, x: original forward input.
template <typename T>
struct OP_GeluGrad : public CtxGeluGrad {
  __device__ __inline__ T operator()(const T& dy, const T& x) const {
    return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Default{});
  }
};

// Element-wise functor for the FastGelu backward pass (tanh approximation).
template <typename T>
struct OP_FastGeluGrad : public CtxGeluGrad {
  __device__ __inline__ T operator()(const T& dy, const T& x) const {
    return ComputeGeluGradScalar(dy, x, gelu_computation_mode::Approximation{});
  }
};
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseNoBroadcastImpl(lhs_data, rhs_data, \
output_data, \
*reinterpret_cast<const OP_##name<T>*>(func_ctx), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(name, T) \
template void Impl_##name<T>(const T* lhs_data, const T* rhs_data, T* output_data, const Ctx##name* func_ctx, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define ACTIVATION_GRAD_OP_NAME(name) \
BINARY_ELEMENTWISE_IMPL(name); \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(name)
ACTIVATION_GRAD_OPS()
#undef ACTIVATION_GRAD_OP_NAME
} // namespace cuda
} // namespace onnxruntime
|
3358c5bec89eae06231ecae744f5b1f0c1210fae.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (C) 2011 Abhinav Jauhri (abhinav.jauhri@gmail.com), Carnegie Mellon University - Silicon Valley
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "matrix_mul.h"
#include "stdio.h"
#define TILE_WIDTH 32
#define BLOCK 32
namespace cuda
{
// Naive matrix multiply: one thread computes one element of
// sq_matrix_result = sq_matrix_1 * sq_matrix_2 (square, row-major).
__global__
void
matrix_mul_kernel(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension)
{
  int tx = threadIdx.x + blockIdx.x * blockDim.x;
  int ty = threadIdx.y + blockIdx.y * blockDim.y;
  // Guard: the grid may overshoot the matrix for non-multiple sizes.
  if (tx < sq_dimension && ty < sq_dimension) {
    float sum = 0.0f;
    #pragma unroll
    for (int k = 0; k < sq_dimension; k++) {
      sum += sq_matrix_1[ty * sq_dimension + k] * sq_matrix_2[k * sq_dimension + tx];
    }
    sq_matrix_result[ty * sq_dimension + tx] = sum;
    // BUGFIX: removed the __syncthreads() that sat inside this divergent
    // branch (undefined behavior when some threads of the block fail the
    // bounds check).  No shared memory is exchanged, so no barrier is
    // needed at all.
  }
}
// Tiled (shared-memory) matrix multiply:
// sq_matrix_result = sq_matrix_1 * sq_matrix_2, all square, row-major.
// Each block computes one blockDim.x x blockDim.x tile of the result.
// Requires a square block with blockDim.x == blockDim.y <= TILE_WIDTH.
__global__
void
matrix_mul_kernel_shared(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension)
{
  __shared__ float As[TILE_WIDTH][TILE_WIDTH];
  __shared__ float Bs[TILE_WIDTH][TILE_WIDTH];

  int threadRow = threadIdx.y;
  int threadCol = threadIdx.x;
  int result_row = blockIdx.y * blockDim.x + threadRow;
  int result_col = blockIdx.x * blockDim.x + threadCol;

  float sum = 0.0f;

  // BUGFIX: iterate ceil(sq_dimension / blockDim.x) tiles.  The old
  // integer division silently dropped the partial tile whenever
  // sq_dimension was not a multiple of the block width.
  int numTiles = (sq_dimension + blockDim.x - 1) / blockDim.x;
  for (int m = 0; m < numTiles; m++) {
    int j = m * blockDim.x + threadCol;   // column of A to stage
    int i = m * blockDim.x + threadRow;   // row of B to stage

    // BUGFIX: guard the staging loads and zero-pad out-of-range elements,
    // so the kernel is correct and memory-safe for arbitrary sizes.
    As[threadRow][threadCol] =
        (result_row < sq_dimension && j < sq_dimension)
            ? sq_matrix_1[result_row * sq_dimension + j] : 0.0f;
    Bs[threadRow][threadCol] =
        (i < sq_dimension && result_col < sq_dimension)
            ? sq_matrix_2[i * sq_dimension + result_col] : 0.0f;

    // Barriers sit outside any divergent branch: all threads reach them.
    __syncthreads();

    for (int k = 0; k < blockDim.x; k++) {
      sum += As[threadRow][k] * Bs[k][threadCol];
    }

    __syncthreads();
  }

  // BUGFIX: guard the store; threads of a rounded-up grid no longer
  // write past the end of the result matrix.
  if (result_row < sq_dimension && result_col < sq_dimension) {
    sq_matrix_result[result_row * sq_dimension + result_col] = sum;
  }
}
// Host driver: copies both operands to the device, launches the tiled
// multiply kernel, and copies the product back.
//   sq_matrix_1, sq_matrix_2 : host input matrices (sq_dimension^2 floats)
//   sq_matrix_result         : host output buffer (sq_dimension^2 floats)
void
matrix_multiplication(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, unsigned int sq_dimension)
{
  // size_t avoids 32-bit overflow for large matrices.
  size_t size = (size_t)sq_dimension * sq_dimension * sizeof(float);
  float *sq_matrix_1_d, *sq_matrix_2_d, *sq_matrix_result_d;

  /* 1st Part: allocate device memory and copy the inputs over */
  hipMalloc((void**) &sq_matrix_1_d, size);
  hipMemcpy(sq_matrix_1_d, sq_matrix_1, size, hipMemcpyHostToDevice);
  hipMalloc((void**) &sq_matrix_2_d, size);
  hipMemcpy(sq_matrix_2_d, sq_matrix_2, size, hipMemcpyHostToDevice);
  hipMalloc((void**) &sq_matrix_result_d, size);

  /* 2nd Part: invoke kernel */
  if (sq_dimension <= TILE_WIDTH) {
    // Small matrix: a single block covers the whole result.
    dim3 dimBlock(sq_dimension, sq_dimension);
    dim3 dimGrid(1, 1);
    hipLaunchKernelGGL(( matrix_mul_kernel_shared), dim3(dimGrid), dim3(dimBlock), 0, 0, sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension);
  } else {
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    // BUGFIX: the grid must be ceil(sq_dimension / TILE_WIDTH) blocks per
    // side.  The old formula (total elements / threads-per-block) launched
    // a grid quadratically too large, whose extra threads wrote far past
    // the end of the result matrix.  The kernel's bounds guards handle the
    // partial tail tile.
    int blockNum = (sq_dimension + TILE_WIDTH - 1) / TILE_WIDTH;
    dim3 dimGrid(blockNum, blockNum);
    hipLaunchKernelGGL(( matrix_mul_kernel_shared), dim3(dimGrid), dim3(dimBlock), 0, 0, sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension);
  }

  /* 3rd Part: transfer the result from device to host */
  hipMemcpy(sq_matrix_result, sq_matrix_result_d, size, hipMemcpyDeviceToHost);

  hipFree(sq_matrix_1_d);
  hipFree(sq_matrix_2_d);
  hipFree(sq_matrix_result_d);
}
} // namespace cuda
| 3358c5bec89eae06231ecae744f5b1f0c1210fae.cu | /*
Copyright (C) 2011 Abhinav Jauhri (abhinav.jauhri@gmail.com), Carnegie Mellon University - Silicon Valley
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "matrix_mul.h"
#include "stdio.h"
#define TILE_WIDTH 32
#define BLOCK 32
namespace cuda
{
// Naive matrix multiply: one thread computes one element of
// sq_matrix_result = sq_matrix_1 * sq_matrix_2 (square, row-major).
__global__
void
matrix_mul_kernel(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension)
{
  int tx = threadIdx.x + blockIdx.x * blockDim.x;
  int ty = threadIdx.y + blockIdx.y * blockDim.y;
  // Guard: the grid may overshoot the matrix for non-multiple sizes.
  if (tx < sq_dimension && ty < sq_dimension) {
    float sum = 0.0f;
    #pragma unroll
    for (int k = 0; k < sq_dimension; k++) {
      sum += sq_matrix_1[ty * sq_dimension + k] * sq_matrix_2[k * sq_dimension + tx];
    }
    sq_matrix_result[ty * sq_dimension + tx] = sum;
    // BUGFIX: removed the __syncthreads() that sat inside this divergent
    // branch (undefined behavior when some threads of the block fail the
    // bounds check).  No shared memory is exchanged, so no barrier is
    // needed at all.
  }
}
// Tiled (shared-memory) matrix multiply:
// sq_matrix_result = sq_matrix_1 * sq_matrix_2, all square, row-major.
// Each block computes one blockDim.x x blockDim.x tile of the result.
// Requires a square block with blockDim.x == blockDim.y <= TILE_WIDTH.
__global__
void
matrix_mul_kernel_shared(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension)
{
  __shared__ float As[TILE_WIDTH][TILE_WIDTH];
  __shared__ float Bs[TILE_WIDTH][TILE_WIDTH];

  int threadRow = threadIdx.y;
  int threadCol = threadIdx.x;
  int result_row = blockIdx.y * blockDim.x + threadRow;
  int result_col = blockIdx.x * blockDim.x + threadCol;

  float sum = 0.0f;

  // BUGFIX: iterate ceil(sq_dimension / blockDim.x) tiles.  The old
  // integer division silently dropped the partial tile whenever
  // sq_dimension was not a multiple of the block width.
  int numTiles = (sq_dimension + blockDim.x - 1) / blockDim.x;
  for (int m = 0; m < numTiles; m++) {
    int j = m * blockDim.x + threadCol;   // column of A to stage
    int i = m * blockDim.x + threadRow;   // row of B to stage

    // BUGFIX: guard the staging loads and zero-pad out-of-range elements,
    // so the kernel is correct and memory-safe for arbitrary sizes.
    As[threadRow][threadCol] =
        (result_row < sq_dimension && j < sq_dimension)
            ? sq_matrix_1[result_row * sq_dimension + j] : 0.0f;
    Bs[threadRow][threadCol] =
        (i < sq_dimension && result_col < sq_dimension)
            ? sq_matrix_2[i * sq_dimension + result_col] : 0.0f;

    // Barriers sit outside any divergent branch: all threads reach them.
    __syncthreads();

    for (int k = 0; k < blockDim.x; k++) {
      sum += As[threadRow][k] * Bs[k][threadCol];
    }

    __syncthreads();
  }

  // BUGFIX: guard the store; threads of a rounded-up grid no longer
  // write past the end of the result matrix.
  if (result_row < sq_dimension && result_col < sq_dimension) {
    sq_matrix_result[result_row * sq_dimension + result_col] = sum;
  }
}
// Host driver: copies both operands to the device, launches the tiled
// multiply kernel, and copies the product back.
//   sq_matrix_1, sq_matrix_2 : host input matrices (sq_dimension^2 floats)
//   sq_matrix_result         : host output buffer (sq_dimension^2 floats)
void
matrix_multiplication(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, unsigned int sq_dimension)
{
  // size_t avoids 32-bit overflow for large matrices.
  size_t size = (size_t)sq_dimension * sq_dimension * sizeof(float);
  float *sq_matrix_1_d, *sq_matrix_2_d, *sq_matrix_result_d;

  /* 1st Part: allocate device memory and copy the inputs over */
  cudaMalloc((void**) &sq_matrix_1_d, size);
  cudaMemcpy(sq_matrix_1_d, sq_matrix_1, size, cudaMemcpyHostToDevice);
  cudaMalloc((void**) &sq_matrix_2_d, size);
  cudaMemcpy(sq_matrix_2_d, sq_matrix_2, size, cudaMemcpyHostToDevice);
  cudaMalloc((void**) &sq_matrix_result_d, size);

  /* 2nd Part: invoke kernel */
  if (sq_dimension <= TILE_WIDTH) {
    // Small matrix: a single block covers the whole result.
    dim3 dimBlock(sq_dimension, sq_dimension);
    dim3 dimGrid(1, 1);
    matrix_mul_kernel_shared<<<dimGrid, dimBlock>>>(sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension);
  } else {
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    // BUGFIX: the grid must be ceil(sq_dimension / TILE_WIDTH) blocks per
    // side.  The old formula (total elements / threads-per-block) launched
    // a grid quadratically too large, whose extra threads wrote far past
    // the end of the result matrix.  The kernel's bounds guards handle the
    // partial tail tile.
    int blockNum = (sq_dimension + TILE_WIDTH - 1) / TILE_WIDTH;
    dim3 dimGrid(blockNum, blockNum);
    matrix_mul_kernel_shared<<<dimGrid, dimBlock>>>(sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension);
  }

  /* 3rd Part: transfer the result from device to host */
  cudaMemcpy(sq_matrix_result, sq_matrix_result_d, size, cudaMemcpyDeviceToHost);

  cudaFree(sq_matrix_1_d);
  cudaFree(sq_matrix_2_d);
  cudaFree(sq_matrix_result_d);
}
} // namespace cuda
|
31db4b040580b1538b977c19b91d07308a54ab78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "DS_timer.h"
/*
* A : m x k
* B : k x n
* C : m x n
*/
int main(int argc, char* argv[])
{
int m, n, k;
float *
dim3 gridDim(ceil((float)m / ), ceil(), 1)
return 0;
}
| 31db4b040580b1538b977c19b91d07308a54ab78.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "DS_timer.h"
/*
* A : m x k
* B : k x n
* C : m x n
*/
int main(int argc, char* argv[])
{
int m, n, k;
float *
dim3 gridDim(ceil((float)m / ), ceil(), 1)
return 0;
}
|
39e0feb3fefac6516fa2a78731243c65617a520a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
struct GlobalConstants {
float3* points;
float* gaussianMask;
float* gradXMask;
float* gradYMask;
uint* clusters;
};
__constant__ GlobalConstants params;
__global__ void kernelGaussanFilter(float* filtered, float* image) {
float tot = 0;
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
for(int i=0;i<5;i++) {
for(int j=0; j<5;j++) {
tot += gaussianMask[i*5+j] * image[row+i][col+j];
}
}
filtered[row][col] = tot
}
__global__ void kernelGradients(float* gradient, float* angle, float image) {
float gradX = 0;
float gradY = 0;
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
for(int i=-1;i<2;i++) {
for(int j=-1; j<2;j++) {
gradX += gradXMask[(i+1)*3+j+1] * image[row+i][col+j];
gradY += gradYMask[(i+1)*3+j+1] * image[row+i][col+j];
}
}
gradient[row][col] = sqrt(gradX^2+gradY^2);
angle[row][col] = arctan(gradY/gradX);
}
void
kmeans::kmeans() {
dim3 blockDim(32,32);
dim3 gridDim(width/32, height/32);
hipLaunchKernelGGL(( kernelGaussanFilter), dim3(gridDim),dim3(blockDim), 0, 0, filtered, image);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernelGradients), dim3(gridDim),dim3(blockDim), 0, 0, gradient, angle, image);
hipDeviceSynchronize();
}
| 39e0feb3fefac6516fa2a78731243c65617a520a.cu | struct GlobalConstants {
float3* points;
float* gaussianMask;
float* gradXMask;
float* gradYMask;
uint* clusters;
};
__constant__ GlobalConstants params;
// 5x5 convolution intended to apply `gaussianMask` to `image`.
// NOTE(review): does not compile as written --
//  - `gaussianMask` is not in scope (presumably params.gaussianMask; verify);
//  - `image`/`filtered` are float* but indexed as 2-D arrays (a row pitch /
//    width parameter is missing);
//  - the final assignment lacks a semicolon;
//  - no bounds checks for the 5x5 window at image edges.
__global__ void kernelGaussanFilter(float* filtered, float* image) {
	float tot = 0;
	int row = blockDim.y*blockIdx.y + threadIdx.y;
	int col = blockDim.x*blockIdx.x + threadIdx.x;
	for(int i=0;i<5;i++) {
		for(int j=0; j<5;j++) {
			tot += gaussianMask[i*5+j] * image[row+i][col+j];
		}
	}
	filtered[row][col] = tot
}
// 3x3 Sobel-style gradient magnitude and direction.
// NOTE(review): does not compile as written --
//  - `image` is declared `float` (presumably `float*`; verify);
//  - `gradXMask`/`gradYMask` are not in scope (presumably via params);
//  - `gradX^2` is integer XOR, not a square (wants gradX*gradX);
//  - `arctan` is not a CUDA function (wants atanf / atan2f);
//  - same 2-D indexing problem as above.
__global__ void kernelGradients(float* gradient, float* angle, float image) {
	float gradX = 0;
	float gradY = 0;
	int row = blockDim.y*blockIdx.y + threadIdx.y;
	int col = blockDim.x*blockIdx.x + threadIdx.x;
	for(int i=-1;i<2;i++) {
		for(int j=-1; j<2;j++) {
			gradX += gradXMask[(i+1)*3+j+1] * image[row+i][col+j];
			gradY += gradYMask[(i+1)*3+j+1] * image[row+i][col+j];
		}
	}
	gradient[row][col] = sqrt(gradX^2+gradY^2);
	angle[row][col] = arctan(gradY/gradX);
}
// Host driver launching both kernels back to back.
// NOTE(review): does not compile as written --
//  - a constructor (`kmeans::kmeans()`) cannot have a `void` return type;
//  - width/height/filtered/image/gradient/angle are undeclared here
//    (presumably class members; the enclosing class is not in this file).
void
kmeans::kmeans() {
	dim3 blockDim(32,32);
	dim3 gridDim(width/32, height/32);
	kernelGaussanFilter<<<gridDim,blockDim>>>(filtered, image);
	cudaDeviceSynchronize();
	kernelGradients<<<gridDim,blockDim>>>(gradient, angle, image);
	cudaDeviceSynchronize();
}
|
c5814d44221e7382347f3e0f2e78ef08ee344aa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/core/solver/m2l.cu
/// @brief Compute multipole to local
///
/// @author Mu Yang <emfomy@gmail.com>
///
#include <nbfmm/core.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_core
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute multipole to local
///
/// @param[in] base_dim the number of cells in base level per side.
/// @param[in] level_dim the number of cells in this level per side.
/// @param[in] cell_size the size of cell of this level.
/// @param[in] cell_level_position the cell positions of this level.
/// @param[in] cell_level_weight the cell weights of this level.
/// @param[out] cell_level_effect the cell effects of this level.
///
__global__ void m2lDevice(
    const int base_dim,
    const int level_dim,
    const int cell_size,
    const float2* cell_level_position,
    const float* cell_level_weight,
    float2* cell_level_effect
) {
  // One thread per cell of this level.
  const int target_x = threadIdx.x + blockIdx.x * blockDim.x;
  const int target_y = threadIdx.y + blockIdx.y * blockDim.y;
  if ( target_x >= level_dim || target_y >= level_dim ) {
    return;
  }
  // Parent cell coordinates: children come in aligned 2x2 groups, so the
  // parent is found by clearing the low bit of each coordinate.
  const int parent_x = target_x & ~1;
  const int parent_y = target_y & ~1;
  // Cells of this level are laid out on the base grid with spacing cell_size.
  const int target_idx = (target_x + target_y * base_dim) * cell_size;
  const float2 target_position = cell_level_position[target_idx];
  float2 target_effect = make_float2(0.0f, 0.0f);
  // Go through children of parent cell's neighbors:
  // the parent's 3x3 neighborhood spans children [parent-2, parent+4).
  for ( int y = parent_y-2; y < parent_y+4; ++y ) {
    if ( y >= 0 && y < level_dim ) {
      for ( int x = parent_x-2; x < parent_x+4; ++x ) {
        if ( x >= 0 && x < level_dim ) {
          // Ignore target cell's neighbors (and itself): the near field is
          // handled elsewhere, M2L only accumulates the far field.
          if ( abs(x-target_x) > 1 || abs(y-target_y) > 1 ) {
            int idx = (x + y * base_dim) * cell_size;
            target_effect += nbfmm::kernelFunction(target_position, cell_level_position[idx], cell_level_weight[idx]);
          }
        }
      }
    }
  }
  cell_level_effect[target_idx] = target_effect;
}
/// @}
// M2L
// M2L driver (HIP): for every level, accumulate each cell's far-field
// interaction-list contribution into its local effect.
void nbfmm::Solver::m2l() {
  if ( num_level_ <= 0 ) {
    return;
  }
  int level_dim = base_dim_;
  int cell_size = 1;
  // Each coarser level halves the grid side and doubles the cell size.
  for ( auto level = 0; level < num_level_; ++level, level_dim /= 2, cell_size *= 2 ) {
    const int block_dim_side = (level_dim < kMaxBlockDim) ? level_dim : kMaxBlockDim;
    const int grid_dim_side  = (level_dim < kMaxBlockDim) ? 1 : (level_dim / block_dim_side);
    const dim3 block_dim(block_dim_side, block_dim_side);
    const dim3 grid_dim(grid_dim_side, grid_dim_side);
    // Each level's cell arrays occupy one base_dim_^2-sized slab.
    const int offset = level * base_dim_ * base_dim_;
    // BUGFIX: hipLaunchKernelGGL takes (kernel, numBlocks, dimBlocks, ...),
    // but the arguments were reversed.  The old order still covered every
    // cell only because both dim3s are square with matching side products,
    // yet it launched up to kMaxBlockDim^2 blocks of tiny (even one-thread)
    // blocks.
    hipLaunchKernelGGL(m2lDevice, grid_dim, block_dim, 0, 0, base_dim_, level_dim, cell_size,
        gpuptr_cell_position_ + offset, gpuptr_cell_weight_ + offset, gpuptr_cell_effect_ + offset);
  }
}
| c5814d44221e7382347f3e0f2e78ef08ee344aa4.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/core/solver/m2l.cu
/// @brief Compute multipole to local
///
/// @author Mu Yang <emfomy@gmail.com>
///
#include <nbfmm/core.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_core
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute multipole to local
///
/// @param[in] base_dim the number of cells in base level per side.
/// @param[in] level_dim the number of cells in this level per side.
/// @param[in] cell_size the size of cell of this level.
/// @param[in] cell_level_position the cell positions of this level.
/// @param[in] cell_level_weight the cell weights of this level.
/// @param[out] cell_level_effect the cell effects of this level.
///
// Accumulates the multipole-to-local (far-field) contribution for one cell.
// One thread handles one cell of the current level; its interaction list is
// the set of children of the parent cell's neighbors that are not themselves
// neighbors of the target cell.
__global__ void m2lDevice(
    const int base_dim,
    const int level_dim,
    const int cell_size,
    const float2* cell_level_position,
    const float* cell_level_weight,
    float2* cell_level_effect
) {
  const int tx = threadIdx.x + blockIdx.x * blockDim.x;
  const int ty = threadIdx.y + blockIdx.y * blockDim.y;
  if ( tx >= level_dim || ty >= level_dim ) {
    return;
  }
  // Parent cell coordinates: children come in aligned 2x2 groups.
  const int px = tx & ~1;
  const int py = ty & ~1;
  // Cells of this level are stored on the base grid with spacing cell_size.
  const int self_idx = (tx + ty * base_dim) * cell_size;
  const float2 self_position = cell_level_position[self_idx];
  float2 effect = make_float2(0.0f, 0.0f);
  // The parent's 3x3 neighborhood spans children [p-2, p+4) in each axis.
  for ( int ny = py-2; ny < py+4; ++ny ) {
    if ( ny < 0 || ny >= level_dim ) {
      continue;
    }
    for ( int nx = px-2; nx < px+4; ++nx ) {
      if ( nx < 0 || nx >= level_dim ) {
        continue;
      }
      // Skip the target cell and its direct neighbors: those interactions
      // belong to the near field, not to M2L.
      if ( abs(nx-tx) <= 1 && abs(ny-ty) <= 1 ) {
        continue;
      }
      const int idx = (nx + ny * base_dim) * cell_size;
      effect += nbfmm::kernelFunction(self_position, cell_level_position[idx], cell_level_weight[idx]);
    }
  }
  cell_level_effect[self_idx] = effect;
}
/// @}
// M2L
// M2L driver: for every level, accumulate each cell's far-field
// interaction-list contribution into its local effect.
void nbfmm::Solver::m2l() {
  if ( num_level_ <= 0 ) {
    return;
  }
  int level_dim = base_dim_;
  int cell_size = 1;
  // Each coarser level halves the grid side and doubles the cell size.
  for ( auto level = 0; level < num_level_; ++level, level_dim /= 2, cell_size *= 2 ) {
    const int block_dim_side = (level_dim < kMaxBlockDim) ? level_dim : kMaxBlockDim;
    const int grid_dim_side  = (level_dim < kMaxBlockDim) ? 1 : (level_dim / block_dim_side);
    const dim3 block_dim(block_dim_side, block_dim_side);
    const dim3 grid_dim(grid_dim_side, grid_dim_side);
    // Each level's cell arrays occupy one base_dim_^2-sized slab.
    const int offset = level * base_dim_ * base_dim_;
    // BUGFIX: the execution configuration was reversed
    // (<<<block_dim, grid_dim>>>); the grid comes first in <<<...>>>.
    // The old order still covered every cell only because both dim3s are
    // square with matching side products, yet it launched up to
    // kMaxBlockDim^2 blocks of tiny (even one-thread) blocks.
    m2lDevice<<<grid_dim, block_dim>>>(base_dim_, level_dim, cell_size,
        gpuptr_cell_position_ + offset, gpuptr_cell_weight_ + offset, gpuptr_cell_effect_ + offset);
  }
}
|
af19416f2920054fe9ffb09c9a20a09bf92e3121.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "utils.h"
#define VERBOSE 1
#define WINDOW_LENGTH 25
#define AXIS 3
#define OUTPUT_FILE "output.csv"
int main( int argc, char* argv[] )
{
/* Local declarations */
int *xcoord_loc, *ycoord_loc, *zcoord_loc;
int* mag, *intense, *means, *max, *min;
float* sd;
int numOfLines, numOfReads;
size_t coord_size, window_size;
/* Device declarations */
int *tempx, *tempy, *tempz, *x_dev, *y_dev, *z_dev;
int *mag_dev, *in_dev, *means_dev, *max_dev, *min_dev;
float* sd_dev;
hipError_t err = hipSuccess;
/* Read in the data from CSV file */
if( ( readData( &xcoord_loc, &ycoord_loc, &zcoord_loc, &numOfLines ) )!= 0 ){
fprintf( stderr, "Reading of csv file failed!" );
return 1;
}
/* Initializes some variables based on the data */
numOfReads = numOfLines - (WINDOW_LENGTH - 1);
coord_size = numOfLines * sizeof( int );
window_size = numOfReads * WINDOW_LENGTH * sizeof( int );
/* Allocate local result arrays */
allocateLocal( &mag, &intense, &means, &sd, &max, &min, numOfReads );
/* Allocates all device memory */
allocate_dev( &tempx, window_size );
allocate_dev( &tempy, window_size );
allocate_dev( &tempz, window_size );
allocate_dev( &x_dev, coord_size );
allocate_dev( &y_dev, coord_size );
allocate_dev( &z_dev, coord_size );
allocate_dev( &mag_dev, numOfReads * sizeof(int) );
allocate_dev( &in_dev, numOfReads * sizeof(int) );
allocate_dev( &means_dev, numOfReads * AXIS * sizeof(int) );
allocate_dev( &max_dev, numOfReads * AXIS * sizeof(int) );
allocate_dev( &min_dev, numOfReads * AXIS * sizeof(int) );
allocate_devf( &sd_dev, numOfReads * AXIS * sizeof(float) );
/* Copy the original data coordinates to the device */
err = hipMemcpy( x_dev, xcoord_loc, coord_size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy xcoords from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( y_dev, ycoord_loc, coord_size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy ycoords from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( z_dev, zcoord_loc, coord_size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy zcoords from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Run massively parrallel CUDA function */
int threadsPerBlock = 256;
int blocksPerGrid =( numOfLines + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( cudaMagic), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, mag_dev, in_dev, means_dev, sd_dev, max_dev, min_dev, tempx, tempy, tempz, x_dev, y_dev, z_dev, numOfLines ,numOfReads );
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy the result arrays back to the host */
err = hipMemcpy( mag, mag_dev, numOfReads * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy MAGNATUDE from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( intense, in_dev, numOfReads * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy INTENSITY from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( means, means_dev, numOfReads * AXIS * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy MEANS from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( sd, sd_dev, numOfReads * AXIS * sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy STANDARD DEVIATION from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( max, max_dev, numOfReads * AXIS * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy MAX from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy( min, min_dev, numOfReads * AXIS * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy MIN from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Writes out the final csv file */
writeCSV( OUTPUT_FILE, &mag, &intense, &means, &sd, &max, &min, numOfReads);
/* Free dynamic memory */
free( xcoord_loc );
free( ycoord_loc );
free( zcoord_loc );
free( mag );
free( intense );
free( means );
free( sd );
free( max );
free( min );
/* Free CUDA memory */
hipFree(x_dev);
hipFree(y_dev);
hipFree(z_dev);
hipFree(tempx);
hipFree(tempy);
hipFree(tempz);
hipFree( mag_dev );
hipFree( in_dev );
hipFree( means_dev );
hipFree( sd_dev );
hipFree( max_dev );
hipFree( min_dev );
}
| af19416f2920054fe9ffb09c9a20a09bf92e3121.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include "utils.h"
#define VERBOSE 1
#define WINDOW_LENGTH 25
#define AXIS 3
#define OUTPUT_FILE "output.csv"
/*
 * Entry point: reads 3-axis coordinate samples from a CSV file, runs a
 * sliding-window analysis (window length WINDOW_LENGTH) on the GPU via the
 * cudaMagic kernel, and writes one row of per-window statistics
 * (magnitude, intensity, per-axis mean / standard deviation / max / min)
 * for each of the numOfReads windows to OUTPUT_FILE.
 *
 * NOTE(review): readData, allocateLocal, allocate_dev/allocate_devf,
 * cudaMagic and writeCSV come from utils.h and are not visible here;
 * comments about their contracts are assumptions to be confirmed.
 */
int main( int argc, char* argv[] )
{
    /* Host-side declarations */
    int *xcoord_loc, *ycoord_loc, *zcoord_loc;
    int* mag, *intense, *means, *max, *min;
    float* sd;
    int numOfLines, numOfReads;
    size_t coord_size, window_size;
    /* Device-side declarations */
    int *tempx, *tempy, *tempz, *x_dev, *y_dev, *z_dev;
    int *mag_dev, *in_dev, *means_dev, *max_dev, *min_dev;
    float* sd_dev;
    cudaError_t err = cudaSuccess;
    /* Read the raw coordinate data from the CSV file */
    if( ( readData( &xcoord_loc, &ycoord_loc, &zcoord_loc, &numOfLines ) )!= 0 ){
        fprintf( stderr, "Reading of csv file failed!" );
        return 1;
    }
    /* Derived sizes: each window of WINDOW_LENGTH samples yields one "read" */
    numOfReads = numOfLines - (WINDOW_LENGTH - 1);
    coord_size = numOfLines * sizeof( int );
    window_size = numOfReads * WINDOW_LENGTH * sizeof( int );
    /* Allocate host result arrays (means/sd/max/min hold AXIS values per read) */
    allocateLocal( &mag, &intense, &means, &sd, &max, &min, numOfReads );
    /* Allocate all device buffers (temp* hold the unrolled per-window copies) */
    allocate_dev( &tempx, window_size );
    allocate_dev( &tempy, window_size );
    allocate_dev( &tempz, window_size );
    allocate_dev( &x_dev, coord_size );
    allocate_dev( &y_dev, coord_size );
    allocate_dev( &z_dev, coord_size );
    allocate_dev( &mag_dev, numOfReads * sizeof(int) );
    allocate_dev( &in_dev, numOfReads * sizeof(int) );
    allocate_dev( &means_dev, numOfReads * AXIS * sizeof(int) );
    allocate_dev( &max_dev, numOfReads * AXIS * sizeof(int) );
    allocate_dev( &min_dev, numOfReads * AXIS * sizeof(int) );
    allocate_devf( &sd_dev, numOfReads * AXIS * sizeof(float) );
    /* Copy the original data coordinates to the device */
    err = cudaMemcpy( x_dev, xcoord_loc, coord_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy xcoords from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( y_dev, ycoord_loc, coord_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy ycoords from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( z_dev, zcoord_loc, coord_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy zcoords from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Launch the analysis kernel: one thread per input line (grid is a
     * ceiling division so the last block may be partially used) */
    int threadsPerBlock = 256;
    int blocksPerGrid =( numOfLines + threadsPerBlock - 1) / threadsPerBlock;
    cudaMagic<<<blocksPerGrid, threadsPerBlock>>>( mag_dev, in_dev, means_dev, sd_dev, max_dev, min_dev, tempx, tempy, tempz, x_dev, y_dev, z_dev, numOfLines ,numOfReads );
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Copy the result arrays back to the host (these blocking copies also
     * synchronize with the kernel launched above) */
    err = cudaMemcpy( mag, mag_dev, numOfReads * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy MAGNATUDE from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( intense, in_dev, numOfReads * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy INTENSITY from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( means, means_dev, numOfReads * AXIS * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy MEANS from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( sd, sd_dev, numOfReads * AXIS * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy STANDARD DEVIATION from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( max, max_dev, numOfReads * AXIS * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy MAX from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy( min, min_dev, numOfReads * AXIS * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy MIN from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Write the final CSV file (takes addresses of the host arrays) */
    writeCSV( OUTPUT_FILE, &mag, &intense, &means, &sd, &max, &min, numOfReads);
    /* Free host memory */
    free( xcoord_loc );
    free( ycoord_loc );
    free( zcoord_loc );
    free( mag );
    free( intense );
    free( means );
    free( sd );
    free( max );
    free( min );
    /* Free device memory */
    cudaFree(x_dev);
    cudaFree(y_dev);
    cudaFree(z_dev);
    cudaFree(tempx);
    cudaFree(tempy);
    cudaFree(tempz);
    cudaFree( mag_dev );
    cudaFree( in_dev );
    cudaFree( means_dev );
    cudaFree( sd_dev );
    cudaFree( max_dev );
    cudaFree( min_dev );
    }
|
8896e6350382d92b0e6833cb0c85076c444e99eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "correlation_cuda_kernel.cuh"
#define CUDA_NUM_THREADS 1024
#define THREADS_PER_BLOCK 32
#define FULL_MASK 0xffffffff
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
using at::Half;
// Warp-wide sum using a shuffle-down tree reduction over all 32 lanes.
// After the loop, lane 0 holds the total (other lanes hold partial sums).
// Every lane of the warp must call this — the full-warp mask is used.
template<typename scalar_t>
__forceinline__ __device__ scalar_t warpReduceSum(scalar_t val) {
    for (int delta = 16; delta > 0; delta >>= 1) {
        val += __shfl_down_sync(FULL_MASK, val, delta);
    }
    return val;
}
// Block-wide sum via the classic two-stage reduction: each warp reduces with
// shuffles, warp leaders park their partials in shared memory, then warp 0
// reduces those partials.  Only thread 0's return value is the full sum.
// Assumes blockDim.x is a multiple of warpSize (true here: blocks are
// launched with THREADS_PER_BLOCK == 32).
template<typename scalar_t>
__forceinline__ __device__ scalar_t blockReduceSum(scalar_t val) {
static __shared__ scalar_t shared[32];  // one slot per warp (<= 32 warps/block)
int lane = threadIdx.x % warpSize;      // lane index within the warp
int wid = threadIdx.x / warpSize;       // warp index within the block
val = warpReduceSum(val);               // stage 1: intra-warp reduction
if (lane == 0)
shared[wid] = val;                      // warp leader publishes its partial
__syncthreads();                        // all partials visible before stage 2
// Warp 0 picks up one partial per warp; excess lanes contribute 0.
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid == 0)
val = warpReduceSum(val);               // stage 2: reduce the per-warp partials
return val;
}
// Repacks one NCHW image into a zero-padded channels-LAST (NHWC) buffer
// (despite the kernel's name, the output index order is y, x, c).
// Launch layout: grid = (batch, height, width); the block's threads stride
// over the channel dimension in steps of THREADS_PER_BLOCK.
// NOTE(review): the pad border of rinput is never written here — assumes
// the caller zero-filled rinput beforehand; confirm at the call site.
template <typename scalar_t>
__global__ void channels_first(const scalar_t* __restrict__ input, scalar_t* rinput, int channels, int height, int width, int pad_size)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = blockIdx.x;
int y = blockIdx.y;
int x = blockIdx.z;
int ch_off = threadIdx.x;
scalar_t value;
int dimcyx = channels * height * width;    // stride of one input image (CHW)
int dimyx = height * width;                // stride of one input channel plane
int p_dimx = (width + 2 * pad_size);       // padded width
int p_dimy = (height + 2 * pad_size);      // padded height
int p_dimyxc = channels * p_dimy * p_dimx; // stride of one padded image (HWC)
int p_dimxc = p_dimx * channels;           // stride of one padded row
for (int c = ch_off; c < channels; c += THREADS_PER_BLOCK) {
value = input[n * dimcyx + c * dimyx + y * width + x];
rinput[n * p_dimyxc + (y + pad_size) * p_dimxc + (x + pad_size) * channels + c] = value;
}
}
// Forward correlation kernel: for each output pixel (one block per
// (batch, out_y, out_x)) compute, for every displacement (tj, ti) within
// +/- displacement_rad, the mean of the element-wise product between a
// kernel_size x kernel_size patch of rInput1 centered at (y1, x1) and the
// displaced patch of rInput2.  Threads of the block stride over channels and
// the partial products are combined with a warp/block reduction.
// Inputs rInput1/rInput2 are the padded channels-last buffers produced by
// channels_first.  Accumulation is done in float regardless of scalar_t.
template<typename scalar_t>
__global__ void correlation_forward(scalar_t* __restrict__ output, const int nOutputChannels,
const int outputHeight, const int outputWidth, const scalar_t* __restrict__ rInput1,
const int nInputChannels, const int inputHeight, const int inputWidth,
const scalar_t* __restrict__ rInput2, const int pad_size, const int kernel_size,
const int max_displacement, const int stride1, const int stride2) {
int32_t pInputWidth = inputWidth + 2 * pad_size;
int32_t pInputHeight = inputHeight + 2 * pad_size;
int32_t kernel_rad = (kernel_size - 1) / 2;
int32_t displacement_rad = max_displacement / stride2;
int32_t displacement_size = 2 * displacement_rad + 1;  // displacements per axis
int32_t n = blockIdx.x;
int32_t y1 = blockIdx.y * stride1 + max_displacement;  // patch center in padded coords
int32_t x1 = blockIdx.z * stride1 + max_displacement;
int32_t c = threadIdx.x;
// Strides of the padded channels-last inputs.
int32_t pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int32_t pdimxc = pInputWidth * nInputChannels;
int32_t pdimc = nInputChannels;
// Strides of the NCHW output.
int32_t tdimcyx = nOutputChannels * outputHeight * outputWidth;
int32_t tdimyx = outputHeight * outputWidth;
int32_t tdimx = outputWidth;
int32_t nelems = kernel_size * kernel_size * pdimc;  // normalization divisor
// element-wise product along channel axis
for (int tj = -displacement_rad; tj <= displacement_rad; ++tj) {
for (int ti = -displacement_rad; ti <= displacement_rad; ++ti) {
int x2 = x1 + ti * stride2;  // displaced patch center in rInput2
int y2 = y1 + tj * stride2;
float acc0 = 0.0f;
for (int j = -kernel_rad; j <= kernel_rad; ++j) {
for (int i = -kernel_rad; i <= kernel_rad; ++i) {
// THREADS_PER_BLOCK
#pragma unroll
for (int ch = c; ch < pdimc; ch += blockDim.x) {
int indx1 = n * pdimyxc + (y1 + j) * pdimxc
+ (x1 + i) * pdimc + ch;
int indx2 = n * pdimyxc + (y2 + j) * pdimxc
+ (x2 + i) * pdimc + ch;
acc0 += static_cast<float>(rInput1[indx1] * rInput2[indx2]);
}
}
}
// Reduce the per-thread partials; use the cheaper warp path when the
// block is exactly one warp.
if (blockDim.x == warpSize) {
__syncwarp();
acc0 = warpReduceSum(acc0);
} else {
__syncthreads();
acc0 = blockReduceSum(acc0);
}
// Thread 0 holds the full sum; write the normalized correlation to the
// output channel encoding this (tj, ti) displacement.
if (threadIdx.x == 0) {
int tc = (tj + displacement_rad) * displacement_size
+ (ti + displacement_rad);
const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx
+ blockIdx.z;
output[tindx] = static_cast<scalar_t>(acc0 / nelems);
}
}
}
}
// Backward pass w.r.t. input1 for one batch item (`item`), launched with
// grid = (inputHeight, inputWidth, nInputChannels) so each block handles one
// input element; threads stride over the output-channel (displacement) axis
// and accumulate gradOutput * rInput2 contributions into shared memory,
// which thread 0 then sums and normalizes.
// Early returns are safe before __syncthreads(): x, y, c are block-uniform,
// so the whole block takes the same branch.
template <typename scalar_t>
__global__ void correlation_backward_input1(int item, scalar_t* gradInput1, int nInputChannels, int inputHeight, int inputWidth,
const scalar_t* __restrict__ gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
const scalar_t* __restrict__ rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;  // input position in padded coords
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
// Output region whose forward patches overlapped this input element.
int xmin = (x - kernel_rad - max_displacement) / stride1;
int ymin = (y - kernel_rad - max_displacement) / stride1;
int xmax = (x + kernel_rad - max_displacement) / stride1;
int ymax = (y + kernel_rad - max_displacement) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
// Clamp the overlap region to the valid output extent.
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
// Strides of the padded channels-last rInput2.
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
// Strides of the NCHW gradOutput.
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
// Strides of the NCHW gradInput1.
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
scalar_t nelems = kernel_size * kernel_size * nInputChannels;  // fwd normalization
__shared__ scalar_t prod_sum[THREADS_PER_BLOCK];  // per-thread partial sums
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
// Decode the (i2, j2) displacement encoded by output channel tc.
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int indx2 = n * pdimyxc + (y + j2)* pdimxc + (x + i2) * pdimc + c;
scalar_t val2 = rInput2[indx2];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val2;
}
}
}
__syncthreads();
// Thread 0 folds the partials and writes the normalized gradient.
if(tch_off == 0) {
scalar_t reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput1[indx1] = reduce_sum / nelems;
}
}
// Backward pass w.r.t. input2 for one batch item; mirror of
// correlation_backward_input1 with the displacement applied in the opposite
// direction (rInput1 is sampled at (y - j2, x - i2)) and with the output
// bounds recomputed per displacement, so out-of-range displacements just
// `continue` instead of early-returning.
template <typename scalar_t>
__global__ void correlation_backward_input2(int item, scalar_t* gradInput2, int nInputChannels, int inputHeight, int inputWidth,
const scalar_t* __restrict__ gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
const scalar_t* __restrict__ rInput1,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;  // input position in padded coords
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
// Strides of the padded channels-last rInput1.
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
// Strides of the NCHW gradOutput.
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
// Strides of the NCHW gradInput2.
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
scalar_t nelems = kernel_size * kernel_size * nInputChannels;  // fwd normalization
__shared__ scalar_t prod_sum[THREADS_PER_BLOCK];  // per-thread partial sums
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
// Decode the (i2, j2) displacement encoded by output channel tc.
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
// Output region whose forward patches (at this displacement) touched
// this input2 element.
int xmin = (x - kernel_rad - max_displacement - i2) / stride1;
int ymin = (y - kernel_rad - max_displacement - j2) / stride1;
int xmax = (x + kernel_rad - max_displacement - i2) / stride1;
int ymax = (y + kernel_rad - max_displacement - j2) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int indx1 = n * pdimyxc + (y - j2)* pdimxc + (x - i2) * pdimc + c;
scalar_t val1 = rInput1[indx1];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val1;
}
}
}
__syncthreads();
// Thread 0 folds the partials and writes the normalized gradient.
if(tch_off == 0) {
scalar_t reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx2 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput2[indx2] = reduce_sum / nelems;
}
}
// Host driver for the correlation forward pass (HIP build).
// 1) Repacks input1/input2 into the padded channels-last buffers
//    rInput1/rInput2 (one block per (batch, y, x)).  The pad border is not
//    written by channels_first — assumes rInput* were zero-filled by the
//    caller; TODO confirm.
// 2) Launches correlation_forward with one block per output (batch, y, x),
//    all on `stream`.
// Returns 1 on success, 0 if a HIP error was recorded.
// NOTE(review): the stride arguments (osb..osw, isb..isw, gsb..gsw), gc and
// corr_type_multiply are accepted but never used in this function.
int correlation_forward_cuda_kernel(at::Tensor& output,
int ob,
int oc,
int oh,
int ow,
int osb,
int osc,
int osh,
int osw,
at::Tensor& input1,
int ic,
int ih,
int iw,
int isb,
int isc,
int ish,
int isw,
at::Tensor& input2,
int gc,
int gsb,
int gsc,
int gsh,
int gsw,
at::Tensor& rInput1,
at::Tensor& rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
hipStream_t stream)
{
int batchSize = ob;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = oc;
int outputWidth = ow;
int outputHeight = oh;
// One block per input pixel for the layout-conversion kernels.
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "channels_first_fwd_1", ([&] {
hipLaunchKernelGGL(( channels_first<scalar_t>), dim3(blocks_grid),dim3(threads_block), 0, stream,
input1.data_ptr<scalar_t>(), rInput1.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size);
}));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "channels_first_fwd_2", ([&] {
hipLaunchKernelGGL(( channels_first<scalar_t>), dim3(blocks_grid),dim3(threads_block), 0, stream,
input2.data_ptr<scalar_t>(), rInput2.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size);
}));
// One block per output pixel for the correlation kernel.
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(batchSize, outputHeight, outputWidth);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "correlation_forward", ([&] {
hipLaunchKernelGGL(( correlation_forward<scalar_t>), dim3(totalBlocksCorr), dim3(threadsPerBlock), 0, stream,
output.data_ptr<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
rInput1.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth,
rInput2.data_ptr<scalar_t>(),
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}));
// Check for launch errors (asynchronous execution errors surface later).
hipError_t err = hipGetLastError();
// check for errors
if (err != hipSuccess) {
printf("error in correlation_forward_cuda_kernel: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
// Host driver for the correlation backward pass (HIP build): repacks the
// forward inputs into padded channels-last buffers, then launches one
// backward kernel per batch item for gradInput1 and gradInput2.
// gradInput1/gradInput2 must be pre-zeroed (the kernels skip out-of-range
// elements instead of writing zeros).  Returns 1 on success, 0 on HIP error.
// NOTE(review): the "lltm_forward_cuda" dispatch labels look like
// copy/paste leftovers from an LLTM example; they only affect dispatch
// error messages.  Many stride arguments and corr_type_multiply are unused.
int correlation_backward_cuda_kernel(
at::Tensor& gradOutput,
int gob,
int goc,
int goh,
int gow,
int gosb,
int gosc,
int gosh,
int gosw,
at::Tensor& input1,
int ic,
int ih,
int iw,
int isb,
int isc,
int ish,
int isw,
at::Tensor& input2,
int gsb,
int gsc,
int gsh,
int gsw,
at::Tensor& gradInput1,
int gisb,
int gisc,
int gish,
int gisw,
at::Tensor& gradInput2,
int ggc,
int ggsb,
int ggsc,
int ggsh,
int ggsw,
at::Tensor& rInput1,
at::Tensor& rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
hipStream_t stream)
{
int batchSize = gob;
int num = batchSize;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = goc;
int outputWidth = gow;
int outputHeight = goh;
// Layout conversion: one block per input pixel.
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( channels_first<scalar_t>), dim3(blocks_grid), dim3(threads_block), 0, stream,
input1.data_ptr<scalar_t>(),
rInput1.data_ptr<scalar_t>(),
nInputChannels,
inputHeight,
inputWidth,
pad_size
);
}));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( channels_first<scalar_t>), dim3(blocks_grid), dim3(threads_block), 0, stream,
input2.data_ptr<scalar_t>(),
rInput2.data_ptr<scalar_t>(),
nInputChannels,
inputHeight,
inputWidth,
pad_size
);
}));
// Backward kernels: one block per input pixel and channel, one launch per
// batch item.
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(inputHeight, inputWidth, nInputChannels);
for (int n = 0; n < num; ++n) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( correlation_backward_input1<scalar_t>), dim3(totalBlocksCorr), dim3(threadsPerBlock), 0, stream,
n, gradInput1.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth,
gradOutput.data_ptr<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
rInput2.data_ptr<scalar_t>(),
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}));
}
for(int n = 0; n < batchSize; n++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(rInput1.scalar_type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( correlation_backward_input2<scalar_t>), dim3(totalBlocksCorr), dim3(threadsPerBlock), 0, stream,
n, gradInput2.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth,
gradOutput.data_ptr<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
rInput1.data_ptr<scalar_t>(),
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}));
}
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in correlation_backward_cuda_kernel: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
| 8896e6350382d92b0e6833cb0c85076c444e99eb.cu | #include <stdio.h>
#include "correlation_cuda_kernel.cuh"
#define CUDA_NUM_THREADS 1024
#define THREADS_PER_BLOCK 32
#define FULL_MASK 0xffffffff
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
using at::Half;
// Warp-wide sum using a shuffle-down tree reduction over all 32 lanes.
// After the loop, lane 0 holds the total (other lanes hold partial sums).
// Every lane of the warp must call this — the full-warp mask is used.
template<typename scalar_t>
__forceinline__ __device__ scalar_t warpReduceSum(scalar_t val) {
    for (int delta = 16; delta > 0; delta >>= 1) {
        val += __shfl_down_sync(FULL_MASK, val, delta);
    }
    return val;
}
// Block-wide sum via the classic two-stage reduction: each warp reduces with
// shuffles, warp leaders park their partials in shared memory, then warp 0
// reduces those partials.  Only thread 0's return value is the full sum.
// Assumes blockDim.x is a multiple of warpSize (true here: blocks are
// launched with THREADS_PER_BLOCK == 32).
template<typename scalar_t>
__forceinline__ __device__ scalar_t blockReduceSum(scalar_t val) {
static __shared__ scalar_t shared[32];  // one slot per warp (<= 32 warps/block)
int lane = threadIdx.x % warpSize;      // lane index within the warp
int wid = threadIdx.x / warpSize;       // warp index within the block
val = warpReduceSum(val);               // stage 1: intra-warp reduction
if (lane == 0)
shared[wid] = val;                      // warp leader publishes its partial
__syncthreads();                        // all partials visible before stage 2
// Warp 0 picks up one partial per warp; excess lanes contribute 0.
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid == 0)
val = warpReduceSum(val);               // stage 2: reduce the per-warp partials
return val;
}
// Repacks one NCHW image into a zero-padded channels-LAST (NHWC) buffer
// (despite the kernel's name, the output index order is y, x, c).
// Launch layout: grid = (batch, height, width); the block's threads stride
// over the channel dimension in steps of THREADS_PER_BLOCK.
// NOTE(review): the pad border of rinput is never written here — assumes
// the caller zero-filled rinput beforehand; confirm at the call site.
template <typename scalar_t>
__global__ void channels_first(const scalar_t* __restrict__ input, scalar_t* rinput, int channels, int height, int width, int pad_size)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = blockIdx.x;
int y = blockIdx.y;
int x = blockIdx.z;
int ch_off = threadIdx.x;
scalar_t value;
int dimcyx = channels * height * width;    // stride of one input image (CHW)
int dimyx = height * width;                // stride of one input channel plane
int p_dimx = (width + 2 * pad_size);       // padded width
int p_dimy = (height + 2 * pad_size);      // padded height
int p_dimyxc = channels * p_dimy * p_dimx; // stride of one padded image (HWC)
int p_dimxc = p_dimx * channels;           // stride of one padded row
for (int c = ch_off; c < channels; c += THREADS_PER_BLOCK) {
value = input[n * dimcyx + c * dimyx + y * width + x];
rinput[n * p_dimyxc + (y + pad_size) * p_dimxc + (x + pad_size) * channels + c] = value;
}
}
// Forward correlation kernel: for each output pixel (one block per
// (batch, out_y, out_x)) compute, for every displacement (tj, ti) within
// +/- displacement_rad, the mean of the element-wise product between a
// kernel_size x kernel_size patch of rInput1 centered at (y1, x1) and the
// displaced patch of rInput2.  Threads of the block stride over channels and
// the partial products are combined with a warp/block reduction.
// Inputs rInput1/rInput2 are the padded channels-last buffers produced by
// channels_first.  Accumulation is done in float regardless of scalar_t.
template<typename scalar_t>
__global__ void correlation_forward(scalar_t* __restrict__ output, const int nOutputChannels,
const int outputHeight, const int outputWidth, const scalar_t* __restrict__ rInput1,
const int nInputChannels, const int inputHeight, const int inputWidth,
const scalar_t* __restrict__ rInput2, const int pad_size, const int kernel_size,
const int max_displacement, const int stride1, const int stride2) {
int32_t pInputWidth = inputWidth + 2 * pad_size;
int32_t pInputHeight = inputHeight + 2 * pad_size;
int32_t kernel_rad = (kernel_size - 1) / 2;
int32_t displacement_rad = max_displacement / stride2;
int32_t displacement_size = 2 * displacement_rad + 1;  // displacements per axis
int32_t n = blockIdx.x;
int32_t y1 = blockIdx.y * stride1 + max_displacement;  // patch center in padded coords
int32_t x1 = blockIdx.z * stride1 + max_displacement;
int32_t c = threadIdx.x;
// Strides of the padded channels-last inputs.
int32_t pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int32_t pdimxc = pInputWidth * nInputChannels;
int32_t pdimc = nInputChannels;
// Strides of the NCHW output.
int32_t tdimcyx = nOutputChannels * outputHeight * outputWidth;
int32_t tdimyx = outputHeight * outputWidth;
int32_t tdimx = outputWidth;
int32_t nelems = kernel_size * kernel_size * pdimc;  // normalization divisor
// element-wise product along channel axis
for (int tj = -displacement_rad; tj <= displacement_rad; ++tj) {
for (int ti = -displacement_rad; ti <= displacement_rad; ++ti) {
int x2 = x1 + ti * stride2;  // displaced patch center in rInput2
int y2 = y1 + tj * stride2;
float acc0 = 0.0f;
for (int j = -kernel_rad; j <= kernel_rad; ++j) {
for (int i = -kernel_rad; i <= kernel_rad; ++i) {
// THREADS_PER_BLOCK
#pragma unroll
for (int ch = c; ch < pdimc; ch += blockDim.x) {
int indx1 = n * pdimyxc + (y1 + j) * pdimxc
+ (x1 + i) * pdimc + ch;
int indx2 = n * pdimyxc + (y2 + j) * pdimxc
+ (x2 + i) * pdimc + ch;
acc0 += static_cast<float>(rInput1[indx1] * rInput2[indx2]);
}
}
}
// Reduce the per-thread partials; use the cheaper warp path when the
// block is exactly one warp.
if (blockDim.x == warpSize) {
__syncwarp();
acc0 = warpReduceSum(acc0);
} else {
__syncthreads();
acc0 = blockReduceSum(acc0);
}
// Thread 0 holds the full sum; write the normalized correlation to the
// output channel encoding this (tj, ti) displacement.
if (threadIdx.x == 0) {
int tc = (tj + displacement_rad) * displacement_size
+ (ti + displacement_rad);
const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx
+ blockIdx.z;
output[tindx] = static_cast<scalar_t>(acc0 / nelems);
}
}
}
}
// Backward pass w.r.t. input1 for one batch item (`item`), launched with
// grid = (inputHeight, inputWidth, nInputChannels) so each block handles one
// input element; threads stride over the output-channel (displacement) axis
// and accumulate gradOutput * rInput2 contributions into shared memory,
// which thread 0 then sums and normalizes.
// Early returns are safe before __syncthreads(): x, y, c are block-uniform,
// so the whole block takes the same branch.
template <typename scalar_t>
__global__ void correlation_backward_input1(int item, scalar_t* gradInput1, int nInputChannels, int inputHeight, int inputWidth,
const scalar_t* __restrict__ gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
const scalar_t* __restrict__ rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;  // input position in padded coords
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
// Output region whose forward patches overlapped this input element.
int xmin = (x - kernel_rad - max_displacement) / stride1;
int ymin = (y - kernel_rad - max_displacement) / stride1;
int xmax = (x + kernel_rad - max_displacement) / stride1;
int ymax = (y + kernel_rad - max_displacement) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
// Clamp the overlap region to the valid output extent.
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
// Strides of the padded channels-last rInput2.
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
// Strides of the NCHW gradOutput.
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
// Strides of the NCHW gradInput1.
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
scalar_t nelems = kernel_size * kernel_size * nInputChannels;  // fwd normalization
__shared__ scalar_t prod_sum[THREADS_PER_BLOCK];  // per-thread partial sums
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
// Decode the (i2, j2) displacement encoded by output channel tc.
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int indx2 = n * pdimyxc + (y + j2)* pdimxc + (x + i2) * pdimc + c;
scalar_t val2 = rInput2[indx2];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val2;
}
}
}
__syncthreads();
// Thread 0 folds the partials and writes the normalized gradient.
if(tch_off == 0) {
scalar_t reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput1[indx1] = reduce_sum / nelems;
}
}
// Backward pass w.r.t. input2 for one batch item; mirror of
// correlation_backward_input1 with the displacement applied in the opposite
// direction (rInput1 is sampled at (y - j2, x - i2)) and with the output
// bounds recomputed per displacement, so out-of-range displacements just
// `continue` instead of early-returning.
template <typename scalar_t>
__global__ void correlation_backward_input2(int item, scalar_t* gradInput2, int nInputChannels, int inputHeight, int inputWidth,
const scalar_t* __restrict__ gradOutput, int nOutputChannels, int outputHeight, int outputWidth,
const scalar_t* __restrict__ rInput1,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
int y = blockIdx.x * stride1 + pad_size;  // input position in padded coords
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
// Strides of the padded channels-last rInput1.
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
// Strides of the NCHW gradOutput.
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
// Strides of the NCHW gradInput2.
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
scalar_t nelems = kernel_size * kernel_size * nInputChannels;  // fwd normalization
__shared__ scalar_t prod_sum[THREADS_PER_BLOCK];  // per-thread partial sums
prod_sum[tch_off] = 0;
for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) {
// Decode the (i2, j2) displacement encoded by output channel tc.
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
// Output region whose forward patches (at this displacement) touched
// this input2 element.
int xmin = (x - kernel_rad - max_displacement - i2) / stride1;
int ymin = (y - kernel_rad - max_displacement - j2) / stride1;
int xmax = (x + kernel_rad - max_displacement - i2) / stride1;
int ymax = (y + kernel_rad - max_displacement - j2) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput2 is pre-allocated and zero filled
continue;
}
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
int indx1 = n * pdimyxc + (y - j2)* pdimxc + (x - i2) * pdimc + c;
scalar_t val1 = rInput1[indx1];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val1;
}
}
}
__syncthreads();
// Thread 0 folds the partials and writes the normalized gradient.
if(tch_off == 0) {
scalar_t reduce_sum = 0;
for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) {
reduce_sum += prod_sum[idx];
}
const int indx2 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput2[indx2] = reduce_sum / nelems;
}
}
// Host driver for the correlation forward pass.
// 1) Repacks input1/input2 into the padded channels-last buffers
//    rInput1/rInput2 (one block per (batch, y, x)).  The pad border is not
//    written by channels_first — assumes rInput* were zero-filled by the
//    caller; TODO confirm.
// 2) Launches correlation_forward with one block per output (batch, y, x),
//    all on `stream`.
// Returns 1 on success, 0 if a CUDA error was recorded.
// NOTE(review): the stride arguments (osb..osw, isb..isw, gsb..gsw), gc and
// corr_type_multiply are accepted but never used in this function.
int correlation_forward_cuda_kernel(at::Tensor& output,
int ob,
int oc,
int oh,
int ow,
int osb,
int osc,
int osh,
int osw,
at::Tensor& input1,
int ic,
int ih,
int iw,
int isb,
int isc,
int ish,
int isw,
at::Tensor& input2,
int gc,
int gsb,
int gsc,
int gsh,
int gsw,
at::Tensor& rInput1,
at::Tensor& rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
cudaStream_t stream)
{
int batchSize = ob;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = oc;
int outputWidth = ow;
int outputHeight = oh;
// One block per input pixel for the layout-conversion kernels.
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "channels_first_fwd_1", ([&] {
channels_first<scalar_t><<<blocks_grid,threads_block, 0, stream>>>(
input1.data_ptr<scalar_t>(), rInput1.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size);
}));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "channels_first_fwd_2", ([&] {
channels_first<scalar_t><<<blocks_grid,threads_block, 0, stream>>> (
input2.data_ptr<scalar_t>(), rInput2.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size);
}));
// One block per output pixel for the correlation kernel.
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(batchSize, outputHeight, outputWidth);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "correlation_forward", ([&] {
correlation_forward<scalar_t><<<totalBlocksCorr, threadsPerBlock, 0, stream>>>
(output.data_ptr<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
rInput1.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth,
rInput2.data_ptr<scalar_t>(),
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}));
// Check for launch errors (asynchronous execution errors surface later).
cudaError_t err = cudaGetLastError();
// check for errors
if (err != cudaSuccess) {
printf("error in correlation_forward_cuda_kernel: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
// Host driver for the correlation backward pass: repacks the forward inputs
// into padded channels-last buffers, then launches one backward kernel per
// batch item for gradInput1 and gradInput2.
// gradInput1/gradInput2 must be pre-zeroed (the kernels skip out-of-range
// elements instead of writing zeros).  Returns 1 on success, 0 on CUDA error.
// NOTE(review): the "lltm_forward_cuda" dispatch labels look like
// copy/paste leftovers from an LLTM example; they only affect dispatch
// error messages.  Many stride arguments and corr_type_multiply are unused.
int correlation_backward_cuda_kernel(
at::Tensor& gradOutput,
int gob,
int goc,
int goh,
int gow,
int gosb,
int gosc,
int gosh,
int gosw,
at::Tensor& input1,
int ic,
int ih,
int iw,
int isb,
int isc,
int ish,
int isw,
at::Tensor& input2,
int gsb,
int gsc,
int gsh,
int gsw,
at::Tensor& gradInput1,
int gisb,
int gisc,
int gish,
int gisw,
at::Tensor& gradInput2,
int ggc,
int ggsb,
int ggsc,
int ggsh,
int ggsw,
at::Tensor& rInput1,
at::Tensor& rInput2,
int pad_size,
int kernel_size,
int max_displacement,
int stride1,
int stride2,
int corr_type_multiply,
cudaStream_t stream)
{
int batchSize = gob;
int num = batchSize;
int nInputChannels = ic;
int inputWidth = iw;
int inputHeight = ih;
int nOutputChannels = goc;
int outputWidth = gow;
int outputHeight = goh;
// Layout conversion: one block per input pixel.
dim3 blocks_grid(batchSize, inputHeight, inputWidth);
dim3 threads_block(THREADS_PER_BLOCK);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "lltm_forward_cuda", ([&] {
channels_first<scalar_t><<<blocks_grid, threads_block, 0, stream>>>(
input1.data_ptr<scalar_t>(),
rInput1.data_ptr<scalar_t>(),
nInputChannels,
inputHeight,
inputWidth,
pad_size
);
}));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "lltm_forward_cuda", ([&] {
channels_first<scalar_t><<<blocks_grid, threads_block, 0, stream>>>(
input2.data_ptr<scalar_t>(),
rInput2.data_ptr<scalar_t>(),
nInputChannels,
inputHeight,
inputWidth,
pad_size
);
}));
// Backward kernels: one block per input pixel and channel, one launch per
// batch item.
dim3 threadsPerBlock(THREADS_PER_BLOCK);
dim3 totalBlocksCorr(inputHeight, inputWidth, nInputChannels);
for (int n = 0; n < num; ++n) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "lltm_forward_cuda", ([&] {
correlation_backward_input1<scalar_t><<<totalBlocksCorr, threadsPerBlock, 0, stream>>> (
n, gradInput1.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth,
gradOutput.data_ptr<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
rInput2.data_ptr<scalar_t>(),
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}));
}
for(int n = 0; n < batchSize; n++) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(rInput1.scalar_type(), "lltm_forward_cuda", ([&] {
correlation_backward_input2<scalar_t><<<totalBlocksCorr, threadsPerBlock, 0, stream>>>(
n, gradInput2.data_ptr<scalar_t>(), nInputChannels, inputHeight, inputWidth,
gradOutput.data_ptr<scalar_t>(), nOutputChannels, outputHeight, outputWidth,
rInput1.data_ptr<scalar_t>(),
pad_size,
kernel_size,
max_displacement,
stride1,
stride2);
}));
}
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in correlation_backward_cuda_kernel: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
|
7a2448c958f621724b6fd45520a927dbdb43b851.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void test_input_args(int* buffer_arg, int x) {
int val = buffer_arg[1];
buffer_arg[2] = 42;
buffer_arg[x] = 42;
buffer_arg[x + 1] = 42;
x = 1;
buffer_arg[x] = 42;
} | 7a2448c958f621724b6fd45520a927dbdb43b851.cu | __global__ void test_input_args(int* buffer_arg, int x) {
int val = buffer_arg[1];
buffer_arg[2] = 42;
buffer_arg[x] = 42;
buffer_arg[x + 1] = 42;
x = 1;
buffer_arg[x] = 42;
} |
df47c963722be9679f1ef4eed33ed296f5ad4137.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @file copymakeborder.cu
* @brief The kernel and invocation definitions of forming a border around an
* image.
*/
#include "copymakeborder.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
#define DEVICE_INLINE
#if defined(DEVICE_INLINE)
# define __DEVICE__ __device__ __forceinline__
#else
# define __DEVICE__ __device__
#endif
__DEVICE__
int borderInterpolate0(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index < 0) {
return 0 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 2;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index < 0) {
return -1 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 1;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index < 0) {
return index + range;
}
else if (index < range) {
return index;
}
else {
return index - range;
}
}
else {
return -2;
}
}
__DEVICE__
int borderInterpolate1(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = 0 - index;
else
index = (range << 1) - index - 2;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = -1 - index;
else
index = (range << 1) - index - 1;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index += range;
else
index -= range;
} while (index >= range || index < 0);
}
return index;
}
}
else {
return -2;
}
}
template <typename T0, typename T1>
__DEVICE__
T0 makeValuen(T1 value);
template <>
__DEVICE__
uchar makeValuen<uchar, uchar>(uchar value) {
return value;
}
template <>
__DEVICE__
uchar3 makeValuen<uchar3, uchar>(uchar value) {
return make_uchar3(value, value, value);
}
template <>
__DEVICE__
uchar4 makeValuen<uchar4, uchar>(uchar value) {
return make_uchar4(value, value, value, value);
}
template <>
__DEVICE__
float makeValuen<float, float>(float value) {
return value;
}
template <>
__DEVICE__
float3 makeValuen<float3, float>(float value) {
return make_float3(value, value, value);
}
template <>
__DEVICE__
float4 makeValuen<float4, float>(float value) {
return make_float4(value, value, value, value);
}
template <typename T0, typename T1>
__global__
void copyMakeBorderKernel(const T0* src, int rows, int cols, int src_stride,
T0* dst, int dst_stride, int top, int bottom,
int left, int right, BorderType border_type,
T1 border_value, bool small_border) {
int element_x, element_y;
if (sizeof(T1) == 1) {
element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
}
else {
element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
}
if (element_y >= (rows + top + bottom) ||
element_x >= (cols + left + right)) {
return;
}
int src_x = element_x - left;
int src_y = element_y - top;
if (small_border == true) {
src_x = borderInterpolate0(src_x, cols, border_type);
src_y = borderInterpolate0(src_y, rows, border_type);
}
else {
src_x = borderInterpolate1(src_x, cols, border_type);
src_y = borderInterpolate1(src_y, rows, border_type);
}
T0 value;
T0 *input, *output;
if (border_type != BORDER_TYPE_CONSTANT) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
if (src_x != -1 && src_y != -1) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
value = makeValuen<T0, T1>(border_value);
}
}
output = (T0*)((uchar*)dst + element_y * dst_stride);
output[element_x] = value;
}
RetCode copyMakeBorder(const uchar* src, int rows, int cols, int channels,
int src_stride, uchar* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
uchar border_value, hipStream_t stream) {
if (src == nullptr || dst == nullptr || rows < 1 || cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < cols * channels * (int)sizeof(uchar) ||
dst_stride < cols * channels * (int)sizeof(uchar) ||
top < 0 || bottom < 0 || left < 0 || right < 0 ||
(border_type != BORDER_TYPE_CONSTANT &&
border_type != BORDER_TYPE_REPLICATE &&
border_type != BORDER_TYPE_REFLECT &&
border_type != BORDER_TYPE_WRAP &&
border_type != BORDER_TYPE_REFLECT_101 &&
border_type != BORDER_TYPE_DEFAULT)) {
return RC_INVALID_VALUE;
}
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
hipMemcpy(dst, src, rows * src_stride, hipMemcpyDeviceToDevice);
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp((cols + left + right), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp((rows + top + bottom), kBlockDimY0, kBlockShiftY0);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
hipLaunchKernelGGL(( copyMakeBorderKernel<uchar, uchar>), dim3(grid), dim3(block), 0, stream, src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
hipLaunchKernelGGL(( copyMakeBorderKernel<uchar3, uchar>), dim3(grid), dim3(block), 0, stream,
(uchar3*)src, rows, cols, src_stride, (uchar3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else if (channels == 4) {
hipLaunchKernelGGL(( copyMakeBorderKernel<uchar4, uchar>), dim3(grid), dim3(block), 0, stream,
(uchar4*)src, rows, cols, src_stride, (uchar4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else {
}
return RC_SUCCESS;
}
RetCode copyMakeBorder(const float* src, int rows, int cols, int channels,
int src_stride, float* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
float border_value, hipStream_t stream) {
if (src == nullptr || dst == nullptr || rows < 1 || cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < cols * channels * (int)sizeof(float) ||
dst_stride < cols * channels * (int)sizeof(float) ||
top < 0 || bottom < 0 || left < 0 || right < 0 ||
(border_type != BORDER_TYPE_CONSTANT &&
border_type != BORDER_TYPE_REPLICATE &&
border_type != BORDER_TYPE_REFLECT &&
border_type != BORDER_TYPE_WRAP &&
border_type != BORDER_TYPE_REFLECT_101 &&
border_type != BORDER_TYPE_DEFAULT)) {
return RC_INVALID_VALUE;
}
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
hipMemcpy(dst, src, rows * src_stride, hipMemcpyDeviceToDevice);
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp((cols + left + right), kBlockDimX1, kBlockShiftX1);
grid.y = divideUp((rows + top + bottom), kBlockDimY1, kBlockShiftY1);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
hipLaunchKernelGGL(( copyMakeBorderKernel<float, float>), dim3(grid), dim3(block), 0, stream, src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
hipLaunchKernelGGL(( copyMakeBorderKernel<float3, float>), dim3(grid), dim3(block), 0, stream,
(float3*)src, rows, cols, src_stride, (float3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else if (channels == 4) {
hipLaunchKernelGGL(( copyMakeBorderKernel<float4, float>), dim3(grid), dim3(block), 0, stream,
(float4*)src, rows, cols, src_stride, (float4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else {
}
return RC_SUCCESS;
}
template <>
RetCode CopyMakeBorder<uchar, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
| df47c963722be9679f1ef4eed33ed296f5ad4137.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @file copymakeborder.cu
* @brief The kernel and invocation definitions of forming a border around an
* image.
*/
#include "copymakeborder.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
#define DEVICE_INLINE
#if defined(DEVICE_INLINE)
# define __DEVICE__ __device__ __forceinline__
#else
# define __DEVICE__ __device__
#endif
__DEVICE__
int borderInterpolate0(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index < 0) {
return 0 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 2;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index < 0) {
return -1 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 1;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index < 0) {
return index + range;
}
else if (index < range) {
return index;
}
else {
return index - range;
}
}
else {
return -2;
}
}
__DEVICE__
int borderInterpolate1(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = 0 - index;
else
index = (range << 1) - index - 2;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = -1 - index;
else
index = (range << 1) - index - 1;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index += range;
else
index -= range;
} while (index >= range || index < 0);
}
return index;
}
}
else {
return -2;
}
}
template <typename T0, typename T1>
__DEVICE__
T0 makeValuen(T1 value);
template <>
__DEVICE__
uchar makeValuen<uchar, uchar>(uchar value) {
return value;
}
template <>
__DEVICE__
uchar3 makeValuen<uchar3, uchar>(uchar value) {
return make_uchar3(value, value, value);
}
template <>
__DEVICE__
uchar4 makeValuen<uchar4, uchar>(uchar value) {
return make_uchar4(value, value, value, value);
}
template <>
__DEVICE__
float makeValuen<float, float>(float value) {
return value;
}
template <>
__DEVICE__
float3 makeValuen<float3, float>(float value) {
return make_float3(value, value, value);
}
template <>
__DEVICE__
float4 makeValuen<float4, float>(float value) {
return make_float4(value, value, value, value);
}
template <typename T0, typename T1>
__global__
void copyMakeBorderKernel(const T0* src, int rows, int cols, int src_stride,
T0* dst, int dst_stride, int top, int bottom,
int left, int right, BorderType border_type,
T1 border_value, bool small_border) {
int element_x, element_y;
if (sizeof(T1) == 1) {
element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
}
else {
element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
}
if (element_y >= (rows + top + bottom) ||
element_x >= (cols + left + right)) {
return;
}
int src_x = element_x - left;
int src_y = element_y - top;
if (small_border == true) {
src_x = borderInterpolate0(src_x, cols, border_type);
src_y = borderInterpolate0(src_y, rows, border_type);
}
else {
src_x = borderInterpolate1(src_x, cols, border_type);
src_y = borderInterpolate1(src_y, rows, border_type);
}
T0 value;
T0 *input, *output;
if (border_type != BORDER_TYPE_CONSTANT) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
if (src_x != -1 && src_y != -1) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
value = makeValuen<T0, T1>(border_value);
}
}
output = (T0*)((uchar*)dst + element_y * dst_stride);
output[element_x] = value;
}
RetCode copyMakeBorder(const uchar* src, int rows, int cols, int channels,
int src_stride, uchar* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
uchar border_value, cudaStream_t stream) {
if (src == nullptr || dst == nullptr || rows < 1 || cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < cols * channels * (int)sizeof(uchar) ||
dst_stride < cols * channels * (int)sizeof(uchar) ||
top < 0 || bottom < 0 || left < 0 || right < 0 ||
(border_type != BORDER_TYPE_CONSTANT &&
border_type != BORDER_TYPE_REPLICATE &&
border_type != BORDER_TYPE_REFLECT &&
border_type != BORDER_TYPE_WRAP &&
border_type != BORDER_TYPE_REFLECT_101 &&
border_type != BORDER_TYPE_DEFAULT)) {
return RC_INVALID_VALUE;
}
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
cudaMemcpy(dst, src, rows * src_stride, cudaMemcpyDeviceToDevice);
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp((cols + left + right), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp((rows + top + bottom), kBlockDimY0, kBlockShiftY0);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
copyMakeBorderKernel<uchar, uchar><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
copyMakeBorderKernel<uchar3, uchar><<<grid, block, 0, stream>>>(
(uchar3*)src, rows, cols, src_stride, (uchar3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else if (channels == 4) {
copyMakeBorderKernel<uchar4, uchar><<<grid, block, 0, stream>>>(
(uchar4*)src, rows, cols, src_stride, (uchar4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else {
}
return RC_SUCCESS;
}
RetCode copyMakeBorder(const float* src, int rows, int cols, int channels,
int src_stride, float* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
float border_value, cudaStream_t stream) {
if (src == nullptr || dst == nullptr || rows < 1 || cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < cols * channels * (int)sizeof(float) ||
dst_stride < cols * channels * (int)sizeof(float) ||
top < 0 || bottom < 0 || left < 0 || right < 0 ||
(border_type != BORDER_TYPE_CONSTANT &&
border_type != BORDER_TYPE_REPLICATE &&
border_type != BORDER_TYPE_REFLECT &&
border_type != BORDER_TYPE_WRAP &&
border_type != BORDER_TYPE_REFLECT_101 &&
border_type != BORDER_TYPE_DEFAULT)) {
return RC_INVALID_VALUE;
}
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
cudaMemcpy(dst, src, rows * src_stride, cudaMemcpyDeviceToDevice);
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp((cols + left + right), kBlockDimX1, kBlockShiftX1);
grid.y = divideUp((rows + top + bottom), kBlockDimY1, kBlockShiftY1);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
copyMakeBorderKernel<float, float><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
copyMakeBorderKernel<float3, float><<<grid, block, 0, stream>>>(
(float3*)src, rows, cols, src_stride, (float3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else if (channels == 4) {
copyMakeBorderKernel<float4, float><<<grid, block, 0, stream>>>(
(float4*)src, rows, cols, src_stride, (float4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else {
}
return RC_SUCCESS;
}
template <>
RetCode CopyMakeBorder<uchar, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
97c1bdcf2c302e1908c5b9dbfffa5139480d425c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
__global__ void gather_points_kernel(int b, int c, int n, int m,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
}
}
}
}
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out, hipStream_t stream) {
hipError_t err;
hipLaunchKernelGGL(( gather_points_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0, stream,
b, c, n, npoints, points, idx, out);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
atomicAdd(grad_points + (i * c + l) * n + a,
grad_out[(i * c + l) * m + j]);
}
}
}
}
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points,
hipStream_t stream) {
hipError_t err;
hipLaunchKernelGGL(( gather_points_grad_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0,
stream, b, c, n, npoints, grad_out, idx,
grad_points);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Input dataset: (b, n, 3), tmp: (b, n)
// Ouput idxs (b, m)
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
if (m <= 0)
return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
if (mag <= 1e-3)
continue;
float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) +
(z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, hipStream_t stream) {
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 97c1bdcf2c302e1908c5b9dbfffa5139480d425c.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
// Gathers selected point columns: out[bi][ch][j] = points[bi][ch][idx[bi][j]].
// Grid covers (batch, channel); threads stride over the m sampled points.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
    for (int batch = blockIdx.x; batch < b; batch += gridDim.x) {
        for (int ch = blockIdx.y; ch < c; ch += gridDim.y) {
            const float *src_row = points + (batch * c + ch) * n;
            float *dst_row = out + (batch * c + ch) * m;
            const int *sel = idx + batch * m;
            for (int j = threadIdx.x; j < m; j += blockDim.x) {
                dst_row[j] = src_row[sel[j]];
            }
        }
    }
}
// Host launcher for gather_points_kernel.
// points: (b, c, n), idx: (b, npoints), out: (b, c, npoints).
// Aborts the process if the launch configuration is rejected.
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
                                  const float *points, const int *idx,
                                  float *out, cudaStream_t stream) {
    // One block per (batch, channel) pair; threads cover the npoints samples.
    dim3 grid(b, c, 1);
    gather_points_kernel<<<grid, opt_n_threads(npoints), 0, stream>>>(
        b, c, n, npoints, points, idx, out);

    // Kernel launches report configuration errors via cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
// Backward pass of gather_points: scatter-add each output gradient back to
// the source point it was gathered from.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
    for (int i = blockIdx.x; i < b; i += gridDim.x) {
        for (int l = blockIdx.y; l < c; l += gridDim.y) {
            for (int j = threadIdx.x; j < m; j += blockDim.x) {
                int a = idx[i * m + j];
                // atomicAdd: several sampled points may map to the same
                // source index, so plain += would race.
                atomicAdd(grad_points + (i * c + l) * n + a,
                          grad_out[(i * c + l) * m + j]);
            }
        }
    }
}
// Host launcher for gather_points_grad_kernel.
// grad_out: (b, c, npoints), idx: (b, npoints), grad_points: (b, c, n).
// Aborts the process if the launch configuration is rejected.
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
                                       const float *grad_out, const int *idx,
                                       float *grad_points,
                                       cudaStream_t stream) {
    // Mirror the forward launch shape: one block per (batch, channel).
    dim3 grid(b, c, 1);
    gather_points_grad_kernel<<<grid, opt_n_threads(npoints), 0, stream>>>(
        b, c, n, npoints, grad_out, idx, grad_points);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// Merge reduction slot idx2 into idx1: keep the larger distance and the
// index of the point that produced it (ties keep the entry already at idx1).
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
    const float v1 = dists[idx1];
    const float v2 = dists[idx2];
    const int winner = (v2 > v1) ? dists_i[idx2] : dists_i[idx1];
    dists[idx1] = max(v1, v2);
    dists_i[idx1] = winner;
}
// Input dataset: (b, n, 3), temp: (b, n)
// Output idxs: (b, m)
//
// Iterative farthest-point sampling: one block per batch element repeatedly
// picks the point maximizing the minimum squared distance to all points
// selected so far; temp caches, per point, that running minimum.
// NOTE(review): temp must be pre-filled by the caller with a large value,
// since it is only ever min()-reduced here -- confirm at the call site.
//
// Preconditions: blockDim.x == block_size and block_size is a power of two
// (the shared-memory tree reduction below depends on both).
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
    int b, int n, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
    if (m <= 0)
        return;
    // Per-block reduction scratch: candidate distance and its point index.
    __shared__ float dists[block_size];
    __shared__ int dists_i[block_size];

    int batch_index = blockIdx.x;
    dataset += batch_index * n * 3;
    temp += batch_index * n;
    idxs += batch_index * m;

    int tid = threadIdx.x;
    const int stride = block_size;

    int old = 0;  // most recently selected point; point 0 seeds the set
    if (threadIdx.x == 0)
        idxs[0] = old;

    __syncthreads();
    for (int j = 1; j < m; j++) {
        int besti = 0;
        float best = -1;
        float x1 = dataset[old * 3 + 0];
        float y1 = dataset[old * 3 + 1];
        float z1 = dataset[old * 3 + 2];
        // Each thread scans a strided slice of the points, updating the
        // cached min distance and tracking its local farthest candidate.
        for (int k = tid; k < n; k += stride) {
            float x2, y2, z2;
            x2 = dataset[k * 3 + 0];
            y2 = dataset[k * 3 + 1];
            z2 = dataset[k * 3 + 2];
            float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
            // Points with squared norm <= 1e-3 are skipped. NOTE(review):
            // looks like a guard against zero-padded points -- confirm.
            if (mag <= 1e-3)
                continue;
            float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) +
                      (z2 - z1) * (z2 - z1);
            float d2 = min(d, temp[k]);
            temp[k] = d2;
            besti = d2 > best ? k : besti;
            best = d2 > best ? d2 : best;
        }
        dists[tid] = best;
        dists_i[tid] = besti;
        __syncthreads();

        // Tree reduction over the block; leaves the global argmax in slot 0.
        if (block_size >= 512) {
            if (tid < 256) {
                __update(dists, dists_i, tid, tid + 256);
            }
            __syncthreads();
        }
        if (block_size >= 256) {
            if (tid < 128) {
                __update(dists, dists_i, tid, tid + 128);
            }
            __syncthreads();
        }
        if (block_size >= 128) {
            if (tid < 64) {
                __update(dists, dists_i, tid, tid + 64);
            }
            __syncthreads();
        }
        if (block_size >= 64) {
            if (tid < 32) {
                __update(dists, dists_i, tid, tid + 32);
            }
            __syncthreads();
        }
        if (block_size >= 32) {
            if (tid < 16) {
                __update(dists, dists_i, tid, tid + 16);
            }
            __syncthreads();
        }
        if (block_size >= 16) {
            if (tid < 8) {
                __update(dists, dists_i, tid, tid + 8);
            }
            __syncthreads();
        }
        if (block_size >= 8) {
            if (tid < 4) {
                __update(dists, dists_i, tid, tid + 4);
            }
            __syncthreads();
        }
        if (block_size >= 4) {
            if (tid < 2) {
                __update(dists, dists_i, tid, tid + 2);
            }
            __syncthreads();
        }
        if (block_size >= 2) {
            if (tid < 1) {
                __update(dists, dists_i, tid, tid + 1);
            }
            __syncthreads();
        }

        old = dists_i[0];
        if (tid == 0)
            idxs[j] = old;
        // BUGFIX: barrier before the next iteration overwrites dists/dists_i.
        // Without it, thread 0 can rewrite dists_i[0] (and any thread can
        // rewrite dists[tid]) while slower threads are still reading
        // dists_i[0] above -- a shared-memory data race.
        __syncthreads();
    }
}
// Launches furthest_point_sampling_kernel with a block size matched to n.
// dataset: (b, n, 3); temp: (b, n) workspace (NOTE(review): should be
// pre-filled with a large value by the caller, as the kernel only ever
// min()-reduces it); idxs: (b, m) output indices.
// The template parameter must equal the launched thread count: the kernel's
// shared-memory reduction reads block_size slots.
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
                                            const float *dataset, float *temp,
                                            int *idxs, cudaStream_t stream) {
    cudaError_t err;
    unsigned int n_threads = opt_n_threads(n);

    switch (n_threads) {
        case 512:
            furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 256:
            furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 128:
            furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 64:
            furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 32:
            furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 16:
            furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 8:
            furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 4:
            furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 2:
            furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        case 1:
            furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
            break;
        default:
            // BUGFIX: in this branch n_threads is not one of the templated
            // sizes; launching <512> with a different thread count would read
            // uninitialized shared memory in the reduction. Launch a full
            // 512-thread block instead (the kernel's strided loop handles any
            // n correctly).
            furthest_point_sampling_kernel<512><<<b, 512, 0, stream>>>(
                b, n, m, dataset, temp, idxs);
    }

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
|
5efd92b479ec89f3129b8f7ac0b33823fc95a628.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Accumulate per-atom forces (xyz-interleaved, addressed indirectly through
// atomIndex) into 64-bit accumulators.
// forceBuffers is laid out as three planes of PADDED_NUM_ATOMS entries:
// all x components, then all y, then all z.
// Multiplying by 0x100000000 (2^32) before truncating to long long converts
// the real-valued force into a fixed-point representation.
extern "C" __global__
void addForces(const real* __restrict__ forces, long long* __restrict__ forceBuffers, int* __restrict__ atomIndex) {
    // Grid-stride loop over atoms.
    for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) {
        int index = atomIndex[atom];  // position of this atom's xyz triple in `forces`
        forceBuffers[atom] += (long long) (forces[3*index]*0x100000000);
        forceBuffers[atom+PADDED_NUM_ATOMS] += (long long) (forces[3*index+1]*0x100000000);
        forceBuffers[atom+2*PADDED_NUM_ATOMS] += (long long) (forces[3*index+2]*0x100000000);
    }
}
| 5efd92b479ec89f3129b8f7ac0b33823fc95a628.cu | extern "C" __global__
void addForces(const real* __restrict__ forces, long long* __restrict__ forceBuffers, int* __restrict__ atomIndex) {
for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) {
int index = atomIndex[atom];
forceBuffers[atom] += (long long) (forces[3*index]*0x100000000);
forceBuffers[atom+PADDED_NUM_ATOMS] += (long long) (forces[3*index+1]*0x100000000);
forceBuffers[atom+2*PADDED_NUM_ATOMS] += (long long) (forces[3*index+2]*0x100000000);
}
}
|
28cea6c4e3ba1d84f7e10fc4f64202f242137d30.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <cstdlib>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2\core.hpp>
#include <opencv2\imgproc.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\imgcodecs.hpp>
#define MAX_THREADS 32
using namespace std;
using namespace cv;
enum curves { Line, Circle, ArchimedeanSpiral, Cardioid }; // kinds of curves, definitely there're others interesting curves
__constant__ float PI = 3.1415;
// Paint black every pixel lying within `thickness` of the line
// y = slope * x + pitch, in coordinates centered on the image midpoint
// (y axis pointing up). `src`/`inputPitch` are accepted but unused.
__global__ void drawLine(float* src, size_t inputPitch, int rows, int cols, float slope, float pitch, float* dst, size_t outputPitch, float thickness)
{
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= rows || col >= cols)
        return;  // thread falls outside the image

    // Shift the origin to the image center.
    const float x = (float)(col - cols / 2);
    const float y = (float)(rows / 2 - row);

    if (fabsf(y - (slope * x + pitch)) <= thickness)
    {
        // Pitched addressing: rows are outputPitch bytes apart.
        float* outputPixel = (float*)((char*)dst + row * outputPitch) + col;
        *outputPixel = 0.0f;  // black
    }
}
// Draw the circle (x - centerX)^2 + (y - centerY)^2 = radius^2: pixels whose
// distance from the center falls in [radius, radius + thickness] are painted
// black. Coordinates are centered on the image midpoint, y axis up.
// `src`/`inputPitch` are accepted but unused.
__global__ void drawCircle(float* src, size_t inputPitch, int rows, int cols, float centerX, float centerY, float radius, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        // Distance of this pixel from the circle center.
        float t = sqrtf(powf(x - centerX, 2) + powf(y - centerY, 2));
        if (t >= radius && t <= radius + thickness)
        {
            // Pitched addressing: rows are outputPitch bytes apart.
            float* outputPixel = (float*)((char*)dst + row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
/*
 Draw an Archimedean spiral r = a + b*theta.
 Each pixel is converted to polar coordinates about the image center
 (r = sqrt(x^2 + y^2), theta = atan2(y, x)); pixels within `thickness` of the
 curve are painted black. Since atan2f returns theta in (-pi, pi], only the
 single turn of the spiral covered by that range is matched.
 `src`/`inputPitch` are accepted but unused.
*/
__global__ void drawSpiral(float* src, size_t inputPitch, int rows, int cols, float a, float b, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        float radius = sqrtf(x * x + y * y);
        float theta = atan2f(y, x);
        if (fabsf(radius - (a + theta*b)) <= thickness)
        {
            // Pitched addressing: rows are outputPitch bytes apart.
            float* outputPixel = (float*)((char*)dst + row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
/*
 Draw a cardioid r = a * (1 - sin(theta)).
 Each pixel is converted to polar coordinates about the image center
 (r = sqrt(x^2 + y^2), theta = atan2(y, x)); pixels within `thickness` of the
 curve are painted black. Using sin (rather than cos) orients the cusp
 vertically. `src`/`inputPitch` are accepted but unused.
*/
__global__ void drawHeart(float* src, size_t inputPitch, int rows, int cols, float a, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        float radius = sqrtf(x * x+y * y);
        float theta = atan2f(y, x);
        if (fabsf(radius - a*(1 - sinf(theta))) <=thickness)
        {
            // Pitched addressing: rows are outputPitch bytes apart.
            float* outputPixel = (float*)((char*)dst + row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
/*
 Render the selected curve `type` in black onto `output` on the GPU.
 `input` and `output` are single-channel CV_32F Mats of identical size;
 curve parameters are hard-coded inside each switch case, and `thickness`
 is the half-width of the painted band in pixels.
 NOTE(review): the draw kernels never read `src`, so the upload of `input`
 appears redundant -- confirm before removing.
*/
void drawCurves(const Mat & input, Mat & output, curves type, float thickness)
{
    // define blocks size and threads size
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    hipDeviceProp_t prop;
    // NOTE(review): `prop` is queried but never used afterwards, and
    // deviceCount - 1 would be -1 when no device is present.
    hipGetDeviceProperties(&prop, deviceCount - 1);
    /*
    my sample image size is 801 * 601, so we need 801 * 601 threads to process this image on device at least,
    each block can contain 1024 threads at most in my device, so ,I can define block size as x = 801 / 32 = 26, y = 801 / 32 = 19
    */
    // One extra block per dimension covers the ragged edge; kernels bounds-check.
    dim3 blockSize(input.cols / MAX_THREADS + 1, input.rows / MAX_THREADS + 1);
    dim3 threadSize(MAX_THREADS, MAX_THREADS);
    size_t inputPitch, outputPitch;
    float* src; float* dst;
    // Two streams so the two uploads can overlap.
    // NOTE(review): cv::Mat data is pageable host memory, so these async
    // copies likely run synchronously -- confirm if overlap matters.
    hipStream_t inputStream, outputStream;
    hipStreamCreate(&inputStream); hipStreamCreate(&outputStream);
    hipMallocPitch(&src, &inputPitch, sizeof(float)*input.cols, input.rows);
    hipMemcpy2DAsync(src, inputPitch, input.data, sizeof(float)*input.cols, sizeof(float)*input.cols, input.rows, hipMemcpyHostToDevice, inputStream);
    hipMallocPitch(&dst, &outputPitch, sizeof(float)*output.cols, output.rows);
    hipMemcpy2DAsync(dst, outputPitch, output.data, sizeof(float)*output.cols, sizeof(float)*output.cols, output.rows, hipMemcpyHostToDevice, outputStream);
    hipStreamSynchronize(inputStream); hipStreamSynchronize(outputStream);
    hipError_t error;
    // Dispatch the kernel matching the requested curve (default stream).
    switch (type)
    {
    case Line:
    {
        float theta = 45.0;
        float slope = tan(theta*3.14 / 180.0);
        float pitch = 30.0;
        hipLaunchKernelGGL(( drawLine) , dim3(blockSize), dim3(threadSize), 0, 0, src, inputPitch, input.rows, input.cols, slope, pitch, dst, outputPitch, thickness);
        break;
    }
    case Circle:
    {
        float centerX = 20.0;
        float centerY = 50.0;
        float radius = 80.0;
        hipLaunchKernelGGL(( drawCircle) , dim3(blockSize), dim3(threadSize), 0, 0, src, inputPitch, input.rows, input.cols, centerX, centerY, radius, dst, outputPitch, thickness);
        break;
    }
    case ArchimedeanSpiral:
    {
        float a = 100.0;
        float b = 40.0;
        hipLaunchKernelGGL(( drawSpiral) , dim3(blockSize), dim3(threadSize), 0, 0, src, inputPitch, input.rows, input.cols, a, b, dst, outputPitch, thickness);
        break;
    }
    case Cardioid:
    {
        float amp = 80.0;
        hipLaunchKernelGGL(( drawHeart) , dim3(blockSize), dim3(threadSize), 0, 0, src, inputPitch, input.rows, input.cols, amp, dst, outputPitch, thickness);
        break;
    }
    default:
        break;
    }
    // Wait for the kernel; surfaces launch/execution errors.
    error = hipDeviceSynchronize();
    if (error != hipSuccess)
    {
        cout << hipGetErrorString(error) << endl;
    }
    // Download the (possibly modified) image back into `output`.
    hipMemcpy2D(output.data, sizeof(float)*output.cols, dst, outputPitch, sizeof(float)*output.cols, output.rows, hipMemcpyDeviceToHost);
    // resource releasing
    hipStreamDestroy(inputStream); hipStreamDestroy(outputStream);
    hipFree(src); hipFree(dst);
}
int main()
{
Mat white(Size(801, 601), CV_8U, Scalar(255)); // use odd number of size is convenient for computing
white.convertTo(white, CV_32F);
Mat result = white.clone(); // deeply copy
float time;
hipEvent_t start, end;
hipEventCreate(&start); hipEventCreate(&end);
hipEventRecord(start);
float thickness = 5;
drawCurves(white, result, Line, thickness);
drawCurves(white, result, Circle, thickness);
drawCurves(white, result, ArchimedeanSpiral, thickness);
drawCurves(white, result, Cardioid, thickness);
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
cout << "time cost on device: " << time << " ms." << endl;
hipEventDestroy(start); hipEventDestroy(end);
result.convertTo(result, CV_8U);
string title = "CUDA";
namedWindow(title);
imshow(title, result);
waitKey(0);
return 0;
} | 28cea6c4e3ba1d84f7e10fc4f64202f242137d30.cu | #include <stdio.h>
#include <iostream>
#include <cmath>
#include <cstdlib>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2\core.hpp>
#include <opencv2\imgproc.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\imgcodecs.hpp>
#define MAX_THREADS 32
using namespace std;
using namespace cv;
enum curves { Line, Circle, ArchimedeanSpiral, Cardioid }; // kinds of curves, definitely there're others interesting curves
__constant__ float PI = 3.1415;
// Paint black every pixel within `thickness` of the line y = slope*x + pitch,
// in coordinates centered on the image midpoint (y axis up).
// `src`/`inputPitch` are accepted but unused.
__global__ void drawLine(float* src, size_t inputPitch, int rows, int cols, float slope, float pitch, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        if (fabsf(y - (slope*x + pitch)) <= thickness)
        {
            // Pitched addressing: rows are outputPitch bytes apart.
            float* outputPixel = (float*)((char*)dst +row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
// Draw the circle (x - centerX)^2 + (y - centerY)^2 = radius^2: pixels whose
// distance from the center falls in [radius, radius + thickness] are painted
// black. Coordinates are centered on the image midpoint, y axis up.
// `src`/`inputPitch` are accepted but unused.
__global__ void drawCircle(float* src, size_t inputPitch, int rows, int cols, float centerX, float centerY, float radius, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        // Distance of this pixel from the circle center.
        float t = sqrtf(powf(x - centerX, 2) + powf(y - centerY, 2));
        if (t >= radius && t <= radius + thickness)
        {
            float* outputPixel = (float*)((char*)dst + row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
/*
 Draw an Archimedean spiral r = a + b*theta.
 Each pixel is converted to polar coordinates about the image center
 (r = sqrt(x^2 + y^2), theta = atan2(y, x)); pixels within `thickness` of the
 curve are painted black. Since atan2f returns theta in (-pi, pi], only the
 single turn of the spiral covered by that range is matched.
 `src`/`inputPitch` are accepted but unused.
*/
__global__ void drawSpiral(float* src, size_t inputPitch, int rows, int cols, float a, float b, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        float radius = sqrtf(x * x + y * y);
        float theta = atan2f(y, x);
        if (fabsf(radius - (a + theta*b)) <= thickness)
        {
            float* outputPixel = (float*)((char*)dst + row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
/*
 Draw a cardioid r = a * (1 - sin(theta)).
 Each pixel is converted to polar coordinates about the image center
 (r = sqrt(x^2 + y^2), theta = atan2(y, x)); pixels within `thickness` of the
 curve are painted black. Using sin (rather than cos) orients the cusp
 vertically. `src`/`inputPitch` are accepted but unused.
*/
__global__ void drawHeart(float* src, size_t inputPitch, int rows, int cols, float a, float* dst, size_t outputPitch, float thickness)
{
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    // Shift origin to the image center.
    float x = (float)(col - cols / 2);
    float y = (float)(rows / 2 - row);
    if (row < rows&&col < cols)
    {
        float radius = sqrtf(x * x+y * y);
        float theta = atan2f(y, x);
        if (fabsf(radius - a*(1 - sinf(theta))) <=thickness)
        {
            float* outputPixel = (float*)((char*)dst + row*outputPitch) + col;
            *outputPixel = 0.0; // make this point of pixel value as black
        }
    }
}
/*
 Render the selected curve `type` in black onto `output` on the GPU.
 `input` and `output` are single-channel CV_32F Mats of identical size;
 curve parameters are hard-coded inside each switch case, and `thickness`
 is the half-width of the painted band in pixels.
 NOTE(review): the draw kernels never read `src`, so the upload of `input`
 appears redundant -- confirm before removing.
*/
void drawCurves(const Mat & input, Mat & output, curves type, float thickness)
{
    // define blocks size and threads size
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    cudaDeviceProp prop;
    // NOTE(review): `prop` is queried but never used afterwards, and
    // deviceCount - 1 would be -1 when no device is present.
    cudaGetDeviceProperties(&prop, deviceCount - 1);
    /*
    my sample image size is 801 * 601, so we need 801 * 601 threads to process this image on device at least,
    each block can contain 1024 threads at most in my device, so ,I can define block size as x = 801 / 32 = 26, y = 801 / 32 = 19
    */
    // One extra block per dimension covers the ragged edge; kernels bounds-check.
    dim3 blockSize(input.cols / MAX_THREADS + 1, input.rows / MAX_THREADS + 1);
    dim3 threadSize(MAX_THREADS, MAX_THREADS);
    size_t inputPitch, outputPitch;
    float* src; float* dst;
    // Two streams so the two uploads can overlap.
    // NOTE(review): cv::Mat data is pageable host memory, so these async
    // copies likely run synchronously -- confirm if overlap matters.
    cudaStream_t inputStream, outputStream;
    cudaStreamCreate(&inputStream); cudaStreamCreate(&outputStream);
    cudaMallocPitch(&src, &inputPitch, sizeof(float)*input.cols, input.rows);
    cudaMemcpy2DAsync(src, inputPitch, input.data, sizeof(float)*input.cols, sizeof(float)*input.cols, input.rows, cudaMemcpyHostToDevice, inputStream);
    cudaMallocPitch(&dst, &outputPitch, sizeof(float)*output.cols, output.rows);
    cudaMemcpy2DAsync(dst, outputPitch, output.data, sizeof(float)*output.cols, sizeof(float)*output.cols, output.rows, cudaMemcpyHostToDevice, outputStream);
    cudaStreamSynchronize(inputStream); cudaStreamSynchronize(outputStream);
    cudaError_t error;
    // Dispatch the kernel matching the requested curve (default stream).
    switch (type)
    {
    case Line:
    {
        float theta = 45.0;
        float slope = tan(theta*3.14 / 180.0);
        float pitch = 30.0;
        drawLine <<<blockSize, threadSize>>> (src, inputPitch, input.rows, input.cols, slope, pitch, dst, outputPitch, thickness);
        break;
    }
    case Circle:
    {
        float centerX = 20.0;
        float centerY = 50.0;
        float radius = 80.0;
        drawCircle <<<blockSize, threadSize>>> (src, inputPitch, input.rows, input.cols, centerX, centerY, radius, dst, outputPitch, thickness);
        break;
    }
    case ArchimedeanSpiral:
    {
        float a = 100.0;
        float b = 40.0;
        drawSpiral <<<blockSize, threadSize>>> (src, inputPitch, input.rows, input.cols, a, b, dst, outputPitch, thickness);
        break;
    }
    case Cardioid:
    {
        float amp = 80.0;
        drawHeart <<<blockSize, threadSize>>> (src, inputPitch, input.rows, input.cols, amp, dst, outputPitch, thickness);
        break;
    }
    default:
        break;
    }
    // Wait for the kernel; surfaces launch/execution errors.
    error = cudaDeviceSynchronize();
    if (error != cudaSuccess)
    {
        cout << cudaGetErrorString(error) << endl;
    }
    // Download the (possibly modified) image back into `output`.
    cudaMemcpy2D(output.data, sizeof(float)*output.cols, dst, outputPitch, sizeof(float)*output.cols, output.rows, cudaMemcpyDeviceToHost);
    // resource releasing
    cudaStreamDestroy(inputStream); cudaStreamDestroy(outputStream);
    cudaFree(src); cudaFree(dst);
}
int main()
{
Mat white(Size(801, 601), CV_8U, Scalar(255)); // use odd number of size is convenient for computing
white.convertTo(white, CV_32F);
Mat result = white.clone(); // deeply copy
float time;
cudaEvent_t start, end;
cudaEventCreate(&start); cudaEventCreate(&end);
cudaEventRecord(start);
float thickness = 5;
drawCurves(white, result, Line, thickness);
drawCurves(white, result, Circle, thickness);
drawCurves(white, result, ArchimedeanSpiral, thickness);
drawCurves(white, result, Cardioid, thickness);
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
cout << "time cost on device: " << time << " ms." << endl;
cudaEventDestroy(start); cudaEventDestroy(end);
result.convertTo(result, CV_8U);
string title = "CUDA";
namedWindow(title);
imshow(title, result);
waitKey(0);
return 0;
} |
38043cdda48d5c4c0cfb79929a58b07643fb1b22.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <unordered_map>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cublas.hpp>
// Taskflow/cuBLAS demo: upload a vector, run amax (index of max |value|)
// three times inside a cublas child flow, and copy the result back.
// host[200] = -100 has the largest magnitude, so `res` should identify
// position 200. NOTE(review): cublasI*amax writes a 1-based index, so the
// printed value is presumably 201 -- confirm against tf::cublasFlow::amax.
int main() {
    tf::Taskflow taskflow;
    tf::Executor executor;

    size_t N = 1024;
    float* x = nullptr;   // device input vector
    int* r;               // device result (argmax index)
    int res;              // host copy of the result

    std::vector<float> host(N, 0.0f);
    host[200] = -100.0f;  // artificially set the mid-pos as the largest

    TF_CHECK_CUDA(hipMalloc(&x, N*sizeof(float)), "failed to malloc x");
    TF_CHECK_CUDA(hipMalloc(&r, sizeof(int)), "failed to malloc r");

    // Build the graph: h2d upload -> cublas childflow -> d2h download.
    taskflow.emplace([&](tf::cudaFlow& cf){
        auto h2d = cf.copy(x, host.data(), N).name("h2d");
        auto child = cf.childflow([&](tf::cublasFlow& cbf){ /// childflow
            cbf.amax<float>(N, x, 1, r).name("amax");
            cbf.amax<float>(N, x, 1, r).name("amax");
            cbf.amax<float>(N, x, 1, r).name("amax");
        }).name("cublas");
        auto d2h = cf.copy(&res, r, 1).name("d2h");
        child.succeed(h2d)
             .precede(d2h);
    }).name("cudaflow");

    // wait() guarantees `res` is populated before it is read below.
    executor.run(taskflow).wait();

    taskflow.dump(std::cout);
    std::cout << "res: " << res << '\n';

    TF_CHECK_CUDA(hipFree(x), "failed to free x");
    TF_CHECK_CUDA(hipFree(r), "failed to free r");
    //std::cout << HIPBLAS_OP_N << '\n';
    return 0;
}
/*#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (
hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta
){
hipblasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
hipblasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
for(int itr=0; itr<5; itr++) {
std::cout << "iteration " << itr << '\n';
hipError_t cudaStat;
hipblasStatus_t stat;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * M + j + 1);
}
}
cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != hipSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
auto beg = std::chrono::steady_clock::now();
auto hptr = tf::cublas_per_thread_handle_pool.acquire(0);
auto handle = hptr->native_handle;
auto end = std::chrono::steady_clock::now();
std::cout << "create handle: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
//int version;
//cublasGetVersion(handle, &version);
//std::cout << "version is " << version << '\n';
beg = std::chrono::steady_clock::now();
stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree (devPtrA);
//hipblasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "set matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
end = std::chrono::steady_clock::now();
std::cout << "modify matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree (devPtrA);
//hipblasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "get matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
hipFree (devPtrA);
//beg = std::chrono::steady_clock::now();
//hipblasDestroy(handle);
//end = std::chrono::steady_clock::now();
//std::cout << "destroy handle: "
// << std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
// << " us\n";
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
beg = std::chrono::steady_clock::now();
hipStream_t stream;
hipStreamCreate(&stream);
end = std::chrono::steady_clock::now();
std::cout << "create stream: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
hipStreamDestroy(stream);
end = std::chrono::steady_clock::now();
std::cout << "destroy stream: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
tf::cublas_per_thread_handle_pool.release(std::move(hptr));
end = std::chrono::steady_clock::now();
std::cout << "release handle: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(end - beg).count()
<< " us\n";
}
return EXIT_SUCCESS;
}*/
| 38043cdda48d5c4c0cfb79929a58b07643fb1b22.cu | #include <chrono>
#include <unordered_map>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cublas.hpp>
// Taskflow/cuBLAS demo: upload a vector, run amax (index of max |value|)
// three times inside a cublas child flow, and copy the result back.
// host[200] = -100 has the largest magnitude, so `res` should identify
// position 200. NOTE(review): cublasI*amax writes a 1-based index, so the
// printed value is presumably 201 -- confirm against tf::cublasFlow::amax.
int main() {
    tf::Taskflow taskflow;
    tf::Executor executor;

    size_t N = 1024;
    float* x = nullptr;   // device input vector
    int* r;               // device result (argmax index)
    int res;              // host copy of the result

    std::vector<float> host(N, 0.0f);
    host[200] = -100.0f;  // artificially set the mid-pos as the largest

    TF_CHECK_CUDA(cudaMalloc(&x, N*sizeof(float)), "failed to malloc x");
    TF_CHECK_CUDA(cudaMalloc(&r, sizeof(int)), "failed to malloc r");

    // Build the graph: h2d upload -> cublas childflow -> d2h download.
    taskflow.emplace([&](tf::cudaFlow& cf){
        auto h2d = cf.copy(x, host.data(), N).name("h2d");
        auto child = cf.childflow([&](tf::cublasFlow& cbf){ /// childflow
            cbf.amax<float>(N, x, 1, r).name("amax");
            cbf.amax<float>(N, x, 1, r).name("amax");
            cbf.amax<float>(N, x, 1, r).name("amax");
        }).name("cublas");
        auto d2h = cf.copy(&res, r, 1).name("d2h");
        child.succeed(h2d)
             .precede(d2h);
    }).name("cudaflow");

    // wait() guarantees `res` is populated before it is read below.
    executor.run(taskflow).wait();

    taskflow.dump(std::cout);
    std::cout << "res: " << res << '\n';

    TF_CHECK_CUDA(cudaFree(x), "failed to free x");
    TF_CHECK_CUDA(cudaFree(r), "failed to free r");
    //std::cout << CUBLAS_OP_N << '\n';
    return 0;
}
/*#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (
cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta
){
cublasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
cublasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
for(int itr=0; itr<5; itr++) {
std::cout << "iteration " << itr << '\n';
cudaError_t cudaStat;
cublasStatus_t stat;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * M + j + 1);
}
}
cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != cudaSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
auto beg = std::chrono::steady_clock::now();
auto hptr = tf::cublas_per_thread_handle_pool.acquire(0);
auto handle = hptr->native_handle;
auto end = std::chrono::steady_clock::now();
std::cout << "create handle: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
//int version;
//cublasGetVersion(handle, &version);
//std::cout << "version is " << version << '\n';
beg = std::chrono::steady_clock::now();
stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree (devPtrA);
//cublasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "set matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
end = std::chrono::steady_clock::now();
std::cout << "modify matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree (devPtrA);
//cublasDestroy(handle);
return EXIT_FAILURE;
}
end = std::chrono::steady_clock::now();
std::cout << "get matrix: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
cudaFree (devPtrA);
//beg = std::chrono::steady_clock::now();
//cublasDestroy(handle);
//end = std::chrono::steady_clock::now();
//std::cout << "destroy handle: "
// << std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
// << " us\n";
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
beg = std::chrono::steady_clock::now();
cudaStream_t stream;
cudaStreamCreate(&stream);
end = std::chrono::steady_clock::now();
std::cout << "create stream: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
cudaStreamDestroy(stream);
end = std::chrono::steady_clock::now();
std::cout << "destroy stream: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(end - beg).count()
<< " us\n";
beg = std::chrono::steady_clock::now();
tf::cublas_per_thread_handle_pool.release(std::move(hptr));
end = std::chrono::steady_clock::now();
std::cout << "release handle: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(end - beg).count()
<< " us\n";
}
return EXIT_SUCCESS;
}*/
|
ca116a816929766de362bcc1b837ff68efed4267.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dot_gpu.h"
// Per-block partial dot product of v1 and v2 (length N).
// Each thread accumulates a grid-strided slice, then a shared-memory tree
// reduction folds the block's partials; block k writes its partial sum to
// res[k]. The caller must sum res[] on the host to finish the dot product.
// Requires blockDim.x == threadsPerBlock and a power-of-two block size
// (the halving reduction below depends on it).
__global__ void dot__(float *v1, float *v2, float *res, int N) {
    __shared__ float cache [threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    // Grid-stride accumulation of this thread's slice.
    float temp = 0.0;
    while (tid < N) {
        temp += v1[tid] * v2[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    // All partials must be in shared memory before the reduction starts.
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    int i = blockDim.x / 2;
    while (i != 0) {
        if (cacheIndex < i) {
            cache[cacheIndex] += cache[cacheIndex + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0) {
        res[blockIdx.x] = cache[0];
    }
}
float * gpu_dot (float *v1, float *v2, size_t N) {
float *dev_v1, *dev_v2, *dev_res, *res;
res = new float[blocksPerGrid];
hipMalloc((void**)&dev_v1, N * sizeof(float));
hipMalloc((void**)&dev_v2, N * sizeof(float));
hipMalloc((void**)&dev_res, blocksPerGrid * sizeof(float));
hipMemcpy(dev_v1, v1, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_v2, v2, N * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dot__), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_v1, dev_v2, dev_res, (int)N);
hipMemcpy(res, dev_res, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_v1);
hipFree(dev_v2);
hipFree(dev_res);
return res;
} | ca116a816929766de362bcc1b837ff68efed4267.cu | #include "dot_gpu.h"
// Per-block partial dot product of v1 and v2 (length N).
// Each thread accumulates a grid-strided slice, then a shared-memory tree
// reduction folds the block's partials; block k writes its partial sum to
// res[k]. The caller must sum res[] on the host to finish the dot product.
// Requires blockDim.x == threadsPerBlock and a power-of-two block size
// (the halving reduction below depends on it).
__global__ void dot__(float *v1, float *v2, float *res, int N) {
    __shared__ float cache [threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    // Grid-stride accumulation of this thread's slice.
    float temp = 0.0;
    while (tid < N) {
        temp += v1[tid] * v2[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    // All partials must be in shared memory before the reduction starts.
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    int i = blockDim.x / 2;
    while (i != 0) {
        if (cacheIndex < i) {
            cache[cacheIndex] += cache[cacheIndex + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0) {
        res[blockIdx.x] = cache[0];
    }
}
float * gpu_dot (float *v1, float *v2, size_t N) {
float *dev_v1, *dev_v2, *dev_res, *res;
res = new float[blocksPerGrid];
cudaMalloc((void**)&dev_v1, N * sizeof(float));
cudaMalloc((void**)&dev_v2, N * sizeof(float));
cudaMalloc((void**)&dev_res, blocksPerGrid * sizeof(float));
cudaMemcpy(dev_v1, v1, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_v2, v2, N * sizeof(float), cudaMemcpyHostToDevice);
dot__<<<blocksPerGrid, threadsPerBlock>>>(dev_v1, dev_v2, dev_res, (int)N);
cudaMemcpy(res, dev_res, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_v1);
cudaFree(dev_v2);
cudaFree(dev_res);
return res;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.